Schema (113 columns, in order; each entry is column name (dtype)):

- hexsha (string), size (int64), ext (string), lang (string)
- max_stars_repo_path (string), max_stars_repo_name (string), max_stars_repo_head_hexsha (string), max_stars_repo_licenses (list), max_stars_count (int64), max_stars_repo_stars_event_min_datetime (string), max_stars_repo_stars_event_max_datetime (string)
- max_issues_repo_path (string), max_issues_repo_name (string), max_issues_repo_head_hexsha (string), max_issues_repo_licenses (list), max_issues_count (int64), max_issues_repo_issues_event_min_datetime (string), max_issues_repo_issues_event_max_datetime (string)
- max_forks_repo_path (string), max_forks_repo_name (string), max_forks_repo_head_hexsha (string), max_forks_repo_licenses (list), max_forks_count (int64), max_forks_repo_forks_event_min_datetime (string), max_forks_repo_forks_event_max_datetime (string)
- content (string)
- avg_line_length (float64), max_line_length (int64), alphanum_fraction (float64)
- 41 quality-signal columns, all float64 unless noted: qsc_code_num_words_quality_signal (int64), qsc_code_num_chars_quality_signal, qsc_code_mean_word_length_quality_signal, qsc_code_frac_words_unique_quality_signal, qsc_code_frac_chars_top_2grams_quality_signal, qsc_code_frac_chars_top_3grams_quality_signal, qsc_code_frac_chars_top_4grams_quality_signal, qsc_code_frac_chars_dupe_5grams_quality_signal, qsc_code_frac_chars_dupe_6grams_quality_signal, qsc_code_frac_chars_dupe_7grams_quality_signal, qsc_code_frac_chars_dupe_8grams_quality_signal, qsc_code_frac_chars_dupe_9grams_quality_signal, qsc_code_frac_chars_dupe_10grams_quality_signal, qsc_code_frac_chars_replacement_symbols_quality_signal, qsc_code_frac_chars_digital_quality_signal, qsc_code_frac_chars_whitespace_quality_signal, qsc_code_size_file_byte_quality_signal, qsc_code_num_lines_quality_signal, qsc_code_num_chars_line_max_quality_signal, qsc_code_num_chars_line_mean_quality_signal, qsc_code_frac_chars_alphabet_quality_signal, qsc_code_frac_chars_comments_quality_signal, qsc_code_cate_xml_start_quality_signal, qsc_code_frac_lines_dupe_lines_quality_signal, qsc_code_cate_autogen_quality_signal, qsc_code_frac_lines_long_string_quality_signal, qsc_code_frac_chars_string_length_quality_signal, qsc_code_frac_chars_long_word_length_quality_signal, qsc_code_frac_lines_string_concat_quality_signal, qsc_code_cate_encoded_data_quality_signal, qsc_code_frac_chars_hex_words_quality_signal, qsc_code_frac_lines_prompt_comments_quality_signal, qsc_code_frac_lines_assert_quality_signal, qsc_codepython_cate_ast_quality_signal, qsc_codepython_frac_lines_func_ratio_quality_signal, qsc_codepython_cate_var_zero_quality_signal (bool), qsc_codepython_frac_lines_pass_quality_signal, qsc_codepython_frac_lines_import_quality_signal, qsc_codepython_frac_lines_simplefunc_quality_signal, qsc_codepython_score_lines_no_logic_quality_signal, qsc_codepython_frac_lines_print_quality_signal
- 41 counterpart columns with the same names minus the _quality_signal suffix (qsc_code_num_words through qsc_codepython_frac_lines_print), all int64 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are entirely null
- effective (string), hits (int64)

Each record below lists its repository metadata, then the content field as a Python code block, then its quality-signal values. In those listings the signal names are abbreviated by dropping the qsc_code_ / qsc_codepython_ prefix and the _quality_signal suffix.

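Most of the qsc_* columns are functions of the content string alone. The sketch below is only an illustration of what a few of the simpler signals plausibly measure; the tokenization, the handling of comments, and the denominators used by the real pipeline are assumptions here, not taken from the dataset.

```python
# Minimal sketch of a few of the per-file signals listed above.
# Assumptions (not taken from the dataset): whitespace tokenization,
# per-character fractions over the raw text, and line fractions
# normalized by the total number of lines.

def simple_signals(content: str) -> dict:
    lines = content.splitlines()
    words = content.split()
    n_chars = len(content)
    return {
        "qsc_code_num_words_quality_signal": len(words),
        "qsc_code_num_chars_quality_signal": n_chars,
        "qsc_code_mean_word_length_quality_signal":
            sum(map(len, words)) / max(len(words), 1),
        "qsc_code_frac_words_unique_quality_signal":
            len(set(words)) / max(len(words), 1),
        "qsc_code_frac_chars_whitespace_quality_signal":
            sum(c.isspace() for c in content) / max(n_chars, 1),
        "qsc_code_frac_chars_digital_quality_signal":
            sum(c.isdigit() for c in content) / max(n_chars, 1),
        "qsc_code_num_lines_quality_signal": len(lines),
        "qsc_code_num_chars_line_max_quality_signal":
            max((len(line) for line in lines), default=0),
        "qsc_code_num_chars_line_mean_quality_signal":
            n_chars / max(len(lines), 1),
        "qsc_codepython_frac_lines_import_quality_signal":
            sum(line.lstrip().startswith(("import ", "from ")) for line in lines)
            / max(len(lines), 1),
    }
```
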
Record 1

| Field | Value |
|---|---|
| hexsha | fea776840ba3b32f75565766babfd041aa64ab68 |
| size | 1,830 |
| ext | py |
| lang | Python |
| max_stars_repo_path | environments/recommenders/recsim_wrapper_test.py |
| max_stars_repo_name | jackblandin/ml-fairness-gym |
| max_stars_repo_head_hexsha | dce1feaacf2588e0a2d6187e896796241a25ed81 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count / event datetimes | null / null / null |
| max_issues_* | same path, name, head hexsha, and licenses; count and event datetimes null |
| max_forks_* | same path, name, head hexsha, and licenses; count and event datetimes null |

content:

```python
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for recsim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration
class RecommenderTest(absltest.TestCase):
def test_interest_exploration_can_run(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': False,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
def test_interest_exploration_can_run_with_resampling(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': True,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
if __name__ == '__main__':
absltest.main()
```

avg_line_length = 31.551724 | max_line_length = 74 | alphanum_fraction = 0.742623

Quality signals:
num_words = 240 | num_chars = 1,830 | mean_word_length = 5.408333 | frac_words_unique = 0.5
frac_chars_top_2grams = 0.046225 | top_3grams = 0.03698 | top_4grams = 0.024653
frac_chars_dupe_5grams = 0.389831 | dupe_6grams = 0.389831 | dupe_7grams = 0.340524 | dupe_8grams = 0.340524 | dupe_9grams = 0.340524 | dupe_10grams = 0.340524
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.013289 | frac_chars_whitespace = 0.177596
size_file_byte = 1,830 | num_lines = 57 | num_chars_line_max = 75 | num_chars_line_mean = 32.105263
frac_chars_alphabet = 0.849169 | frac_chars_comments = 0.337158 | cate_xml_start = 0 | frac_lines_dupe_lines = 0.5
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.083822 | frac_chars_long_word_length = 0 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.0625 | cate_var_zero = false | frac_lines_pass = 0 | frac_lines_import = 0.21875 | frac_lines_simplefunc = 0 | score_lines_no_logic = 0.3125 | frac_lines_print = 0.03125

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

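A quick consistency check on the numbers above (an observation about this record, not documented behavior of the pipeline): 1,830 characters over 57 lines gives 1830 / 57 ≈ 32.105, which matches num_chars_line_mean, while avg_line_length (31.551724) equals 1830 / 58, consistent with a denominator that also counts a trailing empty line. The per-line fractions reduce to n/32: frac_lines_import = 0.21875 = 7/32 (the file has seven import/from lines), frac_lines_func_ratio = 0.0625 = 2/32 (two test methods), frac_lines_print = 0.03125 = 1/32, which suggests the line-based signals are normalized by the 32 lines left after dropping the comment header and docstring.
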
Record 2

| Field | Value |
|---|---|
| hexsha | fea7d2eca288a3ef4c60e731703c65a5e9641808 |
| size | 3,034 |
| ext | py |
| lang | Python |
| max_stars_repo_path | moss_client_cli.py |
| max_stars_repo_name | mernst32/dl-searchcode-code |
| max_stars_repo_head_hexsha | 504fe59df245ba123ad8ad6e45f03b17de6ef236 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count / event datetimes | null / null / null |
| max_issues_* | same path, name, head hexsha, and licenses; count and event datetimes null |
| max_forks_* | same path, name, head hexsha, and licenses; count and event datetimes null |

content:

```python
import argparse
import csv
import os
from moss_client.core import submit_and_dl, parse_moss_reports
data_folder = 'data'
def handle_input(user_id, base_folder, parse, only_parse, join_file, batch):
global data_folder
abs_path = os.path.abspath(os.path.dirname(__file__))
root_data_folder = os.path.join(abs_path, data_folder)
if not os.path.exists(root_data_folder):
os.makedirs(root_data_folder)
report_links_file = os.path.join(root_data_folder, 'links_to_moss_reports.html')
report_csv_file = os.path.join(root_data_folder, 'moss_report.csv')
if not os.path.isabs(base_folder):
base_folder = os.path.join(abs_path, base_folder)
if len(join_file) > 0:
expected_keys = ["SC_Filepath", "Stackoverflow_Links"]
with open(join_file, mode='r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
actual_keys = csv_reader.fieldnames
if expected_keys[0] != actual_keys[0] or expected_keys[1] != actual_keys[1]:
print("Error: Unexpected Headers! SC_Filepath and Stackoverflow_Links are required!")
return -1
if not only_parse:
submit_and_dl(user_id, base_folder, report_links_file, batch)
if parse or only_parse:
print("Parsing the moss reports...")
parse_moss_reports(report_links_file, report_csv_file, join_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="MOSS CLI client for submitting java files to the service and downloading the report from the "
"service locally. Will go through the sub folders of the given folder and submit the java files "
"for plagiarism checks and download the reports locally, creating a linking file in the process")
parser.add_argument('user_id', metavar='U', nargs=1, help="Your user-id for the MOSS service.")
parser.add_argument('folder', metavar='F', nargs=1, help="The folder whose contents you want to submit.")
parser.add_argument('-p', '--parse', action='store_true', help="Parses the moss reports into a csv file.")
parser.add_argument('-o', '--only-parse', action='store_true',
help="Only parses the local moss reports and does not submit files and download the reports. "
"Requires the reports and the links_to_reports html file created normally by this app.")
parser.add_argument('-j', '--join-file', nargs=1, default=[""],
help="When the parse or only-parse option is given, joins the parsed data with the parsed data.")
parser.add_argument('-b', '--batch-mode', action='store_true',
help="Only submits a 100 folders to the Moss Service, also looks for already processed folders so "
"that it does not submit those again.")
args = parser.parse_args()
handle_input(args.user_id[0], args.folder[0], args.parse, args.only_parse, args.join_file[0], args.batch_mode)
```

avg_line_length = 57.245283 | max_line_length = 123 | alphanum_fraction = 0.680949

Quality signals:
num_words = 438 | num_chars = 3,034 | mean_word_length = 4.493151 | frac_words_unique = 0.33105
frac_chars_top_2grams = 0.04065 | top_3grams = 0.051829 | top_4grams = 0.028963
frac_chars_dupe_5grams = 0.089939 | dupe_6grams = 0.051829 | dupe_7grams = 0.028455 | dupe_8grams = 0 | dupe_9grams = 0 | dupe_10grams = 0
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.006754 | frac_chars_whitespace = 0.219183
size_file_byte = 3,034 | num_lines = 52 | num_chars_line_max = 124 | num_chars_line_mean = 58.346154
frac_chars_alphabet = 0.823976 | frac_chars_comments = 0 | cate_xml_start = 0 | frac_lines_dupe_lines = 0
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.354977 | frac_chars_long_word_length = 0.00857 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.021739 | cate_var_zero = false | frac_lines_pass = 0 | frac_lines_import = 0.086957 | frac_lines_simplefunc = 0 | score_lines_no_logic = 0.130435 | frac_lines_print = 0.043478

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 3

| Field | Value |
|---|---|
| hexsha | fea81883e0bc239697344b2c58f07b4a45f346d3 |
| size | 6,495 |
| ext | py |
| lang | Python |
| max_stars_repo_path | catkin_ws/src/localization/src/localization_node.py |
| max_stars_repo_name | DiegoOrtegoP/Software |
| max_stars_repo_head_hexsha | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd |
| max_stars_repo_licenses | ["CC-BY-2.0"] |
| max_stars_count | 12 (events 2016-04-14T12:21:46.000Z to 2021-06-18T07:51:40.000Z) |
| max_issues_count | 14 (events 2017-03-03T23:33:05.000Z to 2018-04-03T18:07:53.000Z) |
| max_forks_count | 113 (events 2016-05-03T06:11:42.000Z to 2019-06-01T14:37:38.000Z) |
| max_issues_*, max_forks_* | same path, name, head hexsha, and licenses as max_stars_* |

content:

```python
#!/usr/bin/env python
import rospy
#from apriltags_ros.msg import AprilTagDetectionArray
from duckietown_msgs.msg import AprilTagsWithInfos
import tf2_ros
from tf2_msgs.msg import TFMessage
import tf.transformations as tr
from geometry_msgs.msg import Transform, TransformStamped
import numpy as np
from localization import PoseAverage
from visualization_msgs.msg import Marker
# Localization Node
# Author: Teddy Ort
# Inputs: apriltags/duckietown_msgs/AprilTags - A list of april tags in a camera frame
# Outputs: pose2d/duckietown_msgs/Pose2dStamped - The estimated pose of the robot in the world frame in 2D coordinates
# pose3d/geometry_msgs/PoseStamped - The estimated pose of the robot in the world frame in 3D coordinates
class LocalizationNode(object):
def __init__(self):
self.node_name = 'localization_node'
# Constants
self.world_frame = "world"
self.duckiebot_frame = "duckiebot"
self.duckiebot_lifetime = self.setupParam("~duckiebot_lifetime", 5) # The number of seconds to keep the duckiebot alive bewtween detections
self.highlight_lifetime = self.setupParam("~highlight_lifetime", 3) # The number of seconds to keep a sign highlighted after a detection
# Setup the publishers and subscribers
self.sub_april = rospy.Subscriber("~apriltags", AprilTagsWithInfos, self.tag_callback)
self.pub_tf = rospy.Publisher("/tf", TFMessage, queue_size=1, latch=True)
self.pub_rviz = rospy.Publisher("/sign_highlights", Marker, queue_size=1, latch=True)
# Setup the transform listener
self.tfbuf = tf2_ros.Buffer()
self.tfl = tf2_ros.TransformListener(self.tfbuf)
# Use a timer to make the duckiebot disappear
self.lifetimer = rospy.Time.now()
self.publish_duckie_marker()
rospy.loginfo("[%s] has started", self.node_name)
def tag_callback(self, msg_tag):
# Listen for the transform of the tag in the world
avg = PoseAverage.PoseAverage()
for tag in msg_tag.detections:
try:
Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1))
Mtbase_w=self.transform_to_matrix(Tt_w.transform)
Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi))
Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase)
Mt_r=self.pose_to_matrix(tag.pose)
Mr_t=np.linalg.inv(Mt_r)
Mr_w=np.dot(Mt_w,Mr_t)
Tr_w = self.matrix_to_transform(Mr_w)
avg.add_pose(Tr_w)
self.publish_sign_highlight(tag.id)
except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex:
rospy.logwarn("Error looking up transform for tag_%s", tag.id)
rospy.logwarn(ex.message)
Tr_w = avg.get_average() # Average of the opinions
# Broadcast the robot transform
if Tr_w is not None:
# Set the z translation, and x and y rotations to 0
Tr_w.translation.z = 0
rot = Tr_w.rotation
rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]
(rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz)
T = TransformStamped()
T.transform = Tr_w
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
self.lifetimer = rospy.Time.now()
def publish_duckie_marker(self):
# Publish a duckiebot transform far away unless the timer was reset
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
if rospy.Time.now() - self.lifetimer > rospy.Duration(self.duckiebot_lifetime):
T = TransformStamped()
T.transform.translation.z = 1000 # Throw it 1km in the air
T.transform.rotation.w = 1
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
def publish_sign_highlight(self, id):
# Publish a highlight marker on the sign that is seen by the robot
m = Marker()
m.header.frame_id="tag_{id}".format(id=id)
m.header.stamp = rospy.Time.now()
m.id=id
m.lifetime = rospy.Duration(self.highlight_lifetime)
m.type = Marker.CYLINDER
p = m.pose.position
o = m.pose.orientation
c = m.color
s = m.scale
s.x, s.y, s.z = (0.1, 0.1, 0.3)
p.z = 0.15
c.a, c.r, c.g, c.b = (0.2, 0.9, 0.9, 0.0)
o.w = 1
self.pub_rviz.publish(m)
def pose_to_matrix(self, p):
# Return the 4x4 homogeneous matrix for a PoseStamped.msg p from the geometry_msgs
trans = (p.pose.position.x, p.pose.position.y, p.pose.position.z)
rot = (p.pose.orientation.x, p.pose.orientation.y, p.pose.orientation.z, p.pose.orientation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def transform_to_matrix(self, T):
# Return the 4x4 homogeneous matrix for a TransformStamped.msg T from the geometry_msgs
trans = (T.translation.x, T.translation.y, T.translation.z)
rot = (T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def matrix_to_transform(self, M):
# Return a TransformStamped.msg T from the geometry_msgs from a 4x4 homogeneous matrix
T=Transform()
(T.translation.x, T.translation.y, T.translation.z) = tr.translation_from_matrix(M)
(T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w) = tr.quaternion_from_matrix(M)
return T
def setupParam(self, param_name, default_value):
value = rospy.get_param(param_name, default_value)
rospy.set_param(param_name, value) #Write to parameter server for transparancy
rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
return value
if __name__ == '__main__':
rospy.init_node('localization_node', anonymous=False)
localization_node = LocalizationNode()
rospy.spin()
```

avg_line_length = 45.41958 | max_line_length = 147 | alphanum_fraction = 0.652194

Quality signals:
num_words = 915 | num_chars = 6,495 | mean_word_length = 4.47541 | frac_words_unique = 0.242623
frac_chars_top_2grams = 0.017582 | top_3grams = 0.017582 | top_4grams = 0.014652
frac_chars_dupe_5grams = 0.231013 | dupe_6grams = 0.196825 | dupe_7grams = 0.185104 | dupe_8grams = 0.169231 | dupe_9grams = 0.14188 | dupe_10grams = 0.122833
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.012477 | frac_chars_whitespace = 0.247267
size_file_byte = 6,495 | num_lines = 142 | num_chars_line_max = 148 | num_chars_line_mean = 45.739437
frac_chars_alphabet = 0.825118 | frac_chars_comments = 0.197383 | cate_xml_start = 0 | frac_lines_dupe_lines = 0.134615
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.039507 | frac_chars_long_word_length = 0 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.076923 | cate_var_zero = false | frac_lines_pass = 0 | frac_lines_import = 0.086538 | frac_lines_simplefunc = 0 | score_lines_no_logic = 0.211538 | frac_lines_print = 0

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 4

| Field | Value |
|---|---|
| hexsha | fea8219f00f084855cf10ddacc7d1729db19658a |
| size | 1,030 |
| ext | py |
| lang | Python |
| max_stars_repo_path | gen_data/get_teams.py |
| max_stars_repo_name | wusui/NCAA2019 |
| max_stars_repo_head_hexsha | d33a69926dc2d5355f33f9b69e39475c54d03c56 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count / event datetimes | null / null / null |
| max_issues_* | same path, name, head hexsha, and licenses; count and event datetimes null |
| max_forks_* | same path, name, head hexsha, and licenses; count and event datetimes null |

content:

```python
#!/usr/bin/python
# pylint: disable=W0223
"""
Get a list of teams
"""
from html.parser import HTMLParser
import requests
class ChkTeams(HTMLParser):
"""
Extract team names from page
"""
def __init__(self):
HTMLParser.__init__(self)
self.retval = []
def handle_starttag(self, tag, attrs):
for apt in attrs:
if apt[0] == 'title':
if apt[1] != "ESPN Search":
self.retval.append(apt[1])
DATALOC = "http://www.espn.com/mens-college-basketball/tournament/bracket"
def check_teams():
"""
Extract a list of teams (schools)
"""
req = requests.get(DATALOC)
parser = ChkTeams()
parser.feed(req.text)
retv = parser.retval
return retv[8:]
def make_team_list():
"""
Call check_teams and stick result in text file
"""
listv = check_teams()
with open('teams.txt', 'w') as ofile:
for team in listv:
ofile.write(team + '\n')
if __name__ == '__main__':
make_team_list()
```

avg_line_length = 20.196078 | max_line_length = 74 | alphanum_fraction = 0.590291

Quality signals:
num_words = 130 | num_chars = 1,030 | mean_word_length = 4.492308 | frac_words_unique = 0.576923
frac_chars_top_2grams = 0.05137 | top_3grams = 0.023973 | top_4grams = 0.041096
frac_chars_dupe_5grams = 0 | dupe_6grams = 0 | dupe_7grams = 0 | dupe_8grams = 0 | dupe_9grams = 0 | dupe_10grams = 0
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.010753 | frac_chars_whitespace = 0.27767
size_file_byte = 1,030 | num_lines = 50 | num_chars_line_max = 75 | num_chars_line_mean = 20.6
frac_chars_alphabet = 0.774194 | frac_chars_comments = 0.163107 | cate_xml_start = 0 | frac_lines_dupe_lines = 0
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.121287 | frac_chars_long_word_length = 0 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.16 | cate_var_zero = false | frac_lines_pass = 0 | frac_lines_import = 0.08 | frac_lines_simplefunc = 0 | score_lines_no_logic = 0.32 | frac_lines_print = 0

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 5

| Field | Value |
|---|---|
| hexsha | fea8eab09203e9965fd3c37311110a5d329a6d18 |
| size | 2,882 |
| ext | py |
| lang | Python |
| max_stars_repo_path | svgserver/app.py |
| max_stars_repo_name | omniscale/svgserver |
| max_stars_repo_head_hexsha | a98f75ec9547fda25941129e854af046ba8f5dfe |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 2 (events 2018-10-18T07:15:58.000Z to 2020-04-09T20:42:07.000Z) |
| max_issues_count / event datetimes | null / null / null |
| max_forks_count | 2 (events 2019-06-20T01:29:59.000Z to 2021-12-01T12:18:55.000Z) |
| max_issues_*, max_forks_* | same path, name, head hexsha, and licenses as max_stars_* |

content:

```python
import codecs
import tempfile
from contextlib import closing
from .cgi import CGIClient
from .combine import CombineSVG
from .mapserv import MapServer, InternalError
from .tree import build_tree
def _recursive_add_layer(nodes, params, svg, mapserver, translations):
for node in nodes:
group_name = format_group_name(node, translations)
svg.push_group(group_name)
if node.layer:
params["layers"] = node.layer
params["format"] = "image/svg+xml"
resp = mapserver.get(params)
if resp.headers["Content-type"] != "image/svg+xml":
raise InternalError(
"received non SVG response for layer %s:\n%s\n%s"
% (node.layer, resp.headers, resp.read())
)
svg.add(resp)
if node.subs:
_recursive_add_layer(node.subs, params, svg, mapserver, translations)
svg.pop_group()
def format_group_name(node, translations):
if isinstance(node.name, tuple):
return ', '.join(translations.get(n, n) for n in node.name)
return translations.get(node.name, node.name)
def layered_svg(params, translations={}, mapserver_binary="mapserv", root_id='map'):
mapserver = MapServer(binary=mapserver_binary)
layers = mapserver.layer_names(params)
nodes = build_tree(layers)
root_id = translations.get(root_id, root_id)
f = tempfile.TemporaryFile()
try:
with CombineSVG(f, root_id=root_id) as svg:
_recursive_add_layer(
nodes,
params=params,
svg=svg,
mapserver=mapserver,
translations=translations,
)
f.seek(0)
return f
except:
# close to remove temporary file
f.close()
raise
def load_translations(filename):
if not filename:
return {}
translations = {}
with codecs.open(filename, encoding="utf8") as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
if '=' not in line:
continue
key, translation = line.split('=', 1)
translations[key.strip()] = translation.strip()
return translations
if __name__ == "__main__":
import os
import logging
logging.basicConfig(level=logging.DEBUG)
params = {
"service": "WMS",
"version": "1.1.1",
"request": "GetMap",
"width": 1234,
"height": 769,
"srs": "EPSG:3857",
"styles": "",
"format": "image/svg+xml",
"bbox": "775214.9923087133,6721788.224989068,776688.4414913012,6722705.993822992",
"map": os.path.abspath(os.path.dirname(__file__) + "/../tests/ms.map"),
}
with closing(layered_svg(params)) as f:
print(f.read())
```

avg_line_length = 29.408163 | max_line_length = 90 | alphanum_fraction = 0.586051

Quality signals:
num_words = 324 | num_chars = 2,882 | mean_word_length = 5.080247 | frac_words_unique = 0.37037
frac_chars_top_2grams = 0.021871 | top_3grams = 0.030984 | top_4grams = 0.026731
frac_chars_dupe_5grams = 0.071689 | dupe_6grams = 0 | dupe_7grams = 0 | dupe_8grams = 0 | dupe_9grams = 0 | dupe_10grams = 0
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.040079 | frac_chars_whitespace = 0.298751
size_file_byte = 2,882 | num_lines = 97 | num_chars_line_max = 91 | num_chars_line_mean = 29.71134
frac_chars_alphabet = 0.774369 | frac_chars_comments = 0.010409 | cate_xml_start = 0 | frac_lines_dupe_lines = 0.025
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.105614 | frac_chars_long_word_length = 0.024912 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.05 | cate_var_zero = false | frac_lines_pass = 0 | frac_lines_import = 0.1125 | frac_lines_simplefunc = 0 | score_lines_no_logic = 0.225 | frac_lines_print = 0.0125

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 6

| Field | Value |
|---|---|
| hexsha | feab97b0913494abc7216c346f3470dd95d2e154 |
| size | 1,001 |
| ext | py |
| lang | Python |
| max_stars_repo_path | test/lib_config_test.py |
| max_stars_repo_name | yokoyama-flogics/ibp_monitor_2 |
| max_stars_repo_head_hexsha | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 |
| max_stars_repo_licenses | ["BSD-2-Clause"] |
| max_stars_count | 3 (events 2017-11-23T13:29:47.000Z to 2021-01-08T09:28:35.000Z) |
| max_issues_count / event datetimes | null / null / null |
| max_forks_count | 2 (events 2018-02-15T08:11:24.000Z to 2021-01-08T09:28:43.000Z) |
| max_issues_*, max_forks_* | same path, name, head hexsha, and licenses as max_stars_* |

content:

```python
import os
import sys
import unittest
# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.config import *
class TestLibConfig(unittest.TestCase):
def test_config_noconfigfile(self):
config = BeaconConfigParser('not_exist.cfg')
with self.assertRaises(ConfigParser.NoSectionError):
config.getpath('Test', 'dbdir')
def test_config_default(self):
import os
os.environ['HOME'] = 'notexist'
config = BeaconConfigParser()
with self.assertRaises(ConfigParser.NoSectionError):
config.get('Signal', 'samplerate')
def test_config_items(self):
config = BeaconConfigParser('test_config.cfg')
self.assertEqual(config.get('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getpath('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getint('Signal', 'samplerate'), 16000)
if __name__ == "__main__":
unittest.main(buffer=True)
```

avg_line_length = 33.366667 | max_line_length = 70 | alphanum_fraction = 0.679321

Quality signals:
num_words = 111 | num_chars = 1,001 | mean_word_length = 5.945946 | frac_words_unique = 0.477477
frac_chars_top_2grams = 0.060606 | top_3grams = 0.059091 | top_4grams = 0.09697
frac_chars_dupe_5grams = 0.260606 | dupe_6grams = 0.260606 | dupe_7grams = 0 | dupe_8grams = 0 | dupe_9grams = 0 | dupe_10grams = 0
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.006165 | frac_chars_whitespace = 0.18981
size_file_byte = 1,001 | num_lines = 29 | num_chars_line_max = 71 | num_chars_line_mean = 34.517241
frac_chars_alphabet = 0.807645 | frac_chars_comments = 0.045954 | cate_xml_start = 0 | frac_lines_dupe_lines = 0.173913
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.12277 | frac_chars_long_word_length = 0 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0.217391
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.130435 | cate_var_zero = false | frac_lines_pass = 0 | frac_lines_import = 0.217391 | frac_lines_simplefunc = 0 | score_lines_no_logic = 0.391304 | frac_lines_print = 0

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 7

| Field | Value |
|---|---|
| hexsha | feb1c1e0c98bd37c082895d1888d0fe15b8aaccf |
| size | 19,367 |
| ext | py |
| lang | Python |
| max_stars_repo_path | claripy/vsa/valueset.py |
| max_stars_repo_name | kwalberg/claripy |
| max_stars_repo_head_hexsha | b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90 |
| max_stars_repo_licenses | ["BSD-2-Clause"] |
| max_stars_count / event datetimes | null / null / null |
| max_issues_* | same path, name, head hexsha, and licenses; count and event datetimes null |
| max_forks_* | same path, name, head hexsha, and licenses; count and event datetimes null |

content:

```python
import functools
import itertools
import numbers
from ..backend_object import BackendObject
from ..annotation import Annotation
def normalize_types_two_args(f):
@functools.wraps(f)
def normalizer(self, region, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
if not isinstance(o, StridedInterval):
raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o))
return f(self, region, o)
return normalizer
def normalize_types_one_arg(f):
@functools.wraps(f)
def normalizer(self, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
return f(self, o)
return normalizer
vs_id_ctr = itertools.count()
class RegionAnnotation(Annotation):
"""
Use RegionAnnotation to annotate ASTs. Normally, an AST annotated by RegionAnnotations is treated as a ValueSet.
Note that Annotation objects are immutable. Do not change properties of an Annotation object without creating a new
one.
"""
def __init__(self, region_id, region_base_addr, offset):
self.region_id = region_id
self.region_base_addr = region_base_addr
self.offset = offset
# Do necessary conversion here
if isinstance(self.region_base_addr, Base):
self.region_base_addr = self.region_base_addr._model_vsa
if isinstance(self.offset, Base):
self.offset = self.offset._model_vsa
@property
def eliminatable(self):
"""
A Region annotation is not eliminatable in simplifications.
:return: False
:rtype: bool
"""
return False
@property
def relocatable(self):
"""
A Region annotation is not relocatable in simplifications.
:return: False
:rtype: bool
"""
return False
#
# Public methods
#
def relocate(self, src, dst):
"""
Override Annotation.relocate().
:param src: The old AST
:param dst: The new AST, as the result of a simplification
:return: The new annotation that should be applied on the new AST
"""
raise ClaripyVSAError('RegionAnnotation is not relocatable')
#
# Overriding base methods
#
def __hash__(self):
return hash((self.region_id, self.region_base_addr, hash(self.offset)))
def __repr__(self):
return "<RegionAnnotation %s:%#08x>" % (self.region_id, self.offset)
class ValueSet(BackendObject):
"""
ValueSet is a mapping between memory regions and corresponding offsets.
"""
def __init__(self, name=None, region=None, region_base_addr=None, bits=None, val=None):
"""
Constructor.
:param str name: Name of this ValueSet object. Only for debugging purposes.
:param str region: Region ID.
:param int region_base_addr: Base address of the region.
:param int bits: Size of the ValueSet.
:param val: an initial offset
"""
self._name = 'VS_%d' % next(vs_id_ctr) if name is None else name
if bits is None:
raise ClaripyVSAError('bits must be specified when creating a ValueSet.')
self._bits = bits
self._si = StridedInterval.empty(bits)
self._regions = {}
self._region_base_addrs = {}
self._reversed = False
# Shortcuts for initialization
# May not be useful though...
if region is not None and region_base_addr is not None and val is not None:
if isinstance(region_base_addr, numbers.Number):
# Convert it to a StridedInterval
region_base_addr = StridedInterval(bits=self._bits, stride=1,
lower_bound=region_base_addr,
upper_bound=region_base_addr)
if isinstance(val, numbers.Number):
val = StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val)
if isinstance(val, StridedInterval):
self._set_si(region, region_base_addr, val)
else:
raise ClaripyVSAError("Unsupported type '%s' for argument 'val'" % type(val))
else:
if region is not None or val is not None:
raise ClaripyVSAError("You must specify 'region' and 'val' at the same time.")
#
# Properties
#
@property
def name(self):
return self._name
@property
def bits(self):
return self._bits
@property
def regions(self):
return self._regions
@property
def reversed(self):
return self._reversed
@property
def unique(self):
return len(self.regions) == 1 and self.regions.values()[0].unique
@property
def cardinality(self):
card = 0
for region in self._regions:
card += self._regions[region].cardinality
return card
@property
def is_empty(self):
return len(self._regions) == 0
@property
def valueset(self):
return self
#
# Private methods
#
def _set_si(self, region, region_base_addr, si):
if isinstance(si, numbers.Number):
si = StridedInterval(bits=self.bits, stride=0, lower_bound=si, upper_bound=si)
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
self._region_base_addrs[region] = region_base_addr
self._si = self._si.union(region_base_addr + si)
def _merge_si(self, region, region_base_addr, si):
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if region not in self._regions:
self._set_si(region, region_base_addr, si)
else:
self._regions[region] = self._regions[region].union(si)
self._region_base_addrs[region] = self._region_base_addrs[region].union(region_base_addr)
self._si = self._si.union(region_base_addr + si)
#
# Public methods
#
@staticmethod
def empty(bits):
return ValueSet(bits=bits)
def items(self):
return self._regions.items()
def size(self):
return len(self)
def copy(self):
"""
Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet
"""
vs = ValueSet(bits=self.bits)
vs._regions = self._regions.copy()
vs._region_base_addrs = self._region_base_addrs.copy()
vs._reversed = self._reversed
vs._si = self._si.copy()
return vs
def get_si(self, region):
if region in self._regions:
return self._regions[region]
# TODO: Should we return a None, or an empty SI instead?
return None
def stridedinterval(self):
return self._si
def apply_annotation(self, annotation):
"""
Apply a new annotation onto self, and return a new ValueSet object.
:param RegionAnnotation annotation: The annotation to apply.
:return: A new ValueSet object
:rtype: ValueSet
"""
vs = self.copy()
vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)
return vs
def __repr__(self):
s = ""
for region, si in self._regions.items():
s = "%s: %s" % (region, si)
return "(" + s + ")"
def __len__(self):
return self._bits
def __hash__(self):
return hash(tuple((r, hash(self._regions[r])) for r in self._regions))
#
# Arithmetic operations
#
@normalize_types_one_arg
def __add__(self, other):
"""
Binary operation: addition
Note that even if "other" is a ValueSet object. we still treat it as a StridedInterval. Adding two ValueSets
together does not make sense (which is essentially adding two pointers together).
:param StridedInterval other: The other operand.
:return: A new ValueSet object
:rtype: ValueSet
"""
new_vs = ValueSet(bits=self.bits)
# Call __add__ on self._si
new_vs._si = self._si.__add__(other)
for region in self._regions:
new_vs._regions[region] = self._regions[region] + other
return new_vs
@normalize_types_one_arg
def __radd__(self, other):
return self.__add__(other)
@normalize_types_one_arg
def __sub__(self, other):
"""
Binary operation: subtraction
:param other: The other operand
:return: A StridedInterval or a ValueSet.
"""
deltas = [ ]
# TODO: Handle more cases
if isinstance(other, ValueSet):
# A subtraction between two ValueSets produces a StridedInterval
if self.regions.keys() == other.regions.keys():
for region in self._regions:
deltas.append(self._regions[region] - other._regions[region])
else:
# TODO: raise the proper exception here
raise NotImplementedError()
delta = StridedInterval.empty(self.bits)
for d in deltas:
delta = delta.union(d)
return delta
else:
# A subtraction between a ValueSet and a StridedInterval produces another ValueSet
new_vs = self.copy()
# Call __sub__ on the base class
new_vs._si = self._si.__sub__(other)
for region, si in new_vs._regions.items():
new_vs._regions[region] = si - other
return new_vs
@normalize_types_one_arg
def __and__(self, other):
"""
Binary operation: and
Note that even if `other` is a ValueSet object, it will be treated as a StridedInterval as well. Doing & between
two pointers that are not the same do not make sense.
:param other: The other operand
:return: A ValueSet as the result
:rtype: ValueSet
"""
if type(other) is ValueSet:
# The only case where calling & between two points makes sense
if self.identical(other):
return self.copy()
if BoolResult.is_true(other == 0):
# Corner case: a & 0 = 0
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0)
if BoolResult.is_true(other < 0x100):
# Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not
# We return a StridedInterval instead
ret = None
for region, si in self._regions.items():
r = si.__and__(other)
ret = r if ret is None else ret.union(r)
return ret
else:
# We should return a ValueSet here
new_vs = self.copy()
for region, si in self._regions.items():
r = si.__and__(other)
new_vs._regions[region] = r
return new_vs
def __eq__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
return ~ (self == other)
#
# Backend operations
#
def eval(self, n, signed=False):
if signed:
# How are you going to deal with a negative pointer?
raise ClaripyVSAOperationError('`signed` cannot be True when calling ValueSet.eval().')
results = []
for _, si in self._regions.items():
if len(results) < n:
results.extend(si.eval(n))
return results
@property
def min(self):
"""
The minimum integer value of a value-set. It is only defined when there is exactly one region.
:return: A integer that represents the minimum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
raise ClaripyVSAOperationError("'min()' onlly works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).min
@property
def max(self):
"""
The maximum integer value of a value-set. It is only defined when there is exactly one region.
:return: A integer that represents the maximum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
raise ClaripyVSAOperationError("'max()' onlly works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).max
def reverse(self):
# TODO: obviously valueset.reverse is not properly implemented. I'm disabling the old annoying output line for
# TODO: now. I will implement the proper reversing support soon.
vs = self.copy()
vs._reversed = not vs._reversed
return vs
def extract(self, high_bit, low_bit):
"""
Operation extract
- A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a
ValueSet instance. Otherwise a StridedInterval is returned.
:param high_bit:
:param low_bit:
:return: A ValueSet or a StridedInterval
"""
if high_bit - low_bit + 1 == self.bits:
return self.copy()
if ('global' in self._regions and len(self._regions.keys()) > 1) or \
len(self._regions.keys()) > 0:
si_ret = StridedInterval.top(high_bit - low_bit + 1)
else:
if 'global' in self._regions:
si = self._regions['global']
si_ret = si.extract(high_bit, low_bit)
else:
si_ret = StridedInterval.empty(high_bit - low_bit + 1)
return si_ret
def concat(self, b):
new_vs = ValueSet(bits=self.bits + b.bits)
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
@normalize_types_one_arg
def union(self, b):
merged_vs = self.copy()
if type(b) is ValueSet:
for region, si in b.regions.items():
if region not in merged_vs._regions:
merged_vs._regions[region] = si
else:
merged_vs._regions[region] = merged_vs._regions[region].union(si)
merged_vs._si = merged_vs._si.union(b._si)
else:
for region, si in merged_vs._regions.items():
merged_vs._regions[region] = merged_vs._regions[region].union(b)
merged_vs._si = merged_vs._si.union(b)
return merged_vs
@normalize_types_one_arg
def widen(self, b):
merged_vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in merged_vs.regions:
merged_vs.regions[region] = si
else:
merged_vs.regions[region] = merged_vs.regions[region].widen(si)
merged_vs._si = merged_vs._si.widen(b._si)
else:
for region in merged_vs._regions:
merged_vs._regions[region] = merged_vs._regions[region].widen(b)
merged_vs._si = merged_vs._si.widen(b)
return merged_vs
@normalize_types_one_arg
def intersection(self, b):
vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in vs.regions:
pass
else:
vs.regions[region] = vs.regions[region].intersection(si)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b._si)
else:
for region in self._regions:
vs.regions[region] = vs.regions[region].intersection(b)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b)
return vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with.
:return: True if they are exactly same, False otherwise.
"""
if self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
from ..ast.base import Base
from .strided_interval import StridedInterval
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError, ClaripyVSAError
from ..errors import ClaripyValueError
```

avg_line_length = 29.795385 | max_line_length = 120 | alphanum_fraction = 0.58357

Quality signals:
num_words = 2,290 | num_chars = 19,367 | mean_word_length = 4.758952 | frac_words_unique = 0.139738
frac_chars_top_2grams = 0.043402 | top_3grams = 0.039824 | top_4grams = 0.014315
frac_chars_dupe_5grams = 0.402826 | dupe_6grams = 0.343641 | dupe_7grams = 0.309782 | dupe_8grams = 0.267113 | dupe_9grams = 0.220316 | dupe_10grams = 0.183979
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.00224 | frac_chars_whitespace = 0.331543
size_file_byte = 19,367 | num_lines = 649 | num_chars_line_max = 121 | num_chars_line_mean = 29.841294
frac_chars_alphabet = 0.839564 | frac_chars_comments = 0.208241 | cate_xml_start = 0 | frac_lines_dupe_lines = 0.339286
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.040863 | frac_chars_long_word_length = 0 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0.000345 | frac_lines_prompt_comments = 0.006163 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.139881 | cate_var_zero = false | frac_lines_pass = 0.002976 | frac_lines_import = 0.029762 | frac_lines_simplefunc = 0.047619 | score_lines_no_logic = 0.342262 | frac_lines_print = 0

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 8

| Field | Value |
|---|---|
| hexsha | feb21c64003d71c234c911e57ed8a4baa217c7cb |
| size | 2,663 |
| ext | py |
| lang | Python |
| max_stars_repo_path | fardaastationapi.py |
| max_stars_repo_name | sina-cb/fardaastationapi |
| max_stars_repo_head_hexsha | 0e27afe05195f346e17fd52e1c30b853c954a3b0 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count / event datetimes | null / null / null |
| max_issues_count | 1 (events 2017-12-21T19:54:36.000Z to 2018-01-08T02:05:11.000Z) |
| max_forks_count / event datetimes | null / null / null |
| max_issues_*, max_forks_* | same path, name, head hexsha, and licenses as max_stars_* |

content:

```python
import logging
from episodes import find_updates, db, count_all
from logging import error as logi
from flask import Flask, jsonify, request
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
app.config['JSON_AS_ASCII'] = False
app.debug = debug
app.testing = testing
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
@app.before_request
def before_request():
db.connect()
@app.after_request
def after_request(response):
db.close()
return response
@app.route('/get_new_episodes')
def get_new_episodes():
appengine_request = request.headers.get('X-Appengine-Cron')
if appengine_request == 'true':
from scraper import update_episodes
update_episodes()
return '<h1>Success</h1>'
else:
return '<h1>This is a crobjob and all the requests should come from appengine.</h1>'
@app.route('/get_updates')
def get_update():
timestamp = request.args.get('timestamp', '')
if timestamp == '':
logi('Default timestamp')
timestamp = 0
else:
timestamp = long(timestamp)
result = find_updates(timestamp)
return jsonify(result)
@app.route('/')
def welcome():
message = '{}{}{}{}'.format('<h1>Welcome to FardaStationAPI WebService</h1>',
'<p>To get information about the latest episodes of Fardaa Station (by '
'RadioFarda.com) please send a GET request to '
'http://fardastationapi.appspot.com/get_updates URL.</p>',
'<p>A UNIX epoch timestamp can also be passed in as an argument to filter out the '
'episodes before that timestamp. Example: '
'https://fardastationapi.appspot.com/get_updates?timestamp=1512629949</p>',
'<h1>Current number of episodes: {}</h1>'.format(count_all()))
return message
# Add an error handler. This is useful for debugging the live application,
# however, you should disable the output of the exception for production
# applications.
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
```

avg_line_length = 33.708861 | max_line_length = 119 | alphanum_fraction = 0.592189

Quality signals:
num_words = 299 | num_chars = 2,663 | mean_word_length = 5.167224 | frac_words_unique = 0.438127
frac_chars_top_2grams = 0.023301 | top_3grams = 0.014239 | top_4grams = 0.036246
frac_chars_dupe_5grams = 0.045307 | dupe_6grams = 0 | dupe_7grams = 0 | dupe_8grams = 0 | dupe_9grams = 0 | dupe_10grams = 0
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.01358 | frac_chars_whitespace = 0.308674
size_file_byte = 2,663 | num_lines = 78 | num_chars_line_max = 120 | num_chars_line_mean = 34.141026
frac_chars_alphabet = 0.825638 | frac_chars_comments = 0.065715 | cate_xml_start = 0 | frac_lines_dupe_lines = 0.034483
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.29561 | frac_chars_long_word_length = 0 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.12069 | cate_var_zero = false | frac_lines_pass = 0.017241 | frac_lines_import = 0.086207 | frac_lines_simplefunc = 0.017241 | score_lines_no_logic = 0.327586 | frac_lines_print = 0

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 9

| Field | Value |
|---|---|
| hexsha | feb49cfe9fd1f9a9e260952a3552e9f39bc9e707 |
| size | 12,199 |
| ext | py |
| lang | Python |
| max_stars_repo_path | catapult.py |
| max_stars_repo_name | spraakbanken/sparv-catapult |
| max_stars_repo_head_hexsha | 03273985ceea6feef47a56084c595580d0338f7d |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count / event datetimes | null / null / null |
| max_issues_count | 2 (events 2021-12-13T19:47:29.000Z to 2021-12-15T16:14:50.000Z) |
| max_forks_count / event datetimes | null / null / null |
| max_issues_*, max_forks_* | same path, name, head hexsha, and licenses as max_stars_* |

content:

```python
# -*- coding: utf-8 -*-
# catapult: runs python scripts in already running processes to eliminate the
# python interpreter startup time.
#
# The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and
# shared between processes. See the variable annotators in handle and start.
#
# Run scripts in the catapult with the c program catalaunch.
from builtins import range, object
from multiprocessing import Process, cpu_count
from decorator import decorator
import logging
import os
import re
import runpy
import socket
import sys
import traceback
import sparv.util as util
RECV_LEN = 4096
# Important to preload all modules otherwise processes will need to do
# it upon request, introducing new delays.
#
# These imports uses the __all__ variables in the __init__ files.
from sparv.util import *
from sparv import *
logging.basicConfig(format="%(process)d %(asctime)-15s %(message)s")
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
"""
Splits at every space that is not preceded by a backslash.
"""
splitter = re.compile('(?<!\\\\) ')
def set_last_argument(*values):
"""
Decorates a function f, setting its last argument(s) to the given value(s).
Used for setting the saldo lexicons to sparv.saldo.annotate and
sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse.
The decorator module is used to give the same signature and
docstring to the function, which is exploited in sparv.util.run.
"""
@decorator
def inner(f, *args, **kwargs):
args = list(args)
for v in values:
args.pop()
for v in values:
args.append(v)
f(*args, **kwargs)
return inner
def handle(client_sock, verbose, annotators):
"""
Handle a client: parse the arguments, change to the relevant
directory, then run the script. Stdout and stderr are directed
to /dev/null or to the client socket.
"""
def chunk_send(msg):
"""
Sends a message chunk until it is totally received in the other end
"""
msg = msg.encode(util.UTF8)
while len(msg) > 0:
sent = client_sock.send(msg)
if sent == 0:
raise RuntimeError("socket connection broken")
msg = msg[sent:]
def set_stdout_stderr():
"""
Put stdout and stderr to the client_sock, if verbose.
Returns the clean-up handler.
"""
class Writer(object):
def write(self, msg):
log.debug(msg)
if verbose:
chunk_send(msg)
def flush(self):
pass
orig_stds = sys.stdout, sys.stderr
w = Writer()
sys.stdout = w
sys.stderr = w
def cleanup():
"""
Restores stdout and stderr
"""
sys.stdout = orig_stds[0]
sys.stderr = orig_stds[1]
client_sock.close()
return cleanup
# Receive data
data = b""
new_data = None
# Message is terminated with a lone \
while new_data is None or not new_data.endswith(b'\\'):
new_data = client_sock.recv(RECV_LEN)
log.debug("Received %s", new_data)
data += new_data
if len(new_data) == 0:
log.warning("Received null!")
chunk_send("Error when receiving: got an empty message")
return
# Drop the terminating \
data = data[0:-1]
# Split arguments on spaces, and replace '\ ' to ' ' and \\ to \
args = [arg.replace('\\ ', ' ').replace('\\\\', '\\')
for arg in re.split(splitter, data.decode(util.UTF8))]
log.debug("Args: %s", args)
### PING? ###
if len(args) == 2 and args[1] == "PING":
log.info("Ping requested")
chunk_send("PONG")
return
# If the first argument is -m, the following argument is a module
# name instead of a script name
module_flag = len(args) > 2 and args[1] == '-m'
if module_flag:
args.pop(1)
if len(args) > 1:
# First argument is the pwd of the caller
old_pwd = os.getcwd()
pwd = args.pop(0)
log.info('Running %s', args[0])
log.debug('with arguments: %s', ' '.join(args[1:]))
log.debug('in directory %s', pwd)
# Set stdout and stderr, which returns the cleaup function
cleanup = set_stdout_stderr()
# Run the command
try:
sys.argv = args
os.chdir(pwd)
if module_flag:
annotator = annotators.get(args[0], None)
if not annotator:
# some of the annotators require two arguments
annotator = annotators.get((args[0], args[1]), None)
if annotator:
# skip the first argument now
sys.argv = args[0]
sys.argv.extend(args[2:])
if annotator:
util.run.main(annotator)
else:
runpy.run_module(args[0], run_name='__main__')
else:
runpy.run_path(args[0], run_name='__main__')
except (ImportError, IOError):
# If file does not exist, send the error message
chunk_send("%s\n" % sys.exc_info()[1])
cleanup()
log.exception("File does not exist")
except:
# Send other errors, and if verbose, send tracebacks
chunk_send("%s\n" % sys.exc_info()[1])
traceback.print_exception(*sys.exc_info())
cleanup()
log.exception("Unknown error")
else:
cleanup()
os.chdir(old_pwd)
# Run the cleanup function if there is one (only used with malt)
annotators.get((args[0], 'cleanup'), lambda: None)()
log.info('Completed %s', args[0])
else:
log.info('Cannot handle %s', data)
chunk_send('Cannot handle %s\n' % data)
def worker(server_socket, verbose, annotators, malt_args=None, swener_args=None):
"""
Workers listen to the socket server, and handle incoming requests
Each process starts an own maltparser process, because they are
cheap and cannot serve multiple clients at the same time.
"""
if malt_args:
process_dict = dict(process=None, restart=True)
def start_malt():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
malt_process = malt.maltstart(**malt_args)
if verbose:
log.info('(Re)started malt process: %s', malt_process)
process_dict['process'] = malt_process
annotators['sparv.malt'] = set_last_argument(process_dict)(malt.maltparse)
elif verbose:
log.info("Not restarting malt this time")
start_malt()
annotators['sparv.malt', 'cleanup'] = start_malt
if swener_args:
process_dict = dict(process=None, restart=True)
def start_swener():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
swener_process = swener.swenerstart(**swener_args)
if verbose:
log.info('(Re)started SweNER process: %s', swener_process)
process_dict['process'] = swener_process
annotators['sparv.swener'] = set_last_argument(process_dict)(swener.tag_ne)
elif verbose:
log.info("Not restarting SweNER this time")
start_swener()
annotators['sparv.swener', 'cleanup'] = start_swener
if verbose:
log.info("Worker running!")
while True:
client_sock, addr = server_socket.accept()
try:
handle(client_sock, verbose, annotators)
except:
log.exception('Error in handling code')
traceback.print_exception(*sys.exc_info())
client_sock.close()
def start(socket_path, processes=1, verbose='false',
saldo_model=None, compound_model=None, stats_model=None,
dalin_model=None, swedberg_model=None, blingbring_model=None,
malt_jar=None, malt_model=None, malt_encoding=util.UTF8,
sentiment_model=None, swefn_model=None, swener=False,
swener_encoding=util.UTF8):
"""
Starts a catapult on a socket file, using a number of processes.
If verbose is false, all stdout and stderr programs produce is
piped to /dev/null, otherwise it is sent to the client. The
computation is done by the catapult processes, however.
Regardless of what verbose is, client errors should be reported
both in the catapult and to the client.
The saldo model and compound model can be pre-loaded and shared in
memory between processes.
Start processes using catalaunch.
"""
if os.path.exists(socket_path):
log.error('socket %s already exists', socket_path)
exit(1)
verbose = verbose.lower() == 'true'
log.info('Verbose: %s', verbose)
# If processes does not contain an int, set it to the number of processors
try:
processes = int(processes)
except:
processes = cpu_count()
# Start the socket
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_socket.bind(socket_path)
server_socket.listen(processes)
# The dictionary of functions with saved lexica, indexed by module name strings
annotators = {}
# Load Saldo and older lexicons
lexicons = [m for m in [saldo_model, dalin_model, swedberg_model] if m]
if lexicons:
lexicon_dict = {}
for lexicon in lexicons:
lexicon_dict[os.path.basename(lexicon).rstrip(".pickle")] = saldo.SaldoLexicon(lexicon)
annotators['sparv.saldo'] = set_last_argument(lexicon_dict)(saldo.annotate)
if stats_model and compound_model:
annotators['sparv.compound'] = set_last_argument(
compound.SaldoCompLexicon(compound_model),
compound.StatsLexicon(stats_model))(compound.annotate)
elif compound_model:
annotators['sparv.compound_simple'] = set_last_argument(
compound_simple.SaldoLexicon(compound_model))(compound_simple.annotate)
# if blingbring_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words)
# if swefn_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words)
if sentiment_model:
annotators['sparv.sentiment'] = set_last_argument(
util.PickledLexicon(sentiment_model))(sentiment.sentiment)
# if models_1700s:
# models = models_1700s.split()
# lexicons = [saldo.SaldoLexicon(lex) for lex in models]
# annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback)
# annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full)
if verbose:
log.info('Loaded annotators: %s', list(annotators.keys()))
if malt_jar and malt_model:
malt_args = dict(maltjar=malt_jar, model=malt_model,
encoding=malt_encoding, send_empty_sentence=True)
else:
malt_args = None
if swener:
swener_args = dict(stdin="", encoding=swener_encoding, verbose=True)
else:
swener_args = None
# Start processes-1 workers
workers = [Process(target=worker, args=[server_socket, verbose, annotators, malt_args])
for i in range(processes - 1)]
for p in workers:
p.start()
# Additionally, let this thread be worker 0
worker(server_socket, verbose, annotators, malt_args, swener_args)
if __name__ == '__main__':
util.run.main(start)
```

avg_line_length = 32.617647 | max_line_length = 111 | alphanum_fraction = 0.61792

Quality signals:
num_words = 1,511 | num_chars = 12,199 | mean_word_length = 4.848445 | frac_words_unique = 0.244209
frac_chars_top_2grams = 0.019656 | top_3grams = 0.022523 | top_4grams = 0.01365
frac_chars_dupe_5grams = 0.189189 | dupe_6grams = 0.142643 | dupe_7grams = 0.100191 | dupe_8grams = 0.069888 | dupe_9grams = 0.063882 | dupe_10grams = 0.063882
frac_chars_replacement_symbols = 0 | frac_chars_digital = 0.005835 | frac_chars_whitespace = 0.283548
size_file_byte = 12,199 | num_lines = 373 | num_chars_line_max = 112 | num_chars_line_mean = 32.705094
frac_chars_alphabet = 0.83238 | frac_chars_comments = 0.275268 | cate_xml_start = 0 | frac_lines_dupe_lines = 0.211538
cate_autogen = 0 | frac_lines_long_string = 0 | frac_chars_string_length = 0.08759 | frac_chars_long_word_length = 0.002466 | frac_lines_string_concat = 0 | cate_encoded_data = 0
frac_chars_hex_words = 0 | frac_lines_prompt_comments = 0 | frac_lines_assert = 0
codepython: cate_ast = 1 | frac_lines_func_ratio = 0.057692 | cate_var_zero = false | frac_lines_pass = 0.004808 | frac_lines_import = 0.067308 | frac_lines_simplefunc = 0 | score_lines_no_logic = 0.149038 | frac_lines_print = 0.009615

Unsuffixed qsc_* counterparts: all 0 (frac_words_unique and frac_lines_string_concat null) | effective = 1 | hits = 0

Record 10

| Field | Value |
|---|---|
| hexsha | feb55dc64767ea42fd4dbdb633eb49cefc5afea8 |
| size | 2,445 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/test_sentiments.py |
| max_stars_repo_name | rajeshkumargp/TextBlob |
| max_stars_repo_head_hexsha | a8709368f2a8a8ba4d87730111f8b6675d0735cd |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 6,608 (events 2015-01-02T13:13:16.000Z to 2022-03-31T13:44:41.000Z) |
| max_issues_count | 277 (events 2015-01-01T15:08:55.000Z to 2022-03-28T20:00:06.000Z) |
| max_forks_count | 1,110 (events 2015-01-01T22:04:39.000Z to 2022-03-20T20:39:26.000Z) |
| max_issues_*, max_forks_* | same path, name, head hexsha, and licenses as max_stars_* |

content:

```python
from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS
class TestPatternSentiment(unittest.TestCase):
def setUp(self):
self.analyzer = PatternAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, CONTINUOUS)
def test_analyze(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1)
n1_result = self.analyzer.analyze(n1)
assert_true(p1_result[0] > 0)
assert_true(n1_result[0] < 0)
assert_equal(p1_result.polarity, p1_result[0])
assert_equal(p1_result.subjectivity, p1_result[1])
def test_analyze_assessments(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1,keep_assessments=True)
n1_result = self.analyzer.analyze(n1,keep_assessments=True)
p1_assessment = p1_result.assessments[0]
n1_assessment = n1_result.assessments[0]
assert_true(p1_assessment[1] > 0)
assert_true(n1_assessment[1] < 0)
assert_equal(p1_result.polarity, p1_assessment[1])
assert_equal(p1_result.subjectivity, p1_assessment[2])
class TestNaiveBayesAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = NaiveBayesAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, DISCRETE)
@attr('slow')
def test_analyze(self):
p1 = 'I feel great this morning.'
n1 = 'This is a terrible car.'
p1_result = self.analyzer.analyze(p1)
assert_equal(p1_result[0], 'pos')
assert_equal(self.analyzer.analyze(n1)[0], 'neg')
# The 2nd item should be the probability that it is positive
assert_true(isinstance(p1_result[1], float))
# 3rd item is probability that it is negative
assert_true(isinstance(p1_result[2], float))
assert_about_equal(p1_result[1] + p1_result[2], 1)
assert_equal(p1_result.classification, p1_result[0])
assert_equal(p1_result.p_pos, p1_result[1])
assert_equal(p1_result.p_neg, p1_result[2])
def assert_about_equal(first, second, places=4):
return assert_equal(round(first, places), second)
if __name__ == '__main__':
unittest.main()
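For context, these analyzers are usually reached through the TextBlob front end rather than called directly. A brief usage sketch (the NaiveBayesAnalyzer trains on NLTK's movie-reviews corpus, so that corpus must be downloaded; printed numbers are indicative only):

from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer

# PatternAnalyzer is the default: sentiment is a (polarity, subjectivity) pair
print(TextBlob("I feel great this morning.").sentiment)

# NaiveBayesAnalyzer yields (classification, p_pos, p_neg) instead
blob = TextBlob("This is a terrible car.", analyzer=NaiveBayesAnalyzer())
print(blob.sentiment.classification, blob.sentiment.p_pos, blob.sentiment.p_neg)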
| 35.434783 | 89 | 0.685481 | 325 | 2,445 | 4.92 | 0.249231 | 0.110069 | 0.073171 | 0.095059 | 0.461538 | 0.399625 | 0.333959 | 0.212633 | 0.212633 | 0.1601 | 0 | 0.037461 | 0.213906 | 2,445 | 68 | 90 | 35.955882 | 0.794485 | 0.047035 | 0 | 0.230769 | 0 | 0 | 0.070937 | 0 | 0 | 0 | 0 | 0 | 0.384615 | 1 | 0.153846 | false | 0 | 0.096154 | 0.019231 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
feb7b66503cd218d51059640f9914912cefb66a6 | 14,533 | py | Python | tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | ["BSD-3-Clause"] | 2,962 | 2016-05-11T15:06:06.000Z | 2022-03-27T20:06:16.000Z | tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | ["BSD-3-Clause"] | 5,899 | 2016-05-11T19:21:49.000Z | 2022-03-31T18:17:20.000Z | tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | ["BSD-3-Clause"] | 1,113 | 2016-05-11T15:37:42.000Z | 2022-03-31T09:37:04.000Z |
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import network_layer
def any_eid():
return bytearray([random.getrandbits(8) for _ in range(16)])
def any_mac_extended_address():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_rloc16():
return random.getrandbits(16)
def any_ml_eid():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_status():
return random.getrandbits(1)
def any_seconds():
return random.getrandbits(32)
def any_id_sequence():
return random.getrandbits(8)
def any_router_id_mask():
return random.getrandbits(64)
def any_options(count=None):
count = count if count is not None else random.randint(0, 255)
return [random.getrandbits(8) for _ in range(count)]
def any_tlv_data(length=None):
_type = random.getrandbits(8)
length = length if length is not None else random.getrandbits(8)
value = bytearray([random.getrandbits(8) for _ in range(length)])
return bytearray([_type, length]) + value
def any_tlvs_data(count=None):
count = count if count is not None else random.randint(0, 16)
data = bytearray()
for _ in range(count):
data += any_tlv_data(random.randint(1, 15))
return data
class TestTargetEid(unittest.TestCase):
def test_should_return_eid_value_when_eid_property_is_called(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# WHEN
actual_eid = target_eid.eid
# THEN
self.assertEqual(eid, actual_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# THEN
self.assertEqual(target_eid, network_layer.TargetEid(eid))
class TestTargetEidFactory(unittest.TestCase):
def test_should_create_TargetEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
eid = any_eid()
factory = network_layer.TargetEidFactory()
# WHEN
target_eid = factory.parse(io.BytesIO(eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(target_eid, network_layer.TargetEid))
self.assertEqual(eid, target_eid.eid)
class TestMacExtendedAddress(unittest.TestCase):
def test_should_return_mac_address_value_when_mac_address_property_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# WHEN
actual_mac_address = mac_extended_address.mac_address
# THEN
self.assertEqual(mac_address, actual_mac_address)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# THEN
self.assertEqual(mac_extended_address, network_layer.MacExtendedAddress(mac_address))
class TestMacExtendedAddressFactory(unittest.TestCase):
def test_should_create_MacExtendedAddress_from_bytearray_when_parse_method_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
factory = network_layer.MacExtendedAddressFactory()
# WHEN
mac_extended_address = factory.parse(io.BytesIO(mac_address), common.MessageInfo())
# THEN
self.assertTrue(isinstance(mac_extended_address, network_layer.MacExtendedAddress))
self.assertEqual(mac_address, mac_extended_address.mac_address)
class TestRloc16(unittest.TestCase):
def test_should_return_rloc16_value_when_rloc16_property_is_called(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# WHEN
actual_rloc16 = rloc16_obj.rloc16
# THEN
self.assertEqual(rloc16, actual_rloc16)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# THEN
self.assertEqual(rloc16_obj, network_layer.Rloc16(rloc16))
class TestRloc16Factory(unittest.TestCase):
def test_should_create_Rloc16_from_bytearray_when_parse_method_is_called(self):
# GIVEN
rloc16 = any_rloc16()
factory = network_layer.Rloc16Factory()
data = bytearray(struct.pack(">H", rloc16))
# WHEN
rloc16_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(rloc16_obj, network_layer.Rloc16))
self.assertEqual(rloc16, rloc16_obj.rloc16)
class TestMlEid(unittest.TestCase):
def test_should_return_ml_eid_value_when_ml_eid_property_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# WHEN
actual_ml_eid = ml_eid_obj.ml_eid
# THEN
self.assertEqual(ml_eid, actual_ml_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# THEN
self.assertEqual(ml_eid_obj, network_layer.MlEid(ml_eid))
class TestMlEidFactory(unittest.TestCase):
def test_should_create_MlEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
factory = network_layer.MlEidFactory()
# WHEN
ml_eid_obj = factory.parse(io.BytesIO(ml_eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(ml_eid_obj, network_layer.MlEid))
self.assertEqual(ml_eid, ml_eid_obj.ml_eid)
class TestStatus(unittest.TestCase):
def test_should_return_status_value_when_status_property_is_called(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# WHEN
actual_status = status_obj.status
# THEN
self.assertEqual(status, actual_status)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# THEN
self.assertEqual(status_obj, network_layer.Status(status))
class TestStatusFactory(unittest.TestCase):
def test_should_create_Status_from_bytearray_when_parse_method_is_called(self):
# GIVEN
status = any_status()
factory = network_layer.StatusFactory()
data = bytearray([status])
# WHEN
status_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(status_obj, network_layer.Status))
self.assertEqual(status, status_obj.status)
class TestTimeSinceLastTransaction(unittest.TestCase):
def test_should_return_seconds_value_when_seconds_property_is_called(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# WHEN
actual_seconds = time_since_last_transaction.seconds
# THEN
self.assertEqual(seconds, actual_seconds)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# THEN
self.assertEqual(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction(seconds),
)
class TestTimeSinceLastTransactionFactory(unittest.TestCase):
def test_should_create_TimeSinceLastTransaction_from_bytearray_when_parse_method_is_called(self):
# GIVEN
seconds = any_seconds()
factory = network_layer.TimeSinceLastTransactionFactory()
data = bytearray(struct.pack(">L", seconds))
# WHEN
time_since_last_transaction = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction,
))
self.assertEqual(seconds, time_since_last_transaction.seconds)
class TestRouterMask(unittest.TestCase):
def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_mask = network_layer.RouterMask(id_sequence, any_router_id_mask())
# WHEN
actual_id_sequence = router_mask.id_sequence
# THEN
self.assertEqual(id_sequence, actual_id_sequence)
def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self):
# GIVEN
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(any_id_sequence(), router_id_mask)
# WHEN
actual_router_id_mask = router_mask.router_id_mask
# THEN
self.assertEqual(router_id_mask, actual_router_id_mask)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(id_sequence, router_id_mask)
# THEN
self.assertEqual(router_mask, network_layer.RouterMask(id_sequence, router_id_mask))
class TestRouterMaskFactory(unittest.TestCase):
def test_should_create_RouterMask_from_bytearray_when_parse_method_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
factory = network_layer.RouterMaskFactory()
data = bytearray([id_sequence]) + struct.pack(">Q", router_id_mask)
# WHEN
router_mask = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(router_mask, network_layer.RouterMask))
self.assertEqual(id_sequence, router_mask.id_sequence)
self.assertEqual(router_id_mask, router_mask.router_id_mask)
class TestNdOption(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# WHEN
actual_options = nd_option.options
# THEN
self.assertEqual(options, actual_options)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# THEN
self.assertEqual(nd_option, network_layer.NdOption(options))
class TestNdOptionFactory(unittest.TestCase):
def test_should_create_NdOption_from_bytearray_when_parse_method_is_called(self):
# GIVEN
options = any_options()
factory = network_layer.NdOptionFactory()
data = bytearray(options)
# WHEN
nd_option = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(nd_option, network_layer.NdOption))
self.assertEqual(options, nd_option.options)
class TestThreadNetworkData(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# WHEN
actual_tlvs = thread_network_data.tlvs
# THEN
self.assertEqual(tlvs, actual_tlvs)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# THEN
self.assertEqual(thread_network_data, network_layer.ThreadNetworkData(tlvs))
class TestThreadNetworkDataFactory(unittest.TestCase):
def test_should_create_ThreadNetworkData_from_bytearray_when_parse_method_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
class DummyNetworkDataTlvsFactory:
def parse(self, data, message_info):
return bytearray(data.read())
factory = network_layer.ThreadNetworkDataFactory(DummyNetworkDataTlvsFactory())
# WHEN
thread_network_data = factory.parse(io.BytesIO(tlvs), common.MessageInfo())
# THEN
self.assertTrue(isinstance(thread_network_data, network_layer.ThreadNetworkData))
self.assertEqual(tlvs, thread_network_data.tlvs)
if __name__ == "__main__":
unittest.main()
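The `any_tlv_data` helper above encodes a one-byte type, a one-byte length, then the value bytes. A minimal sketch of the matching decoder, assuming that same layout (illustrative only, not OpenThread's real factory code):

import io

def parse_tlv(stream):
    # read the [type, length] header, then exactly `length` value bytes
    header = stream.read(2)
    _type, length = header[0], header[1]
    return _type, stream.read(length)

data = bytes([0x07, 3, 0xAA, 0xBB, 0xCC])
print(parse_tlv(io.BytesIO(data)))  # (7, b'\xaa\xbb\xcc')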
| 29.538618 | 103 | 0.718021 | 1,777 | 14,533 | 5.500281 | 0.141249 | 0.057704 | 0.037242 | 0.036935 | 0.606507 | 0.555044 | 0.436873 | 0.377225 | 0.352875 | 0.307755 | 0 | 0.009144 | 0.209867 | 14,533 | 491 | 104 | 29.598778 | 0.842027 | 0.134246 | 0 | 0.286385 | 0 | 0 | 0.001122 | 0 | 0 | 0 | 0 | 0 | 0.178404 | 1 | 0.187793 | false | 0 | 0.028169 | 0.042254 | 0.361502 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
feb8045cb4a0a0c1c1b374f1a7ddff3513dfcc95 | 7,079 | py | Python | salt/modules/kernelpkg_linux_apt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | ["Apache-2.0"] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | salt/modules/kernelpkg_linux_apt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | ["Apache-2.0"] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | salt/modules/kernelpkg_linux_apt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | ["Apache-2.0"] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z |
"""
Manage Linux kernel packages on APT-based systems
"""
import functools
import logging
import re
try:
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.exceptions import CommandExecutionError
HAS_REQUIRED_LIBS = True
except ImportError:
HAS_REQUIRED_LIBS = False
log = logging.getLogger(__name__)
__virtualname__ = "kernelpkg"
def __virtual__():
"""
Load this module on Debian-based systems only
"""
if not HAS_REQUIRED_LIBS:
return (False, "Required library could not be imported")
if __grains__.get("os_family", "") in ("Kali", "Debian"):
return __virtualname__
elif __grains__.get("os_family", "") == "Cumulus":
return __virtualname__
return (False, "Module kernelpkg_linux_apt: no APT based system detected")
def active():
"""
Return the version of the running kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.active
"""
if "pkg.normalize_name" in __salt__:
return __salt__["pkg.normalize_name"](__grains__["kernelrelease"])
return __grains__["kernelrelease"]
def list_installed():
"""
Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed
"""
pkg_re = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type()))
pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True)
if pkgs is None:
pkgs = []
result = list(filter(pkg_re.match, pkgs))
if result is None:
return []
prefix_len = len(_package_prefix()) + 1
return sorted(
[pkg[prefix_len:] for pkg in result], key=functools.cmp_to_key(_cmp_version)
)
def latest_available():
"""
Return the version of the latest kernel from the package repositories.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_available
"""
result = __salt__["pkg.latest_version"](
"{}-{}".format(_package_prefix(), _kernel_type())
)
if result == "":
return latest_installed()
version = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", result)
return "{}-{}-{}".format(version.group(1), version.group(2), _kernel_type())
def latest_installed():
"""
Return the version of the latest installed kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_installed
.. note::
This function may not return the same value as
:py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
has been installed and the system has not yet been rebooted.
The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function
exists to detect this condition.
"""
pkgs = list_installed()
if pkgs:
return pkgs[-1]
return None
def needs_reboot():
"""
Detect if a new kernel version has been installed but is not running.
Returns True if a new kernel is installed, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.needs_reboot
"""
return _LooseVersion(active()) < _LooseVersion(latest_installed())
def upgrade(reboot=False, at_time=None):
"""
Upgrade the kernel and optionally reboot the system.
reboot : False
Request a reboot if a new kernel is available.
at_time : immediate
Schedule the reboot at some point in the future. This argument
is ignored if ``reboot=False``. See
:py:func:`~salt.modules.system.reboot` for more details
on this argument.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
"""
result = __salt__["pkg.install"](
name="{}-{}".format(_package_prefix(), latest_available())
)
_needs_reboot = needs_reboot()
ret = {
"upgrades": result,
"active": active(),
"latest_installed": latest_installed(),
"reboot_requested": reboot,
"reboot_required": _needs_reboot,
}
if reboot and _needs_reboot:
log.warning("Rebooting system due to kernel upgrade")
__salt__["system.reboot"](at_time=at_time)
return ret
def upgrade_available():
"""
Detect if a new kernel version is available in the repositories.
Returns True if a new kernel is available, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade_available
"""
return _LooseVersion(latest_available()) > _LooseVersion(latest_installed())
def remove(release):
"""
Remove a specific version of the kernel.
release
The release number of an installed kernel. This must be the entire release
number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`,
not the package name.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.remove 4.4.0-70-generic
"""
if release not in list_installed():
raise CommandExecutionError(
"Kernel release '{}' is not installed".format(release)
)
if release == active():
raise CommandExecutionError("Active kernel cannot be removed")
target = "{}-{}".format(_package_prefix(), release)
log.info("Removing kernel package %s", target)
__salt__["pkg.purge"](target)
return {"removed": [target]}
def cleanup(keep_latest=True):
"""
Remove all unused kernel packages from the system.
keep_latest : True
In the event that the active kernel is not the latest one installed, setting this to True
will retain the latest kernel package, in addition to the active one. If False, all kernel
packages other than the active one will be removed.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.cleanup
"""
removed = []
# Loop over all installed kernel packages
for kernel in list_installed():
# Keep the active kernel package
if kernel == active():
continue
# Optionally keep the latest kernel package
if keep_latest and kernel == latest_installed():
continue
# Remove the kernel package
removed.extend(remove(kernel)["removed"])
return {"removed": removed}
def _package_prefix():
"""
Return static string for the package prefix
"""
return "linux-image"
def _kernel_type():
"""
Parse the kernel name and return its type
"""
return re.match(r"^[\d.-]+-(.+)$", active()).group(1)
def _cmp_version(item1, item2):
"""
Compare function for package version sorting
"""
vers1 = _LooseVersion(item1)
vers2 = _LooseVersion(item2)
if vers1 < vers2:
return -1
if vers1 > vers2:
return 1
return 0
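A small self-contained illustration of how `_cmp_version` plugs into sorting, mirroring `list_installed` above. `LooseVersion` is imported directly from distutils here as a stand-in for salt's wrapper (distutils is deprecated on newer Pythons; `packaging.version` is the modern replacement):

import functools
from distutils.version import LooseVersion as _LooseVersion

def _cmp_version(item1, item2):
    vers1, vers2 = _LooseVersion(item1), _LooseVersion(item2)
    return (vers1 > vers2) - (vers1 < vers2)

releases = ["4.15.0-39-generic", "4.4.0-70-generic", "4.15.0-112-generic"]
print(sorted(releases, key=functools.cmp_to_key(_cmp_version)))
# ['4.4.0-70-generic', '4.15.0-39-generic', '4.15.0-112-generic']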
| 24.49481 | 98 | 0.638932 | 858 | 7,079 | 5.081585 | 0.241259 | 0.029817 | 0.028899 | 0.03922 | 0.182339 | 0.155505 | 0.124312 | 0.064679 | 0.022936 | 0 | 0 | 0.004733 | 0.253849 | 7,079 | 288 | 99 | 24.579861 | 0.820712 | 0.418703 | 0 | 0.04 | 0 | 0 | 0.152616 | 0.006235 | 0 | 0 | 0 | 0 | 0 | 1 | 0.13 | false | 0 | 0.07 | 0 | 0.42 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
feb9338f0d564ca62f3ee051a6a33301b2ea1017 | 1,818 | py | Python | main.py | david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | 971b911efee8f52c5950ba777b79e58a4f840024 | ["Apache-2.0"] | null | null | null | main.py | david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | 971b911efee8f52c5950ba777b79e58a4f840024 | ["Apache-2.0"] | null | null | null | main.py | david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | 971b911efee8f52c5950ba777b79e58a4f840024 | ["Apache-2.0"] | null | null | null |
import json
import numpy as np
from numba import jit
from timeit import default_timer as timer
# Constant, used in the formula.
# Defined here to speed up the calculation, i.e. it's calculated only once
# and then placed in the formula.
SQRT_2PI = np.float32(np.sqrt(2 * np.pi))
# This function will run on the CPU.
def gaussian_cpu(values, mean, sigma):
"""Calculate values of the Gaussian function.
:param values: list, function input parameters.
:param mean: float, arithmetic mean.
:param sigma: float, standard deviation.
:return: list.
"""
result = np.zeros_like(values)
for index, item in enumerate(values):
result[index] = (1 / (sigma * SQRT_2PI)) * (np.e ** (-0.5 * ((item - mean) / sigma) ** 2))
return result
# JIT-compiled version. Note: plain numba.jit as used here compiles to native
# CPU machine code; an actual GPU run would need numba.cuda or an equivalent CUDA target.
gaussian_gpu = jit(gaussian_cpu)
def write_to_file(name, values):
"""Write results to a file.
:param name: string, file name, only prefix.
:param values: dictionary, values to write.
"""
with open(name + ".json", 'w') as f:
json.dump(values, f, indent=4)
if __name__ == "__main__":
# Randomly generated values.
x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
# Randomly generated mean.
m = np.random.uniform(1, 10)
# Randomly generated standard deviation.
s = np.random.uniform(1, 10)
# The number of rounds.
n = 1
# Used to store execution time.
time_results = {}
for i in range(n):
start = timer()
gaussian_cpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("cpu", time_results)
for i in range(n):
start = timer()
gaussian_gpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("gpu", time_results)
| 25.605634 | 98 | 0.633663 | 263 | 1,818 | 4.273764 | 0.406844 | 0.048932 | 0.029359 | 0.033808 | 0.22242 | 0.190391 | 0.147687 | 0.147687 | 0.147687 | 0.147687 | 0 | 0.020513 | 0.249175 | 1,818 | 70 | 99 | 25.971429 | 0.80293 | 0.356986 | 0 | 0.25 | 0 | 0 | 0.017953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
227dbc607b392dad80b7a078ce5ee4e6eb5704f6 | 5,605 | py | Python | utility_functions.py | Team-501-The-PowerKnights/Powerknights-Slack-Bot | 1ce25c954aa0c089aa93a3d63bd475d585d39bb6 | ["Apache-2.0"] | 1 | 2019-05-03T13:20:09.000Z | 2019-05-03T13:20:09.000Z | utility_functions.py | Team-501-The-PowerKnights/Powerknights-Slack-Bot | 1ce25c954aa0c089aa93a3d63bd475d585d39bb6 | ["Apache-2.0"] | 8 | 2019-05-04T17:06:21.000Z | 2020-05-29T12:37:06.000Z | utility_functions.py | Team-501-The-PowerKnights/Powerknights-Slack-Bot | 1ce25c954aa0c089aa93a3d63bd475d585d39bb6 | ["Apache-2.0"] | null | null | null |
import datetime
def iso_extract_info(string):
"""
Will get all of the info and return it as an array
:param string: ISO formatted string that will be used for extraction
    :return: array [year, month, day, minutes, hours]
    :note: every item is an int except for minutes, which is kept as a string
    :note: hours is non-zero only when military_time_hour is greater than 12
"""
elements = []
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
hours = 0
elements.append(year_int)
elements.append(month_int)
elements.append(day_int)
elements.append(minutes_int)
if military_time_hours_int > 12:
hours += military_time_hours_int - 12
elements.append(hours)
return elements
# # Testing:
# print("[year, month, day, military_time_hour, minutes, hours]")
# print(iso_extract_info('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def iso_format_to_regular(string):
"""
Will take a string that is an iso formatted string and make it look readable
:param string: the iso formatted string
:return: str
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
if military_time_hours_int > 12:
hours = military_time_hours_int - 12
final_string = "{month}/{day}/{year} {hour}:{minute}PM".format(
month=month_int, day=day_int, year=year_int, hour=hours, minute=minutes_int)
return final_string
else:
final_string = "{month}/{day}/{year} {hour}:{minute}AM".format(
month=month_int, day=day_int, year=year_int, hour=military_time_hours_int, minute=minutes_int)
return final_string
# Testing:
# print(iso_format_to_regular('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def fix_time(strange_date):
"""
    Will rearrange the strange date that Google gives and replace it with a normal, readable string.
:param strange_date: strange time that google gives when an event is marked as "all day"
:return: str
"""
items = strange_date.split("-")
year_int = int(items[0])
month_int = int(items[1])
day_int = int(items[2])
new_str = "{month}/{day}/{year}".format(
month=month_int, day=day_int, year=year_int)
return new_str
# Doesn't use the "iso_extract_info" function
def multiday_checker_STRANGE(start_date, end_date):
    """
    Will check if an event is more than a day long
    :param start_date: Strange Google formatted date of the start of the event
    :param end_date: Strange Google formatted date of the end of the event
    :return: Boolean
    """
    # Compare actual dates rather than summing the date parts; the original
    # digit-sum comparison broke across month and year boundaries.
    start = datetime.date(*[int(part) for part in start_date.split("-")])
    end = datetime.date(*[int(part) for part in end_date.split("-")])
    return (end - start).days > 1
# Testing:
# print(multiday_checker_STRANGE('2019-04-21', '2019-04-22'))
# Doesn't use the "iso_extract_info" function
def STRANGE_string_weekday(string):
"""
Will take a string that is a date formatted in the Google format and find what day of the week it is
:param string: Google formatted string for the date
:return: string
"""
items = string.split("/")
year_int = int(items[2])
month_int = int(items[0])
day_int = int(items[1])
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(STRANGE_string_weekday("2019-04-27"))
# Doesn't use the "iso_extract_info" function
def ISO_string_weekday(string):
"""
Will take a string that is a date formatted in the ISO format and find what day of the week it is
:param string: ISO formatted string for the date
:return: string
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(ISO_string_weekday('2019-06-28T16:00:00-04:00'))
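Most of the character slicing in this module can be replaced by the standard library. A minimal sketch (Python 3.7+, whose `fromisoformat` accepts the offset-bearing strings these helpers receive):

import datetime

dt = datetime.datetime.fromisoformat('2019-04-27T16:00:00-04:00')
print(dt.strftime('%m/%d/%Y %I:%M%p'))  # 04/27/2019 04:00PM
print(dt.strftime('%A'))                # Saturday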
| 31.846591 | 106 | 0.662979 | 814 | 5,605 | 4.355037 | 0.167076 | 0.028773 | 0.058674 | 0.057546 | 0.635825 | 0.601975 | 0.583357 | 0.516502 | 0.493935 | 0.473061 | 0 | 0.034324 | 0.225513 | 5,605 | 175 | 107 | 32.028571 | 0.782308 | 0.316503 | 0 | 0.576923 | 0 | 0 | 0.056769 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.009615 | 0 | 0.278846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
228079c406da2849bf07a999b9fbe4042daf4300 | 1,424 | py | Python | python/ch_06_Animatronic_Head.py | tallamjr/mbms | 6763faa870d1a16f272b3eade70b433ed3df0e51 | ["MIT"] | 18 | 2018-06-07T07:11:59.000Z | 2022-02-28T20:08:23.000Z | python/ch_06_Animatronic_Head.py | tallamjr/mbms | 6763faa870d1a16f272b3eade70b433ed3df0e51 | ["MIT"] | 1 | 2020-05-20T16:24:24.000Z | 2020-05-21T09:03:24.000Z | python/ch_06_Animatronic_Head.py | tallamjr/mbms | 6763faa870d1a16f272b3eade70b433ed3df0e51 | ["MIT"] | 8 | 2019-04-10T16:04:11.000Z | 2022-01-08T20:39:15.000Z |
from microbit import *
import random, speech, radio
eye_angles = [50, 140, 60, 90, 140]
radio.off()
sentences = [
"Hello my name is Mike",
"What is your name",
"I am looking at you",
"Exterminate exterminate exterminate",
"Number Five is alive",
"I cant do that Dave",
"daisee daisee give me your answer do"
]
lips0 = Image("00000:"
"00000:"
"99999:"
"00000:"
"00000")
lips1 = Image("00000:"
"00900:"
"99099:"
"00900:"
"00000")
lips2 = Image("00000:"
"09990:"
"99099:"
"09990:"
"00000")
lips = [lips0, lips1, lips2]
def set_servo_angle(pin, angle):
duty = 26 + (angle * 51) / 90
pin.write_analog(duty)
def speak(sentence):
    words = sentence.split()
    for word in words:
        display.show(random.choice(lips))
        speech.say(word)
    display.show(lips0)
def act():
set_servo_angle(pin2, random.choice(eye_angles))
sleep(300)
speak(random.choice(sentences))
set_servo_angle(pin2, 90)
base_z = 0
while True:
new_z = abs(accelerometer.get_z())
if abs(new_z - base_z) > 20:
base_z = new_z
act()
if random.randint(0, 1000) == 0: # say something 1 time in 1000
act()
sleep(200)
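A quick desk-check of the duty-cycle arithmetic in `set_servo_angle`, runnable under plain Python. The constants assume a roughly 20 ms PWM period with `write_analog` taking 0-1023, which puts the pulse near the conventional 0.5-2.5 ms servo range:

for angle in (0, 90, 180):
    duty = 26 + (angle * 51) / 90
    print(angle, round(duty, 1))  # 0 -> 26.0, 90 -> 77.0, 180 -> 128.0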
| 21.575758 | 67 | 0.525281 | 174 | 1,424 | 4.206897 | 0.522989 | 0.040984 | 0.053279 | 0.046448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134844 | 0.349017 | 1,424 | 66 | 68 | 21.575758 | 0.6548 | 0.019663 | 0 | 0.25 | 0 | 0 | 0.182079 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.038462 | 0 | 0.096154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2283023fbf32c038ed31074c2a312a5a7aa70d38 | 5,248 | py | Python | src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 6099f4169a49f71cee2e24bb1052f273039505cd | ["BSD-3-Clause"] | 138 | 2017-08-15T18:56:55.000Z | 2022-03-29T05:23:37.000Z | src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 6099f4169a49f71cee2e24bb1052f273039505cd | ["BSD-3-Clause"] | 444 | 2017-09-11T01:15:37.000Z | 2022-03-31T17:30:33.000Z | src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 6099f4169a49f71cee2e24bb1052f273039505cd | ["BSD-3-Clause"] | 33 | 2017-10-30T14:23:53.000Z | 2022-03-25T01:36:13.000Z |
import os, tempfile, subprocess
from hammer_vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters
from hammer_vlsi.units import VoltageValue, TemperatureValue
from hammer_tech import Library, ExtraLibrary
from typing import NamedTuple, Dict, Any, List
from abc import ABCMeta, abstractmethod
class SKY130SRAMGenerator(HammerSRAMGeneratorTool):
def tool_config_prefix(self) -> str:
return "sram_generator.sky130"
def version_number(self, version: str) -> int:
return 0
# Run generator for a single sram and corner
def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibrary:
tech_cache_dir = os.path.abspath(self.technology.cache_dir)
#TODO: this is really an abuse of the corner stuff
if corner.type == MMMCCornerType.Setup:
speed_name = "slow"
speed = "SS"
elif corner.type == MMMCCornerType.Hold:
speed_name = "fast"
speed = "FF"
elif corner.type == MMMCCornerType.Extra:
speed_name = "typical"
speed = "TT"
# Different target memories based on port count
# if params.family == "1rw":
# self.logger.info("Compiling 1rw memories to DFFRAM instances")
# base_dir = self.get_setting("technology.sky130.dffram_lib")
# fam_code = params.family
# sram_name = "RAM{d}x{w}".format(
# d=params.depth,
# w=params.width)
# #TODO: need real libs (perhaps run Liberate here?)
# #For now, use the dummy lib for all corners
# corner_str = "" #
# lib_path = "{b}/{n}.lib".format(
# b=base_dir,
# n=sram_name)
# if not os.path.exists(lib_path):
# self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
# return ExtraLibrary(prefix=None, library=Library(
# name=sram_name,
# nldm_liberty_file=lib_path,
# lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
# #TODO: GDS not generated. Unclear which DEF to use?
# #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
# spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name),
# #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. cells)
# #Need to add std cell behavioral Verilog to sim.inputs.input_files
# verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name),
# corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
# supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
# provides=[{'lib_type': "sram", 'vt': params.vt}]))
# elif params.family == "1rw1r":
if params.family == "1rw":
self.logger.info("Compiling 1rw1r memories to OpenRAM instances")
base_dir = self.get_setting("technology.sky130.openram_lib")
fam_code = params.family
            s = round(round(params.width * params.depth / 8, -3) / 1000)  # size in kiB
            w = params.width
            d = params.depth
            m = 8
sram_name = f"sky130_sram_{s}kbyte_1rw1r_{w}x{d}_{m}"
print(f"SRAM_NAME: {sram_name}")
#TODO: Hammer SRAMParameters doesn't have this info
#TODO: replace this if OpenRAM characterization done for other corners
#For now, use typical lib for all corners
corner_str = "TT_1p8V_25C"
#corner_str = "{speed}_{volt}V_{temp}C".format(
# speed = speed,
# volt = str(corner.voltage.value_in_units("V")).replace(".","p"),
# temp = str(int(corner.temp.value_in_units("C"))).replace(".","p"))
lib_path = "{b}/{n}/{n}_{c}.lib".format(
b=base_dir,
n=sram_name,
c=corner_str)
if not os.path.exists(lib_path):
self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
return ExtraLibrary(prefix=None, library=Library(
name=sram_name,
nldm_liberty_file=lib_path,
lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
spice_file="{b}/{n}/{n}.lvs.sp".format(b=base_dir,n=sram_name),
verilog_sim="{b}/{n}/{n}.v".format(b=base_dir,n=sram_name),
corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
provides=[{'lib_type': "sram", 'vt': params.vt}]))
else:
self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family))
return ExtraLibrary(prefix=None, library=None)
tool = SKY130SRAMGenerator
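A worked example of the macro-name arithmetic in `generate_sram` (hypothetical width/depth values), showing how a 32x256 request resolves to the 1 kB OpenRAM macro:

width, depth, m = 32, 256, 8
s = round(round(width * depth / 8, -3) / 1000)  # bytes, rounded to kiB
print(f"sky130_sram_{s}kbyte_1rw1r_{width}x{depth}_{m}")
# sky130_sram_1kbyte_1rw1r_32x256_8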
| 51.960396 | 126 | 0.582127 | 667 | 5,248 | 4.428786 | 0.290855 | 0.043331 | 0.037238 | 0.047393 | 0.459716 | 0.416385 | 0.391672 | 0.374069 | 0.287745 | 0.287745 | 0 | 0.01435 | 0.282965 | 5,248 | 100 | 127 | 52.48 | 0.770662 | 0.391578 | 0 | 0 | 0 | 0 | 0.131997 | 0.02799 | 0 | 0 | 0 | 0.01 | 0 | 1 | 0.056604 | false | 0 | 0.113208 | 0.037736 | 0.264151 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2283626d76b9fe6781848e584e29b4b24ab5e062 | 2,837 | py | Python | Section 4/nlp-4-ngrams.py | PacktPublishing/Hands-on-NLP-with-NLTK-and-scikit-learn- | 8bb2095093a822363675368a4216d30d14cac501 | ["MIT"] | 34 | 2018-08-14T09:59:13.000Z | 2021-11-08T13:12:50.000Z | Section 4/nlp-4-ngrams.py | anapatgl/Hands-on-NLP-with-NLTK-and-scikit-learn- | 8bb2095093a822363675368a4216d30d14cac501 | ["MIT"] | 1 | 2018-11-28T19:20:37.000Z | 2018-11-28T19:20:37.000Z | Section 4/nlp-4-ngrams.py | anapatgl/Hands-on-NLP-with-NLTK-and-scikit-learn- | 8bb2095093a822363675368a4216d30d14cac501 | ["MIT"] | 31 | 2018-08-07T07:34:33.000Z | 2022-03-15T08:50:44.000Z |
import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
'''Extract TF-IDF features from corpus'''
sa_stop_words = nltk.corpus.stopwords.words("english")
# words that might invert a sentence's meaning
white_list = [
'what', 'but', 'if', 'because', 'as', 'until', 'against',
'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
# take these out of the standard NLTK stop word list
sa_stop_words = [sw for sw in sa_stop_words if sw not in white_list]
# vectorize means we turn non-numerical data into an array of numbers
count_vectorizer = feature_extraction.text.CountVectorizer(
lowercase=True, # for demonstration, True by default
tokenizer=nltk.word_tokenize, # use the NLTK tokenizer
        min_df=2,  # minimum document frequency: the word must appear in at least two documents
ngram_range=(1, 2),
stop_words=sa_stop_words
)
processed_corpus = count_vectorizer.fit_transform(corpus)
processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(
processed_corpus)
return processed_corpus
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
movie_sentiment_data.target_names))
movie_tfidf = extract_features(movie_sentiment_data.data)
X_train, X_test, y_train, y_test = model_selection.train_test_split(
movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))
clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))
clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))
clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))
voting_model = ensemble.VotingClassifier(
estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
voting_model.score(X_test, y_test)))
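One caveat if this script is extended to score unseen reviews: `extract_features` fits new vectorizers on every call, so fresh text would land in an incompatible feature space. A sketch of the usual pattern, assuming it runs inside the same script (fit once on the training corpus, then only transform new documents):

count_vec = feature_extraction.text.CountVectorizer(lowercase=True, ngram_range=(1, 2))
tfidf = feature_extraction.text.TfidfTransformer()
train_matrix = tfidf.fit_transform(count_vec.fit_transform(movie_sentiment_data.data))

new_reviews = ["A wonderful, moving film.", "Dull plot and wooden acting."]
new_matrix = tfidf.transform(count_vec.transform(new_reviews))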
| 36.844156 | 90 | 0.70638 | 382 | 2,837 | 5.028796 | 0.473822 | 0.01874 | 0.01874 | 0.026028 | 0.091098 | 0.052056 | 0 | 0 | 0 | 0 | 0 | 0.009988 | 0.152979 | 2,837 | 76 | 91 | 37.328947 | 0.78943 | 0.131477 | 0 | 0 | 0 | 0 | 0.164831 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.075472 | 0 | 0.113208 | 0.132075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2284b104a47dc324bd27f42ce83e41850b152d6c | 27,170 | py | Python | nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 984fd34921e81659c4594a22ab142311808b3bb7 | ["Apache-2.0"] | 4,145 | 2019-09-13T08:29:43.000Z | 2022-03-31T18:31:44.000Z | nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 984fd34921e81659c4594a22ab142311808b3bb7 | ["Apache-2.0"] | 2,031 | 2019-09-17T16:51:39.000Z | 2022-03-31T23:52:41.000Z | nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 984fd34921e81659c4594a22ab142311808b3bb7 | ["Apache-2.0"] | 1,041 | 2019-09-13T10:08:21.000Z | 2022-03-30T06:37:38.000Z |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import torch
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.torch.helpers import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
VALID_SUPPLEMENTARY_DATA_TYPES,
DurationPrior,
Durations,
Energy,
LMTokens,
LogMel,
Pitch,
SpeakerID,
WithLens,
)
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core.classes import Dataset
from nemo.utils import logging
class TTSDataset(Dataset):
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
tokens: Optional[List[str]] = None,
text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
text_normalizer_call_args: Optional[Dict] = None,
text_tokenizer_pad_id: Optional[int] = None,
sup_data_types: Optional[List[str]] = None,
sup_data_path: Optional[Union[Path, str]] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[str] = None,
trim: bool = False,
n_fft=1024,
win_length=None,
hop_length=None,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=None,
**kwargs,
):
"""Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch).
        Most supplementary data types will be computed on the fly and saved in the sup_data_path folder if they did not exist before.
Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section).
Args:
manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the
dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>
"mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional)
"duration": <Duration of audio clip in seconds> (Optional)
"text": <THE_TRANSCRIPT> (Optional)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function.
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
sup_data_types (Optional[List[str]]): List of supplementary data types.
sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio
files) that will be pruned prior to training. Defaults to None which does not prune.
trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
n_fft (Optional[int]): The number of fft samples. Defaults to 1024
win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft.
hop_length (Optional[int]): The hope length between fft computations. Defaults to None which uses n_fft//4.
window (Optional[str]): One of 'hann', 'hamming', 'blackman','bartlett', 'none'. Which corresponds to the
equivalent torch window function.
n_mels (Optional[int]): The number of mel filters. Defaults to 80.
lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0.
highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
Keyword Args:
durs_file (Optional[str]): String path to pickled durations location.
            durs_type (Optional[str]): Type of durations. Currently only "aligner-based" is supported.
use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False.
pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
pitch_avg (Optional[float]): The mean that we use to normalize the pitch.
pitch_std (Optional[float]): The std that we use to normalize the pitch.
pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not.
"""
super().__init__()
self.text_normalizer = text_normalizer
self.text_normalizer_call = (
self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer
)
self.text_normalizer_call_args = text_normalizer_call_args if text_normalizer_call_args is not None else {}
self.text_tokenizer = text_tokenizer
if isinstance(self.text_tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = text_tokenizer.pad
self.tokens = text_tokenizer.tokens
else:
            if text_tokenizer_pad_id is None:
                raise ValueError("text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
            if tokens is None:
                raise ValueError("tokens must be specified if text_tokenizer is not BaseTokenizer")
self.text_tokenizer_pad_id = text_tokenizer_pad_id
self.tokens = tokens
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
if sup_data_path is not None:
Path(sup_data_path).mkdir(parents=True, exist_ok=True)
self.sup_data_path = sup_data_path
self.sup_data_types = (
[DATA_STR2DATA_CLASS[d_as_str] for d_as_str in sup_data_types] if sup_data_types is not None else []
)
self.sup_data_types_set = set(self.sup_data_types)
self.data = []
audio_files = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
file_info = {
"audio_filepath": item["audio_filepath"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
"text_tokens": None,
"speaker_id": item["speaker"] if "speaker" in item else None,
}
if "text" in item:
text = item["text"]
if self.text_normalizer is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_args)
text_tokens = self.text_tokenizer(text)
file_info["raw_text"] = item["text"]
file_info["text_tokens"] = text_tokens
audio_files.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(audio_files)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
if ignore_file:
logging.info(f"using {ignore_file} to prune dataset.")
with open(Path(ignore_file).expanduser(), "rb") as f:
wavs_to_ignore = set(pickle.load(f))
pruned_duration = 0 if total_duration is not None else None
pruned_items = 0
for item in audio_files:
audio_path = item['audio_filepath']
audio_id = Path(audio_path).stem
# Prune data according to min/max_duration & the ignore file
if total_duration is not None:
if (min_duration and item["duration"] < min_duration) or (
max_duration and item["duration"] > max_duration
):
pruned_duration += item["duration"]
pruned_items += 1
continue
if ignore_file and (audio_id in wavs_to_ignore):
pruned_items += 1
pruned_duration += item["duration"]
wavs_to_ignore.remove(audio_id)
continue
self.data.append(item)
logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files")
if pruned_duration is not None:
logging.info(
f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
f"{(total_duration - pruned_duration) / 3600:.2f} hours."
)
self.sample_rate = sample_rate
self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
self.trim = trim
self.n_fft = n_fft
self.n_mels = n_mels
self.lowfreq = lowfreq
self.highfreq = highfreq
self.window = window
self.win_length = win_length or self.n_fft
self.hop_length = hop_length
self.hop_len = self.hop_length or self.n_fft // 4
self.fb = torch.tensor(
librosa.filters.mel(
self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq
),
dtype=torch.float,
).unsqueeze(0)
window_fn = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}.get(self.window, None)
self.stft = lambda x: torch.stft(
input=x,
n_fft=self.n_fft,
hop_length=self.hop_len,
win_length=self.win_length,
window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
)
for data_type in self.sup_data_types:
if data_type not in VALID_SUPPLEMENTARY_DATA_TYPES:
raise NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.")
getattr(self, f"add_{data_type.name}")(**kwargs)
def add_log_mel(self, **kwargs):
pass
def add_durations(self, **kwargs):
durs_file = kwargs.pop('durs_file')
durs_type = kwargs.pop('durs_type')
audio_stem2durs = torch.load(durs_file)
self.durs = []
for tag in [Path(d["audio_filepath"]).stem for d in self.data]:
durs = audio_stem2durs[tag]
if durs_type == "aligner-based":
self.durs.append(durs)
else:
raise NotImplementedError(
f"{durs_type} duration type is not supported. Only align-based is supported at this moment."
)
def add_duration_prior(self, **kwargs):
self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False)
if self.use_beta_binomial_interpolator:
self.beta_binomial_interpolator = BetaBinomialInterpolator()
def add_pitch(self, **kwargs):
self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2'))
self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7'))
self.pitch_avg = kwargs.pop("pitch_avg", None)
self.pitch_std = kwargs.pop("pitch_std", None)
self.pitch_norm = kwargs.pop("pitch_norm", False)
def add_energy(self, **kwargs):
pass
def add_speaker_id(self, **kwargs):
pass
def get_spec(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.stft(audio)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
return spec
def get_log_mel(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.get_spec(audio)
mel = torch.matmul(self.fb.to(spec.dtype), spec)
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
def __getitem__(self, index):
sample = self.data[index]
audio_stem = Path(sample["audio_filepath"]).stem
features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
text = torch.tensor(sample["text_tokens"]).long()
text_length = torch.tensor(len(sample["text_tokens"])).long()
log_mel, log_mel_length = None, None
if LogMel in self.sup_data_types_set:
mel_path = sample["mel_filepath"]
if mel_path is not None and Path(mel_path).exists():
log_mel = torch.load(mel_path)
else:
mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt"
if mel_path.exists():
log_mel = torch.load(mel_path)
else:
log_mel = self.get_log_mel(audio)
torch.save(log_mel, mel_path)
log_mel = log_mel.squeeze(0)
log_mel_length = torch.tensor(log_mel.shape[1]).long()
durations = None
if Durations in self.sup_data_types_set:
durations = self.durs[index]
duration_prior = None
if DurationPrior in self.sup_data_types_set:
if self.use_beta_binomial_interpolator:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item()))
else:
prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt"
if prior_path.exists():
duration_prior = torch.load(prior_path)
else:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = beta_binomial_prior_distribution(text_length, mel_len)
duration_prior = torch.from_numpy(duration_prior)
torch.save(duration_prior, prior_path)
pitch, pitch_length = None, None
if Pitch in self.sup_data_types_set:
pitch_name = (
f"{audio_stem}_pitch_pyin_"
f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_"
f"fl{self.win_length}_hs{self.hop_len}.pt"
)
pitch_path = Path(self.sup_data_path) / pitch_name
if pitch_path.exists():
pitch = torch.load(pitch_path).float()
else:
pitch, _, _ = librosa.pyin(
audio.numpy(),
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
sr=self.sample_rate,
fill_na=0.0,
)
pitch = torch.from_numpy(pitch).float()
torch.save(pitch, pitch_path)
if self.pitch_avg is not None and self.pitch_std is not None and self.pitch_norm:
pitch -= self.pitch_avg
pitch[pitch == -self.pitch_avg] = 0.0  # Zero out values that were previously zero
pitch /= self.pitch_std
pitch_length = torch.tensor(len(pitch)).long()
energy, energy_length = None, None
if Energy in self.sup_data_types_set:
energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt"
if energy_path.exists():
energy = torch.load(energy_path).float()
else:
spec = self.get_spec(audio)
energy = torch.linalg.norm(spec.squeeze(0), axis=0).float()
torch.save(energy, energy_path)
energy_length = torch.tensor(len(energy)).long()
speaker_id = None
if SpeakerID in self.sup_data_types_set:
speaker_id = torch.tensor(sample["speaker_id"]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
)
def __len__(self):
return len(self.data)
def join_data(self, data_dict):
result = []
for data_type in MAIN_DATA_TYPES + self.sup_data_types:
result.append(data_dict[data_type.name])
if issubclass(data_type, WithLens):
result.append(data_dict[f"{data_type.name}_lens"])
return tuple(result)
def general_collate_fn(self, batch):
(
_,
audio_lengths,
_,
tokens_lengths,
_,
log_mel_lengths,
durations_list,
duration_priors_list,
pitches,
pitches_lengths,
energies,
energies_lengths,
_,
) = zip(*batch)
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None
max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None
max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None
max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None
if LogMel in self.sup_data_types_set:
log_mel_pad = torch.finfo(batch[0][4].dtype).tiny  # index 4 is log_mel in the __getitem__ tuple; index 2 (text) is an integer tensor without finfo
duration_priors = (
torch.zeros(
len(duration_priors_list),
max([prior_i.shape[0] for prior_i in duration_priors_list]),
max([prior_i.shape[1] for prior_i in duration_priors_list]),
)
if DurationPrior in self.sup_data_types_set
else []
)
audios, tokens, log_mels, durations_list, pitches, energies, speaker_ids = [], [], [], [], [], [], []
for i, sample_tuple in enumerate(batch):
(
audio,
audio_len,
token,
token_len,
log_mel,
log_mel_len,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = sample_tuple
audio = general_padding(audio, audio_len.item(), max_audio_len)
audios.append(audio)
token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
tokens.append(token)
if LogMel in self.sup_data_types_set:
log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad))
if Durations in self.sup_data_types_set:
durations_list.append(general_padding(durations, len(durations), max_durations_len))
if DurationPrior in self.sup_data_types_set:
duration_priors[i, : duration_prior.shape[0], : duration_prior.shape[1]] = duration_prior
if Pitch in self.sup_data_types_set:
pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len))
if Energy in self.sup_data_types_set:
energies.append(general_padding(energy, energy_length.item(), max_energies_len))
if SpeakerID in self.sup_data_types_set:
speaker_ids.append(speaker_id)
data_dict = {
"audio": torch.stack(audios),
"audio_lens": torch.stack(audio_lengths),
"text": torch.stack(tokens),
"text_lens": torch.stack(tokens_lengths),
"log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None,
"log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None,
"durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None,
"duration_prior": duration_priors if DurationPrior in self.sup_data_types_set else None,
"pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None,
"pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None,
"energy": torch.stack(energies) if Energy in self.sup_data_types_set else None,
"energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None,
"speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None,
}
return data_dict
def _collate_fn(self, batch):
data_dict = self.general_collate_fn(batch)
joined_data = self.join_data(data_dict)
return joined_data
class MixerTTSDataset(TTSDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _albert(self):
from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel
self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>')
space_value = self.lm_model_tokenizer._convert_token_to_id('▁')
self.id2lm_tokens = {}
for i, d in enumerate(self.data):
raw_text = d["raw_text"]
assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance(
self.text_tokenizer, EnglishCharsTokenizer
)
preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(raw_text)
lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False)
if self.text_tokenizer.pad_with_space:
lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value]
self.id2lm_tokens[i] = lm_tokens_as_ids
def add_lm_tokens(self, **kwargs):
lm_model = kwargs.pop('lm_model')
if lm_model == "albert":
self._albert()
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def __getitem__(self, index):
(
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = super().__getitem__(index)
lm_tokens = None
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.tensor(self.id2lm_tokens[index]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
lm_tokens,
)
def _collate_fn(self, batch):
batch = list(zip(*batch))
data_dict = self.general_collate_fn(list(zip(*batch[:13])))
lm_tokens_list = batch[13]
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.full(
(len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])),
fill_value=self.lm_padding_value,
)
for i, lm_tokens_i in enumerate(lm_tokens_list):
lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i
data_dict[LMTokens.name] = lm_tokens
joined_data = self.join_data(data_dict)
return joined_data
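# Hedged usage sketch (not part of the original module): both datasets are
# meant to be wrapped in a torch DataLoader with their own _collate_fn, so the
# per-sample tuples returned by __getitem__ are padded and stacked by
# general_collate_fn. Constructor arguments are omitted here because they are
# defined elsewhere in the file.
# from torch.utils.data import DataLoader
# loader = DataLoader(dataset, batch_size=16, collate_fn=dataset._collate_fn)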
| 42.386895
| 147
| 0.61325
| 3,373
| 27,170
| 4.68989
| 0.12689
| 0.02124
| 0.027119
| 0.034389
| 0.325368
| 0.275049
| 0.225299
| 0.197042
| 0.154435
| 0.107719
| 0
| 0.004432
| 0.302392
| 27,170
| 640
| 148
| 42.453125
| 0.830115
| 0.180199
| 0
| 0.236025
| 0
| 0
| 0.076466
| 0.010872
| 0
| 0
| 0
| 0
| 0.00207
| 1
| 0.039337
| false
| 0.006211
| 0.031056
| 0.00207
| 0.093168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2284c119fbaa59ef00a4dd53417eccef839221b3
| 1,140
|
py
|
Python
|
anmotordesign/server.py
|
MarkWengSTR/ansys-maxwell-online
|
f9bbc535c7637d8f34abb241acfb97d1bdbe4103
|
[
"MIT"
] | 8
|
2021-01-25T11:17:32.000Z
|
2022-03-29T05:34:47.000Z
|
anmotordesign/server.py
|
MarkWengSTR/ansys-maxwell-online
|
f9bbc535c7637d8f34abb241acfb97d1bdbe4103
|
[
"MIT"
] | 1
|
2021-06-14T18:40:16.000Z
|
2021-08-25T14:37:21.000Z
|
anmotordesign/server.py
|
MarkWengSTR/ansys-maxwell-online
|
f9bbc535c7637d8f34abb241acfb97d1bdbe4103
|
[
"MIT"
] | 8
|
2020-09-25T15:40:07.000Z
|
2022-03-29T05:34:48.000Z
|
from flask import Flask, request, jsonify
from flask_cors import CORS
from run import run_ansys
from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check
ansys_processing_count = 0
# debug
# import ipdb; ipdb.set_trace()
app = Flask(__name__)
CORS(app) # local development cors
@app.route('/run_simu', methods=["POST"])
def run_simulation():
global ansys_processing_count
ansys_processing_count += 1
ctx = {
"request": request.get_json(),
"allow_run": True,
"process": {
"limit": 4,
"count": ansys_processing_count,
},
"start_run_response": {"msg": "start run at background"},
"error": {
"validate": {"msg": ""}
}
}
if spec_present(ctx) and \
data_type_validate(ctx) and \
spec_keys_validate(ctx) and \
ansys_overload_check(ctx):
ctx = run_ansys(ctx)
else:
return jsonify(ctx["error"]["validate"])
return jsonify(ctx["response"])
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
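# Hedged client-side sketch (not part of the original server): the /run_simu
# route above expects a JSON body and answers with JSON. Assuming the server
# runs locally on the port configured in app.run(), a caller could look like
# this; the payload keys are illustrative only, since they depend on what
# spec_keys_validate() accepts.
import requests  # assumption: available in the client environment

def submit_simulation(spec_payload):
    # POST the motor spec to the running server and return its JSON reply.
    response = requests.post("http://localhost:5000/run_simu", json=spec_payload)
    return response.json()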
| 25.909091
| 99
| 0.62193
| 140
| 1,140
| 4.75
| 0.421429
| 0.090226
| 0.120301
| 0.075188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012926
| 0.253509
| 1,140
| 43
| 100
| 26.511628
| 0.768508
| 0.050877
| 0
| 0
| 0
| 0
| 0.131725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.121212
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228727092b8b8c1cbde1234be034bd7032daae7a
| 1,488
|
py
|
Python
|
admin_tools/urls.py
|
aucoeur/WeVoteServer
|
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
|
[
"MIT"
] | 44
|
2015-11-19T04:52:39.000Z
|
2021-03-17T02:08:26.000Z
|
admin_tools/urls.py
|
aucoeur/WeVoteServer
|
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
|
[
"MIT"
] | 748
|
2015-09-03T04:18:33.000Z
|
2022-03-10T14:08:10.000Z
|
admin_tools/urls.py
|
aucoeur/WeVoteServer
|
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
|
[
"MIT"
] | 145
|
2015-09-19T10:10:44.000Z
|
2022-03-04T21:01:12.000Z
|
# admin_tools/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.admin_home_view, name='admin_home',),
re_path(r'^data_cleanup/$', views.data_cleanup_view, name='data_cleanup'),
re_path(r'^data_cleanup_organization_analysis/$',
views.data_cleanup_organization_analysis_view, name='data_cleanup_organization_analysis'),
re_path(r'^data_cleanup_organization_list_analysis/$',
views.data_cleanup_organization_list_analysis_view, name='data_cleanup_organization_list_analysis'),
re_path(r'^data_cleanup_position_list_analysis/$',
views.data_cleanup_position_list_analysis_view, name='data_cleanup_position_list_analysis'),
re_path(r'^data_cleanup_voter_hanging_data_process/$',
views.data_cleanup_voter_hanging_data_process_view, name='data_cleanup_voter_hanging_data_process'),
re_path(r'^data_cleanup_voter_list_analysis/$',
views.data_cleanup_voter_list_analysis_view, name='data_cleanup_voter_list_analysis'),
re_path(r'^data_voter_statistics/$', views.data_voter_statistics_view, name='data_voter_statistics'),
re_path(r'^import_sample_data/$', views.import_sample_data_view, name='import_sample_data'),
re_path(r'^statistics/$', views.statistics_summary_view, name='statistics_summary'),
re_path(r'^sync_dashboard/$', views.sync_data_with_master_servers_view, name='sync_dashboard'),
]
| 55.111111
| 108
| 0.78293
| 209
| 1,488
| 5.052632
| 0.210526
| 0.1875
| 0.072917
| 0.072917
| 0.569129
| 0.387311
| 0.056818
| 0
| 0
| 0
| 0
| 0.000746
| 0.099462
| 1,488
| 26
| 109
| 57.230769
| 0.787313
| 0.051747
| 0
| 0
| 0
| 0
| 0.396588
| 0.312011
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22875dd3eed7789c404cf71dae058c78660c2f50
| 3,414
|
py
|
Python
|
hippynn/graphs/nodes/base/multi.py
|
tautomer/hippynn
|
df4504a5ea4680cfc61f490984dcddeac7ed99ee
|
[
"BSD-3-Clause"
] | 21
|
2021-11-17T00:56:35.000Z
|
2022-03-22T05:57:11.000Z
|
hippynn/graphs/nodes/base/multi.py
|
tautomer/hippynn
|
df4504a5ea4680cfc61f490984dcddeac7ed99ee
|
[
"BSD-3-Clause"
] | 4
|
2021-12-17T16:16:53.000Z
|
2022-03-16T23:50:38.000Z
|
hippynn/graphs/nodes/base/multi.py
|
tautomer/hippynn
|
df4504a5ea4680cfc61f490984dcddeac7ed99ee
|
[
"BSD-3-Clause"
] | 6
|
2021-11-30T21:09:31.000Z
|
2022-03-18T07:07:32.000Z
|
"""
A base node that provides several output tensors.
"""
from ....layers.algebra import Idx
from .base import SingleNode, Node
from .. import _debprint
from ...indextypes import IdxType
class IndexNode(SingleNode):
_input_names = ("parent",)
def __init__(self, name, parents, index, index_state=None):
if len(parents) != 1:
raise TypeError("Index node takes exactly one parent.")
par = parents[0]
iname = par._output_names[index] if hasattr(par, "_output_names") else "<{index}>".format(index=index)
repr_info = {"parent_name": par.name, "index": iname}
module = Idx(index, repr_info=repr_info)
self.index = index
self._index_state = IdxType.NotFound if index_state is None else index_state
super().__init__(name, parents, module=module)
class MultiNode(Node): # Multinode
_output_names = NotImplemented
_output_index_states = NotImplemented # optional?
_main_output = NotImplemented
def __init__(self, name, parents, module="auto", *args, db_name=None, **kwargs):
super().__init__(name, parents, *args, module=module, **kwargs)
self.children = tuple(
IndexNode(name + "." + cn, (self,), index=i, index_state=cidx)
for i, (cn, cidx) in enumerate(zip(self._output_names, self._output_index_states))
)
self.main_output.db_name = db_name
def set_dbname(self, db_name):
self.main_output.set_dbname(db_name)
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Enforce _output_index_states has same length as _output_names
if cls._output_index_states is not NotImplemented:
if len(cls._output_index_states) != len(cls._output_names):
raise AssertionError(
"Lengths of _child_index_states {} doesn't match lengths of ouput_names {}".format(
cls._output_index_states, cls._output_names
)
)
# Enforce no name conflict between input names and output names
if cls._input_names is not NotImplemented:
try:
assert all(o not in cls._input_names for o in cls._output_names)
except AssertionError as ae:
raise ValueError(
"Multi-node output names {} conflict with input names {}".format(
cls._output_names, cls._input_names
)
) from ae
def __dir__(self):
dir_ = super().__dir__()
if self._output_names is not NotImplemented:
dir_ = dir_ + list(self._output_names)
return dir_
def __getattr__(self, item):
if item in ("children", "_output_names"): # Guard against recursion
raise AttributeError("Attribute {} not yet present.".format(item))
try:
return super().__getattr__(item) # Defer to BaseNode first
except AttributeError:
pass
try:
return self.children[self._output_names.index(item)]
except (AttributeError, ValueError):
raise AttributeError("{} object has no attribute '{}'".format(self.__class__, item))
@property
def main_output(self):
if self._main_output is NotImplemented:
return super().main_output
return getattr(self, self._main_output)
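# Hedged usage sketch (not part of hippynn): a MultiNode subclass declares its
# named outputs and their index states as class attributes; __init_subclass__
# above checks that the two tuples have equal length and that output names do
# not collide with _input_names. The names below are purely illustrative.
class ExampleMultiNode(MultiNode):
    _input_names = ("features",)
    _output_names = ("value", "extra")
    _output_index_states = (IdxType.NotFound, IdxType.NotFound)
    _main_output = "value"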
| 38.795455
| 110
| 0.626245
| 399
| 3,414
| 5.022556
| 0.295739
| 0.082335
| 0.042415
| 0.02994
| 0.021956
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000811
| 0.277973
| 3,414
| 87
| 111
| 39.241379
| 0.81217
| 0.070592
| 0
| 0.044118
| 0
| 0
| 0.093038
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 1
| 0.102941
| false
| 0.014706
| 0.058824
| 0
| 0.323529
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228856c2bad586d523ebf387bffc058ae9b589d7
| 4,151
|
py
|
Python
|
barber/cutter.py
|
LSSTDESC/barber
|
9dbe69e69a078ef3b70a316807517e2a4d4e60cd
|
[
"MIT"
] | null | null | null |
barber/cutter.py
|
LSSTDESC/barber
|
9dbe69e69a078ef3b70a316807517e2a4d4e60cd
|
[
"MIT"
] | 6
|
2020-04-28T15:20:08.000Z
|
2020-04-28T15:37:02.000Z
|
barber/cutter.py
|
LSSTDESC/barber
|
9dbe69e69a078ef3b70a316807517e2a4d4e60cd
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import numpy as np
import numpy.random as npr
import scipy.optimize as spo
import tomo_challenge.metrics as tcm
# custom data type, could be replaced with/tie in to tree.py class
# cut_vals is (nfeat, nbins - 1) numpy array, float
# tree_ids is ((nbins,) * nfeat) numpy array, int
TreePars = namedtuple('TreePars', ['cut_vals', 'tree_ids'])
# should maybe put this function in a class so we can call TreePars.to_array
def treepars_to_array(treepars):
"""
Flattens cut_vals and tree_ids for optimizer
"""
cuts = treepars.cut_vals.flatten()
ids = treepars.tree_ids.flatten()
arr = np.concatenate((cuts, ids))
return(arr)
# should maybe put this function in a class so we can call TreePars.from_array
def array_to_treepars(arr):
"""
Converts optimizer format of 1D array back into namedtuple of arrays
"""
flat_cuts = arr[type(arr) == float]
flat_ids = arr[type(arr) == int]
nbins = len(np.unique(flat_ids))
nfeat = len(flat_cuts) / (nbins - 1)
# maybe do some assert checks with these just in case types have problems
# cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1))
# ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat)
cuts = flat_cuts.reshape((nfeat, nbins-1))
ids = flat_ids.reshape((nbins,) * nfeat)
treepars = TreePars(cuts, ids)
return(treepars)
def get_cuts(galaxies, ival_treepars=None, nbins=3):
"""
Obtains simplest possible bin definitions: cuts in the space of observables given number of bins
Parameters
----------
galaxies: numpy.ndarray, float
observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies
shape(galaxies) = (ngals, nfeat)
ival_treepars: namedtuple, numpy.ndarray, float and int, optional
initial values for decision tree parameters
shape(ivals.cut_vals) = (nfeat, (nbins - 1))
shape(tree_ids) = ((nbins,) * nfeat)
nbins: int, optional
number of bins for which to obtain cuts
Returns
-------
assignments: numpy.ndarray, int
bin assignment for each galaxy
shape(assignments) = (ngals, 1)
Notes
-----
`sort_gals` does the heavy lifting.
`eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py).
The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`.
"""
(ngals, nfeat) = np.shape(galaxies)
if ival_treepars is None:
cut_ivals = np.quantile(galaxies, np.linspace(0., 1., nbins), axis=1)
assert(len(cut_ivals.flatten()) == nbins**nfeat)
# need structure and way of making dumb version of these
tree_ids = npr.random_integers(0, nbins, nbins**nfeat)
assert(len(np.unique(tree_ids)) == nbins)
tree_ids.reshape((nfeat, nbins))
ival_treepars = TreePars(cut_ivals, tree_ids)
ivals = treepars_to_array(ival_treepars)
opt_res = spo.minimize(eval_metric, ivals, args=galaxies)
treepars = array_to_treepars(opt_res.x)
assignments = sort_gals(galaxies, treepars)
return(assignments)
def sort_gals(galaxies, tree_pars):
"""
Divides available galaxies into subsets according to a given decision tree on their observables
Parameters
----------
galaxies: nfeature x n_gal array
tree: tree object
Notes
-----
could be based on bisect, or maybe a sklearn object?
"""
pass
def eval_metric(arr, galaxies):
"""
Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API
Notes
-----
Replace `tcm.metric` with actual call to one of the tomo_challenge metrics
Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts...
"""
treepars = array_to_treepars(arr)
assignments = sort_gals(galaxies, treepars)
metval = tcm.metric(assignments)
return metval
| 35.478632
| 261
| 0.685859
| 586
| 4,151
| 4.757679
| 0.343003
| 0.022597
| 0.019727
| 0.012912
| 0.078192
| 0.03802
| 0.03802
| 0.03802
| 0.03802
| 0.03802
| 0
| 0.004589
| 0.212479
| 4,151
| 116
| 262
| 35.784483
| 0.848272
| 0.55312
| 0
| 0.05
| 0
| 0
| 0.014607
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.125
| false
| 0.025
| 0.1
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2288f93227622fced04679bfe49afbad16de4e0a
| 480
|
py
|
Python
|
examples/transfer/highscore.py
|
coding-world/matrix_max7219
|
3126604ee400a9ec1d25797f6957a2eae8a3f33c
|
[
"MIT"
] | null | null | null |
examples/transfer/highscore.py
|
coding-world/matrix_max7219
|
3126604ee400a9ec1d25797f6957a2eae8a3f33c
|
[
"MIT"
] | null | null | null |
examples/transfer/highscore.py
|
coding-world/matrix_max7219
|
3126604ee400a9ec1d25797f6957a2eae8a3f33c
|
[
"MIT"
] | null | null | null |
import shelve
regal = shelve.open('score.txt')
def updateScore(neuerScore):
if('score' in regal):
score = regal['score']
if(neuerScore not in score):
score.insert(0, neuerScore)
score.sort()
ranking = score.index(neuerScore)
ranking = len(score)-ranking
else:
score = [neuerScore]
ranking = 1
print(score)
print(ranking)
regal['score'] = score
return ranking
neuerScore = int(input("Neuer HighScore: \n"))
updateScore(neuerScore)
| 20
| 46
| 0.66875
| 58
| 480
| 5.534483
| 0.465517
| 0.093458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005208
| 0.2
| 480
| 24
| 47
| 20
| 0.830729
| 0
| 0
| 0
| 0
| 0
| 0.089397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.157895
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2289dcddf267c6a1a0e8cb907450531ad79de492
| 493
|
py
|
Python
|
urban-sound-classification/feature_merge.py
|
tensorflow-korea/tfk-notebooks
|
67831acce7f435500377bf03e6bd9d15fdd5f1bc
|
[
"MIT"
] | 50
|
2016-06-18T12:52:29.000Z
|
2021-12-10T07:13:20.000Z
|
urban-sound-classification/feature_merge.py
|
tensorflow-korea/tfk-notebooks
|
67831acce7f435500377bf03e6bd9d15fdd5f1bc
|
[
"MIT"
] | null | null | null |
urban-sound-classification/feature_merge.py
|
tensorflow-korea/tfk-notebooks
|
67831acce7f435500377bf03e6bd9d15fdd5f1bc
|
[
"MIT"
] | 51
|
2016-04-30T16:38:05.000Z
|
2021-01-15T18:12:03.000Z
|
import glob
import numpy as np
X = np.empty((0, 193))
y = np.empty((0, 10))
groups = np.empty((0, 1))
npz_files = glob.glob('./urban_sound_?.npz')
for fn in npz_files:
print(fn)
data = np.load(fn)
X = np.append(X, data['X'], axis=0)
y = np.append(y, data['y'], axis=0)
groups = np.append(groups, data['groups'], axis=0)
print(groups[groups>0])
print(X.shape, y.shape)
for r in y:
if np.sum(r) > 1.5:
print(r)
np.savez('urban_sound', X=X, y=y, groups=groups)
| 22.409091
| 54
| 0.602434
| 94
| 493
| 3.106383
| 0.329787
| 0.071918
| 0.082192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037688
| 0.192698
| 493
| 21
| 55
| 23.47619
| 0.69598
| 0
| 0
| 0
| 0
| 0
| 0.077079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228ad78fbc730707861e4c8d9c262be93d22bf72
| 485
|
py
|
Python
|
program/program/trackers/TrackerCorrelation.py
|
JankaSvK/thesis
|
c440ab8242b058f580fdf9d5a1d00708a1696561
|
[
"MIT"
] | 1
|
2018-11-29T14:13:47.000Z
|
2018-11-29T14:13:47.000Z
|
program/program/trackers/TrackerCorrelation.py
|
JankaSvK/thesis
|
c440ab8242b058f580fdf9d5a1d00708a1696561
|
[
"MIT"
] | 3
|
2018-04-24T18:30:00.000Z
|
2018-05-11T23:25:07.000Z
|
program/program/trackers/TrackerCorrelation.py
|
JankaSvK/thesis
|
c440ab8242b058f580fdf9d5a1d00708a1696561
|
[
"MIT"
] | null | null | null |
import dlib
class CorrelationTracker(object):
def init(self, image, bbox):
self.tracker = dlib.correlation_tracker()
x, y, x2, y2 = bbox
x2 += x
y2 += y
self.tracker.start_track(image, dlib.rectangle(x, y, x2, y2))
return True
def update(self, image):
self.tracker.update(image)
out = self.tracker.get_position()
return True, (out.left(), out.top(), out.right() - out.left(), out.bottom() - out.top())
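# Hedged usage sketch (not part of the original module): init() takes a frame
# and an (x, y, width, height) box, which it converts to a dlib.rectangle;
# update() returns a success flag plus the current box in the same
# (x, y, w, h) convention. Frame acquisition is left to the caller.
def track_sequence(frames, first_bbox):
    tracker = CorrelationTracker()
    tracker.init(frames[0], first_bbox)
    boxes = []
    for frame in frames[1:]:
        ok, bbox = tracker.update(frame)
        boxes.append(bbox)
    return boxes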
| 28.529412
| 96
| 0.585567
| 64
| 485
| 4.390625
| 0.453125
| 0.156584
| 0.02847
| 0.042705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016901
| 0.268041
| 485
| 16
| 97
| 30.3125
| 0.774648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228b9e5c3d1a55dd867bb42f9e9fbbc7ed2e9fc5
| 10,684
|
py
|
Python
|
SROMPy/optimize/ObjectiveFunction.py
|
jwarner308/SROMPy
|
12007e4cd99c88446f10974a93050405c5cd925b
|
[
"Apache-2.0"
] | 23
|
2018-05-13T05:13:03.000Z
|
2022-01-29T19:43:28.000Z
|
SROMPy/optimize/ObjectiveFunction.py
|
jwarner308/SROMPy
|
12007e4cd99c88446f10974a93050405c5cd925b
|
[
"Apache-2.0"
] | 11
|
2018-03-28T13:13:44.000Z
|
2022-03-30T18:56:57.000Z
|
SROMPy/optimize/ObjectiveFunction.py
|
jwarner308/SROMPy
|
12007e4cd99c88446f10974a93050405c5cd925b
|
[
"Apache-2.0"
] | 19
|
2018-06-01T14:49:30.000Z
|
2022-03-05T05:02:06.000Z
|
# Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
from SROMPy.target import RandomVector
from SROMPy.target.RandomEntity import RandomEntity
class ObjectiveFunction:
"""
Defines the objective function for optimizing SROM parameters. Calculates
errors between the statistics of the SROM and the target random vector
being modeled by it.
Will create objective function for optimization library (e.g. scipy) that
essentially wraps this class's evaluate function
"""
def __init__(self, srom, target, obj_weights=None, error='mean',
max_moment=5, num_cdf_grid_points=100):
"""
Initialize objective function. Pass in SROM & target random vector
objects that have been previously initialized. Objective function
calculates the errors between the statistics of this SROM and the
target random vector (these objects must have compute_moments,CDF,
corr_mat functions defined).
inputs:
-SROM - initialized SROM object
-targetRV - initialized RandomVector object (either
AnalyticRandomVector or SampleRandomVector) with same
dimension as SROM
-obj_weights - array of floats defining the relative weight of the
terms in the objective function. Terms are error in moments,
CDFs, and correlation matrix in that order. Default is equal
weights ([1.0,1.0,1.0])
-error - string 'mean','max', or 'sse' defining how error is defined
between the statistics of the SROM & target
-max_moment - int, max order to evaluate moment errors up to
-num_cdf_grid_points - int, # pts to evaluate CDF errors on
"""
self.__test_init_params(srom, target, obj_weights, error,
max_moment, num_cdf_grid_points)
self._SROM = srom
self._target = target
self._x_grid = None
# Generate grids for evaluating CDFs based on target RV's range
self.generate_cdf_grids(num_cdf_grid_points)
self._metric = error.upper()
self._max_moment = max_moment
def get_moment_error(self, samples, probabilities):
"""
Returns moment error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_moment_error()
def get_cdf_error(self, samples, probabilities):
"""
Returns CDF error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_cdf_error()
def get_corr_error(self, samples, probabilities):
"""
Returns correlation error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_correlation_error()
def evaluate(self, samples, probabilities):
"""
Evaluates the objective function for the specified SROM samples &
probabilities. Calculates errors in statistics between SROM/target
"""
error = 0.0
# SROM is set to the current values of samples/probabilities for stats.
self._SROM.set_params(samples, probabilities)
if self._weights[0] > 0.0:
cdf_error = self.compute_cdf_error()
error += cdf_error * self._weights[0]
if self._weights[1] > 0.0:
moment_error = self.compute_moment_error()
error += moment_error * self._weights[1]
if self._weights[2] > 0.0:
corr_error = self.compute_correlation_error()
error += corr_error * self._weights[2]
return error
def compute_moment_error(self):
"""
Calculate error in moments between SROM & target
"""
srom_moments = self._SROM.compute_moments(self._max_moment)
target_moments = self._target.compute_moments(self._max_moment)
# Reshape to 2D if returned as 1D for scalar RV.
if len(target_moments.shape) == 1:
target_moments = target_moments.reshape((self._max_moment, 1))
# Prevent divide by zero.
zero_indices = np.where(np.abs(target_moments) <= 1e-12)[0]
target_moments[zero_indices] = 1.0
# Squared relative difference:
if self._metric == "SSE":
rel_diffs = ((srom_moments-target_moments)/target_moments)**2.0
error = 0.5*np.sum(rel_diffs)
# Max absolute value:
elif self._metric == "MAX":
diffs = np.abs(srom_moments - target_moments)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_moments - target_moments)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_cdf_error(self):
"""
Calculate error in CDFs between SROM & target at pts in x_grid
"""
srom_cdfs = self._SROM.compute_cdf(self._x_grid)
target_cdfs = self._target.compute_cdf(self._x_grid)
# Check for 0 cdf values to prevent divide by zero.
nonzero_indices = np.where(target_cdfs[:, 0] > 0)[0]
srom_cdfs = srom_cdfs[nonzero_indices, :]
target_cdfs = target_cdfs[nonzero_indices, :]
if self._metric == "SSE":
squared_diffs = (srom_cdfs - target_cdfs)**2.0
rel_diffs = squared_diffs / target_cdfs**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_correlation_error(self):
"""
Calculate error in correlation matrix between SROM & target
"""
# Neglect for 1D random variable:
if self._target._dim == 1:
return 0.0
srom_corr = self._SROM.compute_corr_mat()
target_corr = self._target.compute_correlation_matrix()
if self._metric == "SSE":
squared_diffs = (srom_corr - target_corr)**2.0
rel_diffs = squared_diffs / target_corr**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_corr - target_corr)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_corr - target_corr)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def generate_cdf_grids(self, num_cdf_grid_points):
"""
Generate numerical grids for evaluating the CDF errors based on the
range of the target random vector. Create x_grid member variable with
num_cdf_grid_points along each dimension of the random vector.
"""
self._x_grid = np.zeros((num_cdf_grid_points, self._target._dim))
for i in range(self._target._dim):
grid = np.linspace(self._target.mins[i],
self._target.maxs[i],
num_cdf_grid_points)
self._x_grid[:, i] = grid
def __test_init_params(self, srom, target, obj_weights, error, max_moment,
num_cdf_grid_points):
"""
Due to the large numbers of parameters passed into __init__() that
need to be tested, the testing is done in this utility function
instead of __init__().
"""
# Test target.
if not (isinstance(target, RandomEntity)):
raise TypeError("target must inherit from RandomEntity.")
# Test srom.
from SROMPy.srom import SROM
if not isinstance(srom, SROM):
raise TypeError("srom must be of type SROM.")
# Ensure srom and target have same dimensions if target is RandomVector.
if isinstance(target, RandomVector):
if target._dim != srom._dim:
raise ValueError("target and srom must have same dimensions.")
# Test obj_weights.
if obj_weights is not None:
if isinstance(obj_weights, list):
obj_weights = np.array(obj_weights)
if not isinstance(obj_weights, np.ndarray):
raise TypeError("obj_weights must be of type ndarray or list.")
if len(obj_weights.shape) != 1:
raise ValueError("obj_weights must be a one dimensional array.")
if obj_weights.shape[0] != 3:
raise ValueError("obj_weights must have exactly 3 elements.")
if np.min(obj_weights) < 0.:
raise ValueError("obj_weights cannot be less than zero.")
self._weights = obj_weights
else:
self._weights = np.ones((3,))
# Test error function name.
if not isinstance(error, str):
raise TypeError("error must be a string: 'MEAN', 'MAX', or 'SSE'.")
if error.upper() not in ["MEAN", "MAX", "SSE"]:
raise ValueError("error must be either 'mean', 'max', or 'SSE'.")
# Test max_moment.
if not isinstance(max_moment, int):
raise TypeError("max_moment must be a positive integer.")
if max_moment < 1:
raise ValueError("max_moment must be a positive integer.")
# Test num_cdf_grid_points.
if not isinstance(num_cdf_grid_points, int):
raise TypeError("cf_grid_pts must be a positive integer.")
if num_cdf_grid_points < 1:
raise ValueError("num_cdf_grid_points must be a positive integer.")
| 37.356643
| 80
| 0.624579
| 1,340
| 10,684
| 4.79403
| 0.210448
| 0.02802
| 0.020237
| 0.032379
| 0.292653
| 0.206258
| 0.179328
| 0.151308
| 0.148661
| 0.128113
| 0
| 0.010097
| 0.295489
| 10,684
| 285
| 81
| 37.487719
| 0.843364
| 0.321041
| 0
| 0.288889
| 0
| 0
| 0.092944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.02963
| 0
| 0.17037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228bb0a969acb617ccc7d0b12b1281bd81283a5f
| 4,016
|
py
|
Python
|
test/utils.py
|
vasili-v/distcovery
|
e07882d55ebe2e4fd78a720764803e6b3e8cbc7d
|
[
"MIT"
] | null | null | null |
test/utils.py
|
vasili-v/distcovery
|
e07882d55ebe2e4fd78a720764803e6b3e8cbc7d
|
[
"MIT"
] | null | null | null |
test/utils.py
|
vasili-v/distcovery
|
e07882d55ebe2e4fd78a720764803e6b3e8cbc7d
|
[
"MIT"
] | null | null | null |
import os
import errno
import sys
def mock_directory_tree(tree):
tree = dict([(os.path.join(*key), value) \
for key, value in tree.iteritems()])
def listdir(path):
try:
names = tree[path]
except KeyError:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
if names is None:
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
return names
def isfile(path):
try:
item = tree[path]
except KeyError:
return False
return item is None
def isdir(path):
try:
item = tree[path]
except KeyError:
return False
return item is not None
return listdir, isfile, isdir
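# Hedged usage sketch (not part of the original module): mock_directory_tree
# turns a dict keyed by path tuples into listdir/isfile/isdir replacements,
# where a value of None marks a file and a tuple of names marks a directory.
# tree = {('.',): ('pkg',), ('.', 'pkg'): ('__init__.py',),
#         ('.', 'pkg', '__init__.py'): None}
# listdir, isfile, isdir = mock_directory_tree(tree)
# assert isdir(os.path.join('.', 'pkg'))
# assert isfile(os.path.join('.', 'pkg', '__init__.py'))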
class PreserveOs(object):
def setUp(self):
super(PreserveOs, self).setUp()
self.__listdir = os.listdir
self.__isfile = os.path.isfile
self.__isdir = os.path.isdir
def tearDown(self):
os.path.isdir = self.__isdir
os.path.isfile = self.__isfile
os.listdir = self.__listdir
super(PreserveOs, self).tearDown()
def full_test_tree(self):
tree = {('.',): ('__init__.py', 'test_first.py', 'test_second.py',
'test_sub_first', 't_sub_first', 'test_sub_third'),
('.', '__init__.py'): None,
('.', 'test_first.py'): None,
('.', 'test_second.py'): None,
('.', 'test_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 'test_sub_first', '__init__.py'): None,
('.', 'test_sub_first', 'test_sub_first.py'): None,
('.', 't_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 't_sub_first', '__init__.py'): None,
('.', 't_sub_first', 'test_sub_first.py'): None,
('.', 'test_sub_second'): ('test_sub_first.py',),
('.', 'test_sub_second', 'test_sub_first.py'): None,
('.', 'test_sub_third'): ('__init__.py', 'test_sub_first.py',
'test_sub_second'),
('.', 'test_sub_third', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second'): \
('__init__.py', 'test_sub_first.py', 't_sub_second.py'),
('.', 'test_sub_third', 'test_sub_second', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_second',
'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second',
't_sub_second.py'): None}
os.listdir, os.path.isfile, os.path.isdir = mock_directory_tree(tree)
self.expected_content = {'first': 'test_first',
'second': 'test_second',
'sub_first': 'test_sub_first',
'sub_first.sub_first': \
'test_sub_first.test_sub_first',
'sub_third': 'test_sub_third',
'sub_third.sub_first': \
'test_sub_third.test_sub_first',
'sub_third.sub_second': \
'test_sub_third.test_sub_second',
'sub_third.sub_second.sub_first': \
'test_sub_third.test_sub_second.' \
'test_sub_first'}
class ImportTrash(object):
def setUp(self):
self.modules_trash = []
self.meta_path_trash = []
def tearDown(self):
for item in self.meta_path_trash:
if item in sys.meta_path:
sys.meta_path.remove(item)
for name in self.modules_trash:
if name in sys.modules:
del sys.modules[name]
| 36.844037
| 80
| 0.493775
| 430
| 4,016
| 4.202326
| 0.139535
| 0.154953
| 0.126176
| 0.077476
| 0.449917
| 0.39845
| 0.351965
| 0.279469
| 0.194245
| 0.140564
| 0
| 0
| 0.372012
| 4,016
| 108
| 81
| 37.185185
| 0.716495
| 0
| 0
| 0.181818
| 0
| 0
| 0.248817
| 0.037111
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102273
| false
| 0
| 0.045455
| 0
| 0.238636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228d8328feac3519c1eb966b9a43a964120c8c6c
| 1,369
|
py
|
Python
|
tests/test_parser_create_site_users.py
|
WillAyd/tabcmd
|
1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15
|
[
"MIT"
] | null | null | null |
tests/test_parser_create_site_users.py
|
WillAyd/tabcmd
|
1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15
|
[
"MIT"
] | null | null | null |
tests/test_parser_create_site_users.py
|
WillAyd/tabcmd
|
1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15
|
[
"MIT"
] | null | null | null |
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
import argparse
from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser
from .common_setup import *
commandname = 'createsiteusers'
class CreateSiteUsersParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser_under_test, manager, mock_command = initialize_test_pieces(commandname)
CreateSiteUsersParser.create_site_user_parser(manager, mock_command)
def test_create_site_users_parser_users_file(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')) as open_file:
mock_args = [commandname, "users.csv"]
args = self.parser_under_test.parse_args(mock_args)
open_file.assert_called_with('users.csv', 'r', -1, None, None)
def test_create_site_user_parser_missing_arguments(self):
mock_args = [commandname]
with self.assertRaises(SystemExit):
args = self.parser_under_test.parse_args(mock_args)
def test_create_site_user_parser_role(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')):
mock_args = [commandname, "users.csv", '--site', 'site-name']
args = self.parser_under_test.parse_args(mock_args)
assert args.site == 'site-name', args
| 37
| 90
| 0.720964
| 172
| 1,369
| 5.430233
| 0.331395
| 0.051392
| 0.06424
| 0.06424
| 0.357602
| 0.299786
| 0.24197
| 0.24197
| 0.24197
| 0.11349
| 0
| 0.000896
| 0.184806
| 1,369
| 36
| 91
| 38.027778
| 0.836022
| 0
| 0
| 0.103448
| 0
| 0
| 0.073776
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 1
| 0.137931
| false
| 0
| 0.275862
| 0
| 0.448276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228e4efae17879a415faffa2bdf7cfbc08f32c9f
| 1,078
|
py
|
Python
|
secretsmanager_env.py
|
iarlyy/secretsmanager-env
|
3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73
|
[
"MIT"
] | 1
|
2020-02-13T17:11:29.000Z
|
2020-02-13T17:11:29.000Z
|
secretsmanager_env.py
|
iarlyy/secretsmanager-env
|
3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73
|
[
"MIT"
] | null | null | null |
secretsmanager_env.py
|
iarlyy/secretsmanager-env
|
3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import json
import os
import boto3
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''\
Output following the defined format.
Options are:
dotenv - dotenv style [default]
export - shell export style
stdout - secret plain value style'''
)
parser.add_argument(
'--output',
default='dotenv',
choices=['stdout', 'dotenv', 'export'],
)
args = parser.parse_args()
try:
secret_id = os.environ.get("ENV_SECRET_NAME")
secretsmanager = boto3.client('secretsmanager')
secret_values = json.loads(secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])
except:
print('Error getting secret')
raise
if args.output == 'export':
prefix = 'export '
else:
prefix = ''
if args.output != 'stdout':
for envvar in secret_values:
print(prefix+envvar+"=$'"+secret_values[envvar].replace('\\n', '\n')+"'")
else:
    print(json.dumps(secret_values, indent=2, sort_keys=True))  # secret_values is a dict, so no string replace here
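# Hedged usage sketch (not part of the original script): the secret name is
# read from ENV_SECRET_NAME, so a typical invocation from a shell could be:
#   ENV_SECRET_NAME=my-app/config python secretsmanager_env.py --output export
# The secret name above is illustrative only.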
| 24.5
| 99
| 0.670686
| 125
| 1,078
| 5.672
| 0.528
| 0.067701
| 0.03385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003421
| 0.186456
| 1,078
| 43
| 100
| 25.069767
| 0.805017
| 0.018553
| 0
| 0.057143
| 0
| 0
| 0.273415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.114286
| 0
| 0.114286
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228e74b0f9248fe2ef101b86260ca316c5578c5c
| 1,730
|
py
|
Python
|
109.py
|
juandarr/ProjectEuler
|
951705ac62f550d7fbecdc3f35ab8c38b53b9225
|
[
"MIT"
] | null | null | null |
109.py
|
juandarr/ProjectEuler
|
951705ac62f550d7fbecdc3f35ab8c38b53b9225
|
[
"MIT"
] | null | null | null |
109.py
|
juandarr/ProjectEuler
|
951705ac62f550d7fbecdc3f35ab8c38b53b9225
|
[
"MIT"
] | null | null | null |
"""
Finds the number of distinct ways a player can checkout a score less than 100
Author: Juan Rios
"""
import math
def checkout_solutions(checkout,sequence,idx_sq,d):
'''
returns the number of solution for a given checkout value
'''
counter = 0
for double in d:
if double>checkout:
break
res = checkout-double
if res==0:
counter +=1
continue
if res<=60:
if res in idx_sq:
index = idx_sq[res]
else:
index = len(sequence)-1
while res>sequence[index]:
index -=1
else:
index = len(sequence)-1
for idx in range(index,-1,-1):
a = sequence[idx]
if a==res:
counter+=1
continue
for idx2 in range(idx,-1,-1):
if a+sequence[idx2]==res:
counter +=1
elif a+sequence[idx2]<res:
break
return counter
def darts_checkout(limit_value):
s = [i for i in range(1,21)]+[25]
d = [2*i for i in range(1,21)]+[50]
t = [3*i for i in range(1,21)]
sequence = sorted(s+d+t)
idx_sq = {}
for idx in range(len(sequence)-1):
if sequence[idx]!=sequence[idx+1]:
idx_sq[sequence[idx]]=idx
idx_sq[sequence[-1]]=len(sequence)-1
n = limit_value
total = 0
for checkout in range(1,limit_value+1):
total += checkout_solutions(checkout,sequence,idx_sq,d)
return total
if __name__ == "__main__":
limit_value=99
print('The number of distinct ways a player can checkout a score less than 100 is {0}'.format(darts_checkout(limit_value)))
| 28.360656
| 128
| 0.540462
| 239
| 1,730
| 3.811715
| 0.280335
| 0.038419
| 0.052689
| 0.023052
| 0.308452
| 0.262349
| 0.262349
| 0.127333
| 0.127333
| 0.127333
| 0
| 0.042895
| 0.353179
| 1,730
| 61
| 128
| 28.360656
| 0.771224
| 0.088439
| 0
| 0.229167
| 0
| 0
| 0.055305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.020833
| 0
| 0.104167
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
228eb608e052e061a5945151be48c2a98a56d133
| 1,758
|
py
|
Python
|
setup.py
|
kinnala/gammy
|
85237d424001f77f296d724c95c8dec5803a8e1e
|
[
"MIT"
] | null | null | null |
setup.py
|
kinnala/gammy
|
85237d424001f77f296d724c95c8dec5803a8e1e
|
[
"MIT"
] | null | null | null |
setup.py
|
kinnala/gammy
|
85237d424001f77f296d724c95c8dec5803a8e1e
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup, find_packages
import versioneer
if __name__ == "__main__":
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
meta = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, 'gammy', '_meta.py')) as fp:
exec(fp.read(), meta)
setup(
name = "gammy",
version = versioneer.get_version(),
author = meta["__author__"],
author_email = meta["__contact__"],
description = "Generalized additive models with a Bayesian twist",
url = "https://github.com/malmgrek/Gammy",
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = [
"attrs",
"bayespy",
"h5py",
"matplotlib",
"numpy",
"scipy"
],
extras_require = {
"dev": [
"versioneer",
"pytest",
"hypothesis",
],
},
keywords = [
"Statistical modeling",
"Bayesian statistics",
"Machine learning",
],
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: {0}".format(meta["__license__"]),
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
long_description = read('README.md'),
long_description_content_type = "text/markdown",
)
| 30.842105
| 75
| 0.513083
| 150
| 1,758
| 5.733333
| 0.673333
| 0.034884
| 0.023256
| 0.032558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003556
| 0.360068
| 1,758
| 57
| 76
| 30.842105
| 0.760889
| 0
| 0
| 0.078431
| 0
| 0
| 0.291643
| 0.012507
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.058824
| 0.019608
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2290bfd1c4b65da8f41f786b9bf73bcded25e4b1
| 4,203
|
py
|
Python
|
predictors/scene_predictor.py
|
XenonLamb/higan
|
6e7b47f91df23d8d6075d95921e664c9fa4f1306
|
[
"MIT"
] | 83
|
2020-03-11T21:20:59.000Z
|
2022-03-17T10:08:27.000Z
|
predictors/scene_predictor.py
|
XenonLamb/higan
|
6e7b47f91df23d8d6075d95921e664c9fa4f1306
|
[
"MIT"
] | 8
|
2020-04-16T14:37:42.000Z
|
2021-09-20T20:18:06.000Z
|
predictors/scene_predictor.py
|
billzhonggz/higan
|
168f24f7e3969bc8dc580e2c997463e76644c17f
|
[
"MIT"
] | 19
|
2020-04-13T02:55:51.000Z
|
2022-01-28T06:37:25.000Z
|
# python 3.7
"""Predicts the scene category, attribute."""
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from .base_predictor import BasePredictor
from .scene_wideresnet import resnet18
__all__ = ['ScenePredictor']
NUM_CATEGORIES = 365
NUM_ATTRIBUTES = 102
FEATURE_DIM = 512
class ScenePredictor(BasePredictor):
"""Defines the predictor class for scene analysis."""
def __init__(self):
super().__init__('scene')
def build(self):
self.net = resnet18(num_classes=NUM_CATEGORIES)
def load(self):
# Load category labels.
self.check_attr('category_anno_path')
self.category_name_to_idx = {}
self.category_idx_to_name = {}
with open(self.category_anno_path, 'r') as f:
for line in f:
name, idx = line.strip().split(' ')
name = name[3:].replace('/', '__')
idx = int(idx)
self.category_name_to_idx[name] = idx
self.category_idx_to_name[idx] = name
assert len(self.category_name_to_idx) == NUM_CATEGORIES
assert len(self.category_idx_to_name) == NUM_CATEGORIES
# Load attribute labels.
self.check_attr('attribute_anno_path')
self.attribute_name_to_idx = {}
self.attribute_idx_to_name = {}
with open(self.attribute_anno_path, 'r') as f:
for idx, line in enumerate(f):
name = line.strip().replace(' ', '_')
self.attribute_name_to_idx[name] = idx
self.attribute_idx_to_name[idx] = name
assert len(self.attribute_name_to_idx) == NUM_ATTRIBUTES
assert len(self.attribute_idx_to_name) == NUM_ATTRIBUTES
# Transform for input images.
self.transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Load pre-trained weights for category prediction.
checkpoint = torch.load(self.weight_path,
map_location=lambda storage, loc: storage)
state_dict = {k.replace('module.', ''): v
for k, v in checkpoint['state_dict'].items()}
self.net.load_state_dict(state_dict)
fc_weight = list(self.net.parameters())[-2].data.numpy()
fc_weight[fc_weight < 0] = 0
# Load additional weights for attribute prediction.
self.check_attr('attribute_additional_weight_path')
self.attribute_weight = np.load(self.attribute_additional_weight_path)
assert self.attribute_weight.shape == (NUM_ATTRIBUTES, FEATURE_DIM)
def _predict(self, images):
if not isinstance(images, np.ndarray):
raise ValueError(f'Images should be with type `numpy.ndarray`!')
if images.dtype != np.uint8:
raise ValueError(f'Images should be with dtype `numpy.uint8`!')
if not (len(images.shape) == 4 and
0 < images.shape[0] <= self.batch_size and
images.shape[3] == self.image_channels):
raise ValueError(f'Images should be with shape [batch_size, height '
f'width, channel], where `batch_size` no larger than '
f'{self.batch_size}, and `channel` equals to '
f'{self.image_channels}!\n'
f'But {images.shape} received!')
xs = [self.transform(Image.fromarray(img)).unsqueeze(0) for img in images]
xs = torch.cat(xs, dim=0).to(self.run_device)
logits, features = self.net(xs)
category_scores = self.get_value(F.softmax(logits, dim=1))
features = self.get_value(features).squeeze(axis=(2, 3))
attribute_scores = features.dot(self.attribute_weight.T)
assert (len(category_scores.shape) == 2 and
category_scores.shape[1] == NUM_CATEGORIES)
assert (len(attribute_scores.shape) == 2 and
attribute_scores.shape[1] == NUM_ATTRIBUTES)
results = {
'category': category_scores,
'attribute': attribute_scores,
}
if self.use_cuda:
torch.cuda.empty_cache()
return results
def predict(self, images, **kwargs):
return self.batch_run(images, self._predict)
| 36.232759
| 79
| 0.647395
| 543
| 4,203
| 4.793738
| 0.292818
| 0.054937
| 0.020745
| 0.020745
| 0.176719
| 0.122935
| 0.061468
| 0.022282
| 0
| 0
| 0
| 0.019988
| 0.238163
| 4,203
| 115
| 80
| 36.547826
| 0.792942
| 0.064716
| 0
| 0
| 0
| 0
| 0.107632
| 0.014737
| 0
| 0
| 0
| 0
| 0.081395
| 1
| 0.05814
| false
| 0
| 0.081395
| 0.011628
| 0.174419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2291547d5512bbb1bda47b665f654ae2a6cde5f2
| 652
|
py
|
Python
|
src/etc/gec/3.py
|
iml1111/algorithm-study
|
f21f6f9f43235248f3496f034a899f2314ab6fcc
|
[
"MIT"
] | null | null | null |
src/etc/gec/3.py
|
iml1111/algorithm-study
|
f21f6f9f43235248f3496f034a899f2314ab6fcc
|
[
"MIT"
] | null | null | null |
src/etc/gec/3.py
|
iml1111/algorithm-study
|
f21f6f9f43235248f3496f034a899f2314ab6fcc
|
[
"MIT"
] | null | null | null |
from collections import deque
def solution(N, bus_stop):
answer = [[1300 for _ in range(N)] for _ in range(N)]
bus_stop = [(x-1, y-1) for x,y in bus_stop]
q = deque(bus_stop)
for x,y in bus_stop:
answer[x][y] = 0
while q:
x, y = q.popleft()
for nx, ny in ((x-1, y), (x+1, y), (x, y+1), (x, y-1)):
if (
0 <= nx < N and 0 <= ny < N
and answer[nx][ny] > answer[x][y]
):
answer[nx][ny] = answer[x][y] + 1
q.append((nx, ny))
return answer
if __name__ == '__main__':
print(solution(
3, [[1,2],[3,3]],
))
| 27.166667
| 63
| 0.45092
| 105
| 652
| 2.657143
| 0.304762
| 0.057348
| 0.032258
| 0.078853
| 0.229391
| 0.229391
| 0
| 0
| 0
| 0
| 0
| 0.046455
| 0.372699
| 652
| 24
| 64
| 27.166667
| 0.635697
| 0
| 0
| 0
| 0
| 0
| 0.012251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.142857
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22941cdcf437ea8fe9f771e15f228dacff7fbb5f
| 5,452
|
py
|
Python
|
plaso/parsers/winreg_plugins/usbstor.py
|
berggren/plaso
|
2658c80c5076f97a9a27272e73997bde8c39e875
|
[
"Apache-2.0"
] | 2
|
2020-02-09T01:11:08.000Z
|
2021-09-17T04:16:31.000Z
|
plaso/parsers/winreg_plugins/usbstor.py
|
berggren/plaso
|
2658c80c5076f97a9a27272e73997bde8c39e875
|
[
"Apache-2.0"
] | null | null | null |
plaso/parsers/winreg_plugins/usbstor.py
|
berggren/plaso
|
2658c80c5076f97a9a27272e73997bde8c39e875
|
[
"Apache-2.0"
] | 1
|
2021-03-17T09:47:01.000Z
|
2021-03-17T09:47:01.000Z
|
# -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
from __future__ import unicode_literals
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class USBStorEventData(events.EventData):
"""USBStor event data attribute container.
Attributes:
device_type (str): type of USB device.
display_name (str): display name of the USB device.
key_path (str): Windows Registry key path.
parent_id_prefix (str): parent identifier prefix of the USB device.
product (str): product of the USB device.
serial (str): serial number of the USB device.
revision (str): revision number of the USB device.
subkey_name (str): name of the Windows Registry subkey.
vendor (str): vendor of the USB device.
"""
DATA_TYPE = 'windows:registry:usbstor'
def __init__(self):
"""Initializes event data."""
super(USBStorEventData, self).__init__(data_type=self.DATA_TYPE)
self.device_type = None
self.display_name = None
self.key_path = None
self.parent_id_prefix = None
self.product = None
self.revision = None
self.serial = None
# TODO: rename subkey_name to something that closer matches its purpose.
self.subkey_name = None
self.vendor = None
class USBStorPlugin(interface.WindowsRegistryPlugin):
"""USBStor key plugin.
Also see:
http://www.forensicswiki.org/wiki/USB_History_Viewing
"""
NAME = 'windows_usbstor_devices'
DESCRIPTION = 'Parser for USB Plug And Play Manager USBStor Registry Key.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR')])
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
for subkey in registry_key.GetSubkeys():
subkey_name = subkey.name
name_values = subkey_name.split('&')
number_of_name_values = len(name_values)
# Normally we expect 4 fields here however that is not always the case.
if number_of_name_values != 4:
logger.warning(
'Expected 4 &-separated values in: {0:s}'.format(subkey_name))
event_data = USBStorEventData()
event_data.key_path = registry_key.path
event_data.subkey_name = subkey_name
if number_of_name_values >= 1:
event_data.device_type = name_values[0]
if number_of_name_values >= 2:
event_data.vendor = name_values[1]
if number_of_name_values >= 3:
event_data.product = name_values[2]
if number_of_name_values >= 4:
event_data.revision = name_values[3]
if subkey.number_of_subkeys == 0:
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
continue
for device_key in subkey.GetSubkeys():
event_data.serial = device_key.name
friendly_name_value = device_key.GetValueByName('FriendlyName')
if friendly_name_value:
event_data.display_name = friendly_name_value.GetDataAsObject()
# ParentIdPrefix applies to Windows XP Only.
parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')
if parent_id_prefix_value:
event_data.parent_id_prefix = parent_id_prefix_value.GetDataAsObject()
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# Win7 - Last Connection.
# Vista/XP - Time of an insert.
event = time_events.DateTimeValuesEvent(
device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
device_parameter_key = device_key.GetSubkeyByName('Device Parameters')
if device_parameter_key:
event = time_events.DateTimeValuesEvent(
device_parameter_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
log_configuration_key = device_key.GetSubkeyByName('LogConf')
if log_configuration_key:
event = time_events.DateTimeValuesEvent(
log_configuration_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
properties_key = device_key.GetSubkeyByName('Properties')
if properties_key:
event = time_events.DateTimeValuesEvent(
properties_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
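# Illustrative sketch (not part of plaso): how a USBSTOR subkey name such as
# 'Disk&Ven_SanDisk&Prod_Cruzer&Rev_1.26' (hypothetical device) maps onto the
# event data fields populated by ExtractEvents above.
def _SplitUSBStorSubkeyName(subkey_name):
  """Splits a USBSTOR subkey name into its &-separated components."""
  fields = ['device_type', 'vendor', 'product', 'revision']
  return dict(zip(fields, subkey_name.split('&')))
# _SplitUSBStorSubkeyName('Disk&Ven_SanDisk&Prod_Cruzer&Rev_1.26') returns
# {'device_type': 'Disk', 'vendor': 'Ven_SanDisk', 'product': 'Prod_Cruzer',
#  'revision': 'Rev_1.26'}.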
| 37.6
| 80
| 0.716985
| 644
| 5,452
| 5.807453
| 0.270186
| 0.043316
| 0.012834
| 0.02246
| 0.290107
| 0.22139
| 0.21016
| 0.21016
| 0.21016
| 0.21016
| 0
| 0.003485
| 0.210565
| 5,452
| 144
| 81
| 37.861111
| 0.865474
| 0.238995
| 0
| 0.204819
| 0
| 0
| 0.065191
| 0.026322
| 0
| 0
| 0
| 0.006944
| 0
| 1
| 0.024096
| false
| 0
| 0.084337
| 0
| 0.180723
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
229d03edb58694ea053e0d0cf56108a3ca34b32c
| 17,257
|
py
|
Python
|
rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py
|
MIMUW-RL/spp-rl
|
86b96cdd220cc4eae86f7cfd26924c69b498dcc6
|
[
"MIT"
] | 7
|
2020-06-15T12:25:53.000Z
|
2021-11-03T01:08:47.000Z
|
rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py
|
MIMUW-RL/spp-rl
|
86b96cdd220cc4eae86f7cfd26924c69b498dcc6
|
[
"MIT"
] | null | null | null |
rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py
|
MIMUW-RL/spp-rl
|
86b96cdd220cc4eae86f7cfd26924c69b498dcc6
|
[
"MIT"
] | 1
|
2020-12-21T11:21:22.000Z
|
2020-12-21T11:21:22.000Z
|
import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit.acm.off_policy import AcMOffPolicy
from rltoolkit.algorithms import DDPG
from rltoolkit.algorithms.ddpg.models import Actor, Critic
class DDPG_AcM(AcMOffPolicy, DDPG):
def __init__(
self, unbiased_update: bool = False, custom_loss: float = 0.0,
custom_loss_target: float = 0.0, custom_loss_lr: float = 0.0001,
refill_buffer: bool = False,
lagrangian_custom_loss: bool = False, separate_custom_loss: bool = False,
cw_cl_targets: list = None, custom_loss_target_decay: int = None,
custom_loss_target_dfactor: float = None,
*args, **kwargs,
):
f"""DDPG with AcM class
Args:
unbiased_update (bool, optional): Use next_obs as action for update.
Defaults to { False }.
            refill_buffer (bool, optional): if the buffer should be refilled with new observations when it's full.
                Defaults to {False}.
"""
super().__init__(*args, **kwargs)
self.unbiased_update = unbiased_update
self.actor = Actor(
self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim
)
if not self.acm_critic:
self.critic = Critic(self.ob_dim, ac_dim=self.actor_output_dim)
self.custom_loss = custom_loss
custom_loss_scaled = np.log(np.exp(custom_loss) - 1)
self.custom_loss_param = torch.tensor(custom_loss_scaled) if not separate_custom_loss else torch.Tensor([custom_loss_scaled] * self.actor_output_dim)
self.custom_loss_param.requires_grad = lagrangian_custom_loss
self.custom_loss_target = custom_loss_target
self.cw_cl_targets = cw_cl_targets
if lagrangian_custom_loss and cw_cl_targets:
self.custom_loss_target = cw_cl_targets
self.lagrangian_custom_loss = lagrangian_custom_loss
self.custom_loss_lr = custom_loss_lr
self.separate_custom_loss = separate_custom_loss
self.custom_loss_optimizer = self.opt([self.custom_loss_param], lr=custom_loss_lr)
self.refill_buffer = refill_buffer
self.custom_loss_target_decay = custom_loss_target_decay
self.custom_loss_target_dfactor = custom_loss_target_dfactor
if self.custom_loss:
self.loss["ddpg"] = 0.0
self.loss["dist"] = 0.0
if lagrangian_custom_loss:
if self.separate_custom_loss:
self.distances = []
for i in range(self.actor_output_dim):
self.loss[f"custom_loss_param/{i}"] = 0.0
else:
self.loss["custom_loss_param"] = 0.0
new_hparams = {
"hparams/unbiased_update": self.unbiased_update,
"hparams/custom_loss": self.custom_loss,
"hparams/lagrangian_cl": self.lagrangian_custom_loss,
"hparams/custom_loss_target_decay": self.custom_loss_target_decay,
"hparams/custom_loss_target_dfactor": self.custom_loss_target_dfactor,
}
if self.lagrangian_custom_loss:
if self.cw_cl_targets is None:
new_hparams["hparams/cl_target"] = self.custom_loss_target
new_hparams["hparams/cl_lr"] = self.custom_loss_lr
self.hparams_acm.update(new_hparams)
self.hparams.update(self.hparams_acm)
def noise_action(self, obs, act_noise, deterministic=False):
action, _ = self._actor.act(obs, deterministic)
noise = act_noise * torch.randn(self.actor_output_dim, device=self.device)
action += noise * self.actor_ac_lim
action = np.clip(
action.cpu(), -1.1 * self.actor_ac_lim.cpu(), 1.1 * self.actor_ac_lim.cpu()
)
action = action.to(self.device)
if self.denormalize_actor_out:
action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
return action
def custom_loss_target_decay_condition(self):
        return (
self.custom_loss_target_decay is not None
and self.custom_loss_target_dfactor is not None
and self.iterations > 0
and self.stats_logger.frames % self.custom_loss_target_decay == 0
)
def acm_update_condition(self):
return (
self.iteration > 0
and self.acm_epochs > 0
and self.stats_logger.frames % self.acm_update_freq == 0
)
def make_unbiased_update(self):
if self.update_condition():
for _ in range(self.grad_steps):
batch = self.replay_buffer.sample_batch(
self.update_batch_size, self.device
)
obs, next_obs, _, reward, done, acm_action = batch
self.update(
obs=obs,
next_obs=next_obs,
action=next_obs,
reward=reward,
done=done,
acm_action=acm_action,
)
def make_update(self):
if self.unbiased_update:
self.make_unbiased_update()
else:
super().make_update()
if self.custom_loss_target_decay_condition():
self.custom_loss_target *= self.custom_loss_target_dfactor
            print(f"CUSTOM LOSS TARGET DECAY, CURRENT VALUE {self.custom_loss_target}")
if self.acm_update_condition():
if self.acm_update_batches:
self.update_acm_batches(self.acm_update_batches)
else:
self.update_acm(self.acm_epochs)
def collect_params_dict(self):
params_dict = super().collect_params_dict()
params_dict["acm"] = self.acm.state_dict()
return params_dict
def apply_params_dict(self, params_dict):
super().apply_params_dict(params_dict)
self.acm.load_state_dict(params_dict["acm"])
def save_model(self, save_path=None):
save_path = DDPG.save_model(self, save_path)
torch.save(self.acm.state_dict(), save_path + "_acm_model.pt")
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
next_action, _ = self.actor_targ(next_obs)
next_action = self.replay_buffer.denormalize(next_action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([next_obs, next_action], axis=1)
next_action = self.acm(acm_obs)
q_target = self.critic_targ(next_obs, next_action)
qfunc_target = reward + self.gamma * (1 - done) * q_target
return qfunc_target
def add_custom_loss(self, loss, action, denorm_action, next_obs):
if self.custom_loss:
self.loss["ddpg"] = loss.item()
if self.norm_closs:
next_obs = self.replay_buffer.normalize(next_obs, force=True)
else:
action = denorm_action
if not self.separate_custom_loss:
loss_dist = F.mse_loss(action, self.cut_obs(next_obs))
self.loss["dist"] = loss_dist.item()
if self.lagrangian_custom_loss:
loss += F.softplus(self.custom_loss_param) * (loss_dist - self.custom_loss_target)
else:
loss += self.custom_loss * loss_dist
if self.custom_loss_target_decay is not None:
self.loss["custom_loss_target"] = self.custom_loss_target
else:
distances = torch.mean(F.mse_loss(action, self.cut_obs(next_obs), reduction='none'), dim=0)
if self.cw_cl_targets is None:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - self.custom_loss_target))
else:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - torch.Tensor(self.custom_loss_target)))
self.loss["dist"] = distances.detach()
if self.debug_mode:
for j in range(distances.shape[0]):
self.loss[f"dist/cw/{j}"] = distances[j]
return loss
def compute_pi_loss(self, obs, next_obs):
action, _ = self._actor(obs)
denorm_action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([obs, denorm_action], axis=1)
critic_action = self.acm(acm_obs)
else:
critic_action = denorm_action
loss = -self._critic(obs, critic_action).mean()
return self.add_custom_loss(loss, action, denorm_action, next_obs)
def update_custom_loss_param_loss(self):
if not self.lagrangian_custom_loss:
return
dist_loss = self.loss["dist"]
if self.cw_cl_targets is None:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - self.custom_loss_target)
else:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - torch.Tensor(self.custom_loss_target))
if self.separate_custom_loss:
for i in range(len(loss)):
self.loss[f"custom_loss_param/{i}"] = loss[i].item()
self.loss["dist"] = torch.mean(self.loss["dist"]).item()
loss = torch.sum(loss)
else:
self.loss["custom_loss_param"] = loss.item()
self.custom_loss_optimizer.zero_grad()
loss.backward()
self.custom_loss_optimizer.step()
def copy_offline_dataset(self, dataset, size):
"""copies the provided offlineRL dataset into the replay buffer.
        For the moment this assumes the D4RL dataset format (a dictionary)
        and copies elements one-by-one.
"""
i = 0
traj = 0
while i < size:
traj += 1
done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i])
obs = torch.tensor(dataset['observations'][i])
prev_idx = self.replay_buffer.add_obs(obs)
i += 1
ep_len = 0
            while not done and i < size:
                nextobs = torch.tensor(dataset['observations'][i])
                rew = torch.tensor(dataset['rewards'][i])
                done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i])
                action = torch.tensor(dataset['actions'][i])
                end = torch.tensor(dataset['terminals'][i])
next_idx = self.replay_buffer.add_obs(nextobs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, nextobs, rew, done, end
)
self.replay_buffer.add_acm_action(action)
prev_idx = next_idx
i += 1
ep_len += 1
print(f"copied offline dataset with {i} samples, contains {traj} trajectories")
        # Sets the internal variables according to the provided offline dataset.
self.acm_pre_train_samples = i
self.buffer_size = i
self.max_frames = i
self.iterations = i / self.steps_per_epoch
        # Updates the std/dev/min/max parameters of the dataset.
self.update_obs_mean_std(self.replay_buffer)
def collect_batch_and_train(self, steps_per_epoch: int, *args, **kwargs):
"""SPP variant of rollouts and collect samples if there is enough samples
        in the replay buffer, existing samples are used to perform the actor/critic
        update; otherwise new samples are generated until steps_per_epoch steps
        have been added to the replay buffer.
Args:
steps_per_epoch (int): number of samples to collect and train
*args, **kwargs: arguments for make_update
"""
collected = 0
while collected < steps_per_epoch:
            # Important: when the replay buffer is full, stop generating new frames
            # and keep using the existing buffer, so that the amount of experience
            # used in learning is counted correctly.
if (self.stats_logger.frames >= self.buffer_size - self.acm_pre_train_samples) and not self.refill_buffer:
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
continue
self.stats_logger.rollouts += 1
obs = self.env.reset()
            # end - end of the episode from the perspective of the simulation
            # done - end of the episode from the perspective of the model
end = False
obs = self.process_obs(obs)
prev_idx = self.replay_buffer.add_obs(obs)
ep_len = 0
while not end:
obs = self.replay_buffer.normalize(obs)
if (self.stats_logger.frames > self.acm_pre_train_samples) and (self.stats_logger.frames <= self.acm_pre_train_samples + self.random_frames):
action = self.initial_act(obs)
else:
action = self.noise_action(obs, self.act_noise)
action_proc = self.process_action(action, obs)
prev_obs = obs
obs, rew, done, _ = self.env.step(action_proc)
ep_len += 1
end = True if ep_len == self.max_ep_len else done
done = False if ep_len == self.max_ep_len else done
obs = self.process_obs(obs)
if self.next_obs_diff is not None:
obs = self.compute_next_obs_diff(prev_obs, obs)
next_idx = self.replay_buffer.add_obs(obs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, action, rew, done, end
)
prev_idx = next_idx
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
acm_action: torch.Tensor,
):
"""DDPG update step
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
acm_action (torch.Tensor): tensor of acm actions
"""
for param in self.acm.parameters():
param.requires_grad = False
if self.acm_critic:
action = acm_action
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-function by one step
y_q = self._critic(obs, action)
loss_q = F.mse_loss(y_q, y)
self.loss["critic"] = loss_q.item()
self.critic_optimizer.zero_grad()
loss_q.backward()
self.critic_optimizer.step()
# Update policy by one step
self._critic.eval()
loss = self.compute_pi_loss(obs, next_obs)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
        # Update the temperature of the Lagrangian optimization objective.
self.update_custom_loss_param_loss()
# Update target networks
self.update_target_nets()
self._critic.train()
for param in self.acm.parameters():
param.requires_grad = True
def add_tensorboard_logs(self, buffer, done):
super().add_tensorboard_logs(buffer, done)
if self.lagrangian_custom_loss:
self.tensorboard_writer.log_custom_loss_param(
self.iteration, self.custom_loss_param)
if __name__ == "__main__":
#with torch.cuda.device(0):
model = DDPG_AcM(
# unbiased_update=True,
# custom_loss=True,
# acm_update_batches=50,
# denormalize_actor_out=True,
env_name="Pendulum-v0",
buffer_size=50000,
act_noise=0.05,
iterations=100,
gamma=0.99,
steps_per_epoch=200,
stats_freq=5,
test_episodes=3,
custom_loss=1,
lagrangian_custom_loss=False,
# tensorboard_dir="logs_ddpg",
# tensorboard_comment="",
acm_update_freq=200,
acm_epochs=1,
acm_pre_train_epochs=10,
acm_pre_train_samples=10000,
use_gpu=False,
render=False,
)
model.pre_train()
model.train()
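# Minimal sketch (illustrative, reuses the module imports above; the value 0.5 is
# hypothetical): the constructor stores the custom-loss coefficient through an
# inverse softplus, so F.softplus(custom_loss_param) starts at the configured
# value and stays positive while the Lagrangian multiplier is optimized.
def _inverse_softplus_demo(custom_loss: float = 0.5) -> float:
    custom_loss_scaled = np.log(np.exp(custom_loss) - 1)  # inverse softplus, requires custom_loss > 0
    param = torch.tensor(custom_loss_scaled, requires_grad=True)
    return F.softplus(param).item()  # recovers ~custom_loss, differentiable in param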
| 39.042986
| 157
| 0.589963
| 2,098
| 17,257
| 4.577216
| 0.13775
| 0.094762
| 0.058315
| 0.043736
| 0.364261
| 0.257732
| 0.180464
| 0.13652
| 0.097886
| 0.050817
| 0
| 0.007116
| 0.3241
| 17,257
| 441
| 158
| 39.131519
| 0.816187
| 0.102915
| 0
| 0.17134
| 0
| 0
| 0.057514
| 0.011621
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05296
| false
| 0
| 0.018692
| 0.006231
| 0.096573
| 0.006231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a0ba4419e5d5479b0eea3b85e6ded458dffecb
| 13,025
|
py
|
Python
|
pelutils/logger.py
|
peleiden/pelutils
|
9860734c0e06481aa58a9f767a4cfb5129cb48ec
|
[
"BSD-3-Clause"
] | 3
|
2021-02-28T13:03:12.000Z
|
2022-01-01T09:53:33.000Z
|
pelutils/logger.py
|
peleiden/pelutils
|
9860734c0e06481aa58a9f767a4cfb5129cb48ec
|
[
"BSD-3-Clause"
] | 72
|
2020-10-13T09:20:01.000Z
|
2022-02-26T09:12:21.000Z
|
pelutils/logger.py
|
peleiden/pelutils
|
9860734c0e06481aa58a9f767a4cfb5129cb48ec
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
import os
import traceback as tb
from collections import defaultdict
from enum import IntEnum
from functools import update_wrapper
from itertools import chain
from typing import Any, Callable, DefaultDict, Generator, Iterable, Optional
from pelutils import get_timestamp, get_repo
from .format import RichString
class LogLevels(IntEnum):
""" Logging levels by priority. Don't set any to 0, as falsiness is used in the code """
SECTION = 6
CRITICAL = 5
ERROR = 4
WARNING = 3
INFO = 2
DEBUG = 1
_STDERR_LEVELS = { LogLevels.CRITICAL, LogLevels.ERROR, LogLevels.WARNING }
# https://rich.readthedocs.io/en/stable/appendix/colors.html
_TIMESTAMP_COLOR = "#72b9e0"
_LEVEL_FORMAT = {
LogLevels.SECTION: "bright_yellow",
LogLevels.CRITICAL: "red1",
LogLevels.ERROR: "red3",
LogLevels.WARNING: "gold3",
LogLevels.INFO: "chartreuse3",
LogLevels.DEBUG: "deep_sky_blue1",
}
class _LevelManager:
"""
Used for disabling logging below a certain level
Example:
with log.level(Levels.WARNING):
log.error("This will be logged")
log.info("This will not be logged")
"""
level: LogLevels
is_active = False
def with_level(self, level: LogLevels | int) -> _LevelManager:
self.level = level
return self
def __enter__(self):
self.is_active = True
def __exit__(self, *args):
self.is_active = False
del self.level # Prevent silent failures by having level accidentally set
class _LogErrors:
"""
    Used for catching exceptions with the logger and logging them before re-raising
"""
def __init__(self, log):
self._log = log
def __enter__(self):
pass
def __exit__(self, et, ev, tb_):
if et and self._log._collect:
self._log.log_collected()
if et:
self._log._throw(ev, tb_)
class LoggingException(RuntimeError):
pass
class _Logger:
"""
A simple logger which creates a log file and pushes strings both to stdout and the log file
    Sections, verbosity and error logging are supported
"""
_loggers: DefaultDict[str, dict[str, Any]]
_selected_logger: str
_maxlen = max(len(l.name) for l in LogLevels)
_spacing = 4 * " "
_yes = { "j", "y" }
_no = { "n" }
@property
def _logger(self) -> dict:
return self._loggers[self._selected_logger]
@property
def _fpath(self) -> str:
return self._logger["fpath"]
@property
def _default_sep(self) -> str:
return self._logger["default_sep"]
@property
def _include_micros(self) -> bool:
return self._logger["include_micros"]
@property
def _print_level(self) -> LogLevels:
return self._logger["print_level"]
@property
def _level_mgr(self) -> _LevelManager:
return self._logger["level_mgr"]
@property
def _level(self) -> LogLevels:
return self._level_mgr.level
def __init__(self):
self._log_errors = _LogErrors(self)
self._collect = False
self._collected_log: list[RichString] = list()
self._collected_print: list[RichString] = list()
self._loggers = defaultdict(dict)
self.clean()
self.configure(logger_name="print_only", print_level=LogLevels.DEBUG)
def configure(
self,
        fpath: Optional[str] = None,  # Path to place the log file. Any missing directories are created
title: Optional[str] = None, # Title on first line of logfile
default_seperator = "\n",
include_micros = False, # Include microseconds in timestamps
log_commit = False, # Log commit of git repository
logger_name = "default", # Name of logger
append = False, # Set to True to append to old log file instead of overwriting it
print_level = LogLevels.INFO, # Highest level that will be printed. All will be logged. None for no print
):
""" Configure a logger. If not called, the logger will act like a print statement """
if logger_name in self._loggers:
raise LoggingException("Logger '%s' already exists. Did you call log.configure(...) twice?" % logger_name)
if self._collect:
raise LoggingException("Cannot configure a new logger while using collect_logs")
self._selected_logger = logger_name
self._loggers[logger_name]["fpath"] = os.path.realpath(fpath) if fpath else None
self._loggers[logger_name]["default_sep"] = default_seperator
self._loggers[logger_name]["include_micros"] = include_micros
self._loggers[logger_name]["level_mgr"] = _LevelManager()
self._loggers[logger_name]["print_level"] = print_level or len(LogLevels) + 1
if fpath is not None:
dirs = os.path.split(fpath)[0]
if dirs:
os.makedirs(dirs, exist_ok=True)
exists = os.path.exists(fpath)
with open(fpath, "a" if append else "w", encoding="utf-8") as logfile:
logfile.write("\n\n" if append and exists else "")
if title is not None:
self.section(title + "\n")
if log_commit:
repo, commit = get_repo()
if repo is not None:
self.debug(
"Executing in repository %s" % repo,
"Commit: %s\n" % commit,
)
else:
self.debug("Unable to find repository that code was executed in")
def set_logger(self, logger: str):
if logger not in self._loggers:
raise LoggingException("Logger '%s' does not exist. Available loggers: %s" % (logger, list(self._loggers)))
if self._collect:
raise LoggingException("Cannot configure a new logger while using collect_logs")
self._selected_logger = logger
def level(self, level: LogLevels):
""" Log only at given level and above. Use with a with block """
return self._level_mgr.with_level(level)
@property
def no_log(self):
""" Disable logging inside a with block """
return self._level_mgr.with_level(max(LogLevels)+1)
@property
def log_errors(self):
return self._log_errors
def __call__(self, *tolog, with_info=True, sep=None, with_print=None, level: LogLevels=LogLevels.INFO):
self._log(*tolog, level=level, with_info=with_info, sep=sep, with_print=with_print)
def _write_to_log(self, content: RichString):
if self._fpath is not None:
with open(self._fpath, "a", encoding="utf-8") as logfile:
logfile.write(f"{content}\n")
@staticmethod
def _format(s: str, format: str) -> str:
return f"[{format}]{s}[/]"
def _log(self, *tolog, level=LogLevels.INFO, with_info=True, sep=None, with_print=None):
if not self._loggers:
return
if self._level_mgr.is_active and level < self._level_mgr.level:
return
sep = sep or self._default_sep
with_print = level >= self._print_level if with_print is None else with_print
time = get_timestamp()
tolog = sep.join([str(x) for x in tolog])
time_spaces = len(time) * " "
level_format = level.name + (self._maxlen - len(level.name)) * " "
space = self._spacing + self._maxlen * " " + self._spacing
logs = tolog.split("\n")
        rs = RichString(stderr=level in _STDERR_LEVELS)  # Send warnings and errors to stderr
if with_info and tolog:
rs.add_string(
f"{time}{self._spacing}{level_format}{self._spacing}",
self._format(time, _TIMESTAMP_COLOR) +\
self._spacing +\
self._format(level_format, _LEVEL_FORMAT[level]) +\
self._spacing,
)
rs.add_string(logs[0])
else:
rs.add_string(f"{time_spaces}{space}{logs[0]}".rstrip())
for i in range(1, len(logs)):
s = f"\n{time_spaces}{space}{logs[i]}".rstrip()
rs.add_string(
s if s.strip() else "\n"
)
if not self._collect:
self._write_to_log(rs)
if with_print:
rs.print()
else:
self._collected_log.append(rs)
if with_print:
self._collected_print.append(rs)
def _format_tb(self, error: Exception, tb_) -> list[str]:
stack = list(chain.from_iterable([elem.split("\n") for elem in tb.format_tb(tb_)]))
stack = [line for line in stack if line.strip()]
return [
"ERROR: %s thrown with stacktrace" % type(error).__name__,
*stack,
"%s: %s" % (type(error).__name__, error),
]
def _throw(self, error: Exception, tb_=None):
stack = list()
has_cause = error.__cause__ is not None
cur_error = error.__context__
while cur_error:
stack += self._format_tb(cur_error, cur_error.__traceback__)
if has_cause:
stack += ["", "The above exception was the direct cause of the following exception:", ""]
else:
stack += ["", "During handling of the above exception, another exception occurred:", ""]
has_cause = cur_error.__cause__ is not None
cur_error = cur_error.__context__
stack += self._format_tb(error, tb_)
self.critical(*stack, with_print=False)
raise error
def _input(self, prompt: str) -> str:
self.info("Prompt: '%s'" % prompt, with_print=False)
response = input(prompt)
self.info("Input: '%s'" % response, with_print=False)
return response
def input(self, prompt: str | Iterable[str] = "") -> str | Generator[str]:
"""
        Get user input and log both prompt and input
If prompt is an iterable, a generator of user inputs will be returned
"""
self._log("Waiting for user input", with_print=False)
if isinstance(prompt, str):
return self._input(prompt)
else:
return (self._input(p) for p in prompt)
@classmethod
def bool_input(cls, inp: str, default=True) -> bool:
""" Parse a yes/no user input """
inp = inp.lower()
if default:
return inp[0] not in cls._no if inp else True
else:
return inp[0] in cls._yes if inp else False
def _reset_collected(self):
self._collected_log = list()
self._collected_print = list()
def set_collect_mode(self, collect: bool):
self._collect = collect
if not collect:
self._reset_collected()
def log_collected(self):
if self._collected_log:
logs = "\n".join(str(log) for log in self._collected_log)
self._write_to_log(logs)
if self._collected_print:
RichString.multiprint(self._collected_print)
def clean(self):
""" Resets the loggers and removes all existing logger configurations """
self._loggers = defaultdict(dict)
self._selected_logger = "default"
def section(self, *tolog, with_info=True, sep=None, with_print=None, newline=True):
if newline:
self._log("")
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.SECTION)
def critical(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.CRITICAL)
def error(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.ERROR)
def warning(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.WARNING)
def info(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.INFO)
def debug(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.DEBUG)
log = _Logger()
class collect_logs:
"""
    Wrap functions with this class to have them emit all their output at once
Useful with multiprocessing, e.g.
```
with mp.Pool() as p:
p.map(collect_logs(fun), ...)
```
Loggers cannot be changed or configured during this
"""
def __init__(self, fun: Callable):
self.fun = fun
update_wrapper(self, fun)
def __call__(self, *args, **kwargs):
log.set_collect_mode(True)
return_value = self.fun(*args, **kwargs)
log.log_collected()
log.set_collect_mode(False)
return return_value
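# Minimal usage sketch (paths and messages below are hypothetical); it only uses
# the API defined above: configure(), section(), the call syntax, level() and
# log_errors.
if __name__ == "__main__":
    log.configure("logs/example.log", title="Example run", print_level=LogLevels.INFO)
    log.section("Setup")
    log("Loaded configuration", "Starting work", sep="\n")
    with log.level(LogLevels.WARNING):
        log.info("Suppressed: below the WARNING threshold")
        log.error("Still logged and printed")
    with log.log_errors:  # any exception raised here would be logged, then re-raised
        pass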
| 36.080332
| 123
| 0.61666
| 1,651
| 13,025
| 4.638401
| 0.190793
| 0.036432
| 0.022068
| 0.01567
| 0.199399
| 0.161792
| 0.161792
| 0.135414
| 0.131235
| 0.121572
| 0
| 0.002874
| 0.278618
| 13,025
| 360
| 124
| 36.180556
| 0.812154
| 0.116238
| 0
| 0.115672
| 0
| 0
| 0.078199
| 0.009709
| 0
| 0
| 0
| 0
| 0
| 1
| 0.149254
| false
| 0.007463
| 0.037313
| 0.033582
| 0.339552
| 0.123134
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a11f4324f76cab0ee6ba121cab810e162f6104
| 10,942
|
py
|
Python
|
tests/test_metrics.py
|
aaxelb/django-elasticsearch-metrics
|
8a02ffc57f57257843834d4f84c41480f4e27fbd
|
[
"MIT"
] | 5
|
2018-08-21T19:48:39.000Z
|
2021-04-01T22:11:31.000Z
|
tests/test_metrics.py
|
aaxelb/django-elasticsearch-metrics
|
8a02ffc57f57257843834d4f84c41480f4e27fbd
|
[
"MIT"
] | 18
|
2018-07-26T16:04:53.000Z
|
2018-08-30T19:31:30.000Z
|
tests/test_metrics.py
|
aaxelb/django-elasticsearch-metrics
|
8a02ffc57f57257843834d4f84c41480f4e27fbd
|
[
"MIT"
] | 5
|
2019-04-01T17:47:08.000Z
|
2022-01-28T17:23:11.000Z
|
import mock
import pytest
import datetime as dt
from django.utils import timezone
from elasticsearch_metrics import metrics
from elasticsearch_dsl import IndexTemplate
from elasticsearch_metrics import signals
from elasticsearch_metrics.exceptions import (
IndexTemplateNotFoundError,
IndexTemplateOutOfSyncError,
)
from tests.dummyapp.metrics import (
DummyMetric,
DummyMetricWithExplicitTemplateName,
DummyMetricWithExplicitTemplatePattern,
)
class PreprintView(metrics.Metric):
provider_id = metrics.Keyword(index=True)
user_id = metrics.Keyword(index=True)
preprint_id = metrics.Keyword(index=True)
class Index:
settings = {"refresh_interval": "-1"}
class Meta:
app_label = "dummyapp"
template_name = "osf_metrics_preprintviews"
template = "osf_metrics_preprintviews-*"
class TestGetIndexName:
def test_get_index_name(self):
date = dt.date(2020, 2, 14)
assert (
PreprintView.get_index_name(date=date)
== "osf_metrics_preprintviews_2020.02.14"
)
def test_get_index_name_respects_date_format_setting(self, settings):
settings.ELASTICSEARCH_METRICS_DATE_FORMAT = "%Y-%m-%d"
date = dt.date(2020, 2, 14)
assert (
PreprintView.get_index_name(date=date)
== "osf_metrics_preprintviews_2020-02-14"
)
def test_get_index_name_gets_index_for_today_by_default(self):
today = timezone.now().date()
today_formatted = today.strftime("%Y.%m.%d")
assert PreprintView.get_index_name() == "osf_metrics_preprintviews_{}".format(
today_formatted
)
class TestGetIndexTemplate:
def test_get_index_template_returns_template_with_correct_name_and_pattern(self):
template = PreprintView.get_index_template()
assert isinstance(template, IndexTemplate)
assert template._template_name == "osf_metrics_preprintviews"
assert "osf_metrics_preprintviews-*" in template.to_dict()["index_patterns"]
def test_get_index_template_respects_index_settings(self):
template = PreprintView.get_index_template()
assert template._index.to_dict()["settings"] == {"refresh_interval": "-1"}
def test_get_index_template_creates_template_with_mapping(self):
template = PreprintView.get_index_template()
mappings = template.to_dict()["mappings"]
assert mappings["doc"]["_source"]["enabled"] is False
properties = mappings["doc"]["properties"]
assert "timestamp" in properties
assert properties["timestamp"] == {"doc_values": True, "type": "date"}
assert properties["provider_id"] == {"type": "keyword", "index": True}
assert properties["user_id"] == {"type": "keyword", "index": True}
assert properties["preprint_id"] == {"type": "keyword", "index": True}
# regression test
def test_mappings_are_not_shared(self):
template1 = DummyMetric.get_index_template()
template2 = DummyMetricWithExplicitTemplateName.get_index_template()
assert "my_int" in template1.to_dict()["mappings"]["doc"]["properties"]
assert "my_keyword" not in template1.to_dict()["mappings"]["doc"]["properties"]
assert "my_int" not in template2.to_dict()["mappings"]["doc"]["properties"]
assert "my_keyword" in template2.to_dict()["mappings"]["doc"]["properties"]
def test_declaring_metric_with_no_app_label_or_template_name_errors(self):
with pytest.raises(RuntimeError):
class BadMetric(metrics.Metric):
pass
with pytest.raises(RuntimeError):
class MyMetric(metrics.Metric):
class Meta:
template_name = "osf_metrics_preprintviews"
def test_get_index_template_default_template_name(self):
template = DummyMetric.get_index_template()
assert isinstance(template, IndexTemplate)
assert template._template_name == "dummyapp_dummymetric"
assert "dummyapp_dummymetric_*" in template.to_dict()["index_patterns"]
def test_get_index_template_uses_app_label_in_class_meta(self):
class MyMetric(metrics.Metric):
class Meta:
app_label = "myapp"
template = MyMetric.get_index_template()
assert template._template_name == "myapp_mymetric"
def test_template_name_defined_with_no_template_falls_back_to_default_template(
self
):
template = DummyMetricWithExplicitTemplateName.get_index_template()
# template name specified in class Meta
assert template._template_name == "dummymetric"
# template is not specified, so it's generated
assert (
"dummyapp_dummymetricwithexplicittemplatename_*"
in template.to_dict()["index_patterns"]
)
def test_template_defined_with_no_template_name_falls_back_to_default_name(self):
template = DummyMetricWithExplicitTemplatePattern.get_index_template()
# template name specified in class Meta
assert (
template._template_name == "dummyapp_dummymetricwithexplicittemplatepattern"
)
# template is not specified, so it's generated
assert "dummymetric-*" in template.to_dict()["index_patterns"]
def test_inheritance(self):
class MyBaseMetric(metrics.Metric):
user_id = metrics.Keyword(index=True)
class Index:
settings = {"number_of_shards": 2}
class Meta:
abstract = True
class ConcreteMetric(MyBaseMetric):
class Meta:
app_label = "dummyapp"
template = ConcreteMetric.get_index_template()
assert template._template_name == "dummyapp_concretemetric"
assert template._index.to_dict()["settings"] == {"number_of_shards": 2}
def test_source_may_be_enabled(self):
class MyMetric(metrics.Metric):
class Meta:
app_label = "dummyapp"
template_name = "mymetric"
template = "mymetric-*"
source = metrics.MetaField(enabled=True)
template = MyMetric.get_index_template()
template_dict = template.to_dict()
doc = template_dict["mappings"]["doc"]
assert doc["_source"]["enabled"] is True
class TestRecord:
def test_calls_save(self, mock_save):
timestamp = dt.datetime(2017, 8, 21)
p = PreprintView.record(timestamp=timestamp, provider_id="abc12")
assert mock_save.call_count == 1
assert p.timestamp == timestamp
assert p.provider_id == "abc12"
@mock.patch.object(timezone, "now")
def test_defaults_timestamp_to_now(self, mock_now, mock_save):
fake_now = dt.datetime(2016, 8, 21)
mock_now.return_value = fake_now
p = PreprintView.record(provider_id="abc12")
assert mock_save.call_count == 1
assert p.timestamp == fake_now
class TestSignals:
@mock.patch.object(PreprintView, "get_index_template")
def test_create_metric_sends_signals(self, mock_get_index_template):
mock_pre_index_template_listener = mock.Mock()
mock_post_index_template_listener = mock.Mock()
signals.pre_index_template_create.connect(mock_pre_index_template_listener)
signals.post_index_template_create.connect(mock_post_index_template_listener)
PreprintView.sync_index_template()
assert mock_pre_index_template_listener.call_count == 1
assert mock_post_index_template_listener.call_count == 1
pre_call_kwargs = mock_pre_index_template_listener.call_args[1]
assert "index_template" in pre_call_kwargs
assert "using" in pre_call_kwargs
post_call_kwargs = mock_pre_index_template_listener.call_args[1]
assert "index_template" in post_call_kwargs
assert "using" in post_call_kwargs
def test_save_sends_signals(self, mock_save):
mock_pre_save_listener = mock.Mock()
mock_post_save_listener = mock.Mock()
signals.pre_save.connect(mock_pre_save_listener, sender=PreprintView)
signals.post_save.connect(mock_post_save_listener, sender=PreprintView)
provider_id = "12345"
user_id = "abcde"
preprint_id = "zyxwv"
doc = PreprintView(
provider_id=provider_id, user_id=user_id, preprint_id=preprint_id
)
doc.save()
assert mock_pre_save_listener.call_count == 1
pre_save_kwargs = mock_pre_save_listener.call_args[1]
assert isinstance(pre_save_kwargs["instance"], PreprintView)
assert "index" in pre_save_kwargs
assert "using" in pre_save_kwargs
assert pre_save_kwargs["sender"] is PreprintView
assert mock_post_save_listener.call_count == 1
post_save_kwargs = mock_pre_save_listener.call_args[1]
assert isinstance(post_save_kwargs["instance"], PreprintView)
assert "index" in post_save_kwargs
assert "using" in post_save_kwargs
assert post_save_kwargs["sender"] is PreprintView
@pytest.mark.es
class TestIntegration:
def test_init(self, client):
PreprintView.init()
name = PreprintView.get_index_name()
mapping = client.indices.get_mapping(index=name)
properties = mapping[name]["mappings"]["doc"]["properties"]
assert properties["timestamp"] == {"type": "date"}
assert properties["provider_id"] == {"type": "keyword"}
assert properties["user_id"] == {"type": "keyword"}
assert properties["preprint_id"] == {"type": "keyword"}
def test_create_document(self, client):
provider_id = "12345"
user_id = "abcde"
preprint_id = "zyxwv"
doc = PreprintView(
provider_id=provider_id, user_id=user_id, preprint_id=preprint_id
)
doc.save()
document = PreprintView.get(id=doc.meta.id, index=PreprintView.get_index_name())
# TODO flesh out this test more. Try to query ES?
assert document is not None
def test_check_index_template(self):
with pytest.raises(IndexTemplateNotFoundError):
assert PreprintView.check_index_template() is False
PreprintView.sync_index_template()
assert PreprintView.check_index_template() is True
# When settings change, template is out of sync
PreprintView._index.settings(
**{"refresh_interval": "1s", "number_of_shards": 1, "number_of_replicas": 2}
)
with pytest.raises(IndexTemplateOutOfSyncError) as excinfo:
assert PreprintView.check_index_template() is False
error = excinfo.value
assert error.settings_in_sync is False
assert error.mappings_in_sync is True
assert error.patterns_in_sync is True
PreprintView.sync_index_template()
assert PreprintView.check_index_template() is True
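# Illustrative sketch (not one of the tests above; class name and field are
# hypothetical): the minimal shape of a Metric subclass exercised by this suite.
class PageView(metrics.Metric):
    page_id = metrics.Keyword(index=True)

    class Meta:
        app_label = "dummyapp"
        template_name = "osf_metrics_pageviews"
        template = "osf_metrics_pageviews-*"

# PageView.get_index_name(date=dt.date(2020, 2, 14)) yields
# "osf_metrics_pageviews_2020.02.14" with the default date format, and
# PageView.record(page_id="abc12") saves a document timestamped with timezone.now().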
| 39.501805
| 88
| 0.683787
| 1,245
| 10,942
| 5.673092
| 0.148594
| 0.069942
| 0.040776
| 0.01699
| 0.529237
| 0.395724
| 0.344046
| 0.273963
| 0.23701
| 0.190571
| 0
| 0.009791
| 0.225279
| 10,942
| 276
| 89
| 39.644928
| 0.823405
| 0.025224
| 0
| 0.253394
| 0
| 0
| 0.114843
| 0.034434
| 0
| 0
| 0
| 0.003623
| 0.276018
| 1
| 0.095023
| false
| 0.004525
| 0.040724
| 0
| 0.239819
| 0.19457
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a1b8da531316fb6c21092916dd14f6945d1c1d
| 1,924
|
py
|
Python
|
tests/unit/test_iris_helpers.py
|
jvegreg/ESMValCore
|
03eb1c942bf1dc3be98cb30c3592b42e82a94f16
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_iris_helpers.py
|
jvegreg/ESMValCore
|
03eb1c942bf1dc3be98cb30c3592b42e82a94f16
|
[
"Apache-2.0"
] | 2
|
2022-03-02T16:16:06.000Z
|
2022-03-10T12:58:49.000Z
|
tests/unit/test_iris_helpers.py
|
valeriupredoi/ESMValCore
|
b46b948c47d8579d997b28501f8588f5531aa354
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for :mod:`esmvalcore.iris_helpers`."""
import datetime
import iris
import numpy as np
import pytest
from cf_units import Unit
from esmvalcore.iris_helpers import date2num, var_name_constraint
@pytest.fixture
def cubes():
"""Test cubes."""
cubes = iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='a', long_name='a'),
iris.cube.Cube(0.0, var_name='a', long_name='b'),
iris.cube.Cube(0.0, var_name='c', long_name='d'),
])
return cubes
@pytest.fixture
def units():
return Unit('days since 0001-01-01', calendar='proleptic_gregorian')
@pytest.mark.parametrize("date, dtype, expected", [
(datetime.datetime(1, 1, 1), np.float64, 0.0),
(datetime.datetime(1, 1, 1), int, 0.0),
(datetime.datetime(1, 1, 2, 12), np.float64, 1.5),
])
def test_date2num_scalar(date, dtype, expected, units):
num = date2num(date, units, dtype=dtype)
assert num == expected
assert num.dtype == dtype
def test_var_name_constraint(cubes):
"""Test :func:`esmvalcore.iris_helpers.var_name_constraint`."""
out_cubes = cubes.extract(var_name_constraint('a'))
assert out_cubes == iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='a', long_name='a'),
iris.cube.Cube(0.0, var_name='a', long_name='b'),
])
out_cubes = cubes.extract(var_name_constraint('b'))
assert out_cubes == iris.cube.CubeList([])
out_cubes = cubes.extract(var_name_constraint('c'))
assert out_cubes == iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='c', long_name='d'),
])
with pytest.raises(iris.exceptions.ConstraintMismatchError):
cubes.extract_cube(var_name_constraint('a'))
with pytest.raises(iris.exceptions.ConstraintMismatchError):
cubes.extract_cube(var_name_constraint('b'))
out_cube = cubes.extract_cube(var_name_constraint('c'))
assert out_cube == iris.cube.Cube(0.0, var_name='c', long_name='d')
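# Minimal sketch (not a test): extracting cubes from a CubeList by var_name with
# var_name_constraint, mirroring the fixtures above.
def _var_name_demo():
    demo_cubes = iris.cube.CubeList([
        iris.cube.Cube(0.0, var_name='a', long_name='a'),
        iris.cube.Cube(0.0, var_name='c', long_name='d'),
    ])
    # Keeps only the cube whose var_name is 'a'; extract_cube() would raise
    # ConstraintMismatchError if the match were not unique.
    return demo_cubes.extract(var_name_constraint('a'))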
| 33.172414
| 72
| 0.677755
| 281
| 1,924
| 4.459075
| 0.217082
| 0.089385
| 0.122107
| 0.072626
| 0.57063
| 0.554669
| 0.463687
| 0.3751
| 0.3751
| 0.3751
| 0
| 0.028501
| 0.161123
| 1,924
| 57
| 73
| 33.754386
| 0.747831
| 0.057692
| 0
| 0.363636
| 0
| 0
| 0.045075
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.090909
| false
| 0
| 0.136364
| 0.022727
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a33ada09a97d4c429f1c99f360e9ceb37d5903
| 771
|
py
|
Python
|
figures/plot_log_figure_paper.py
|
davidADSP/deepAI_paper
|
f612e80aa0e8507444228940c54554a83bc16119
|
[
"MIT"
] | 21
|
2017-09-09T18:41:40.000Z
|
2022-03-16T06:50:00.000Z
|
figures/plot_log_figure_paper.py
|
davidADSP/deepAI_paper
|
f612e80aa0e8507444228940c54554a83bc16119
|
[
"MIT"
] | null | null | null |
figures/plot_log_figure_paper.py
|
davidADSP/deepAI_paper
|
f612e80aa0e8507444228940c54554a83bc16119
|
[
"MIT"
] | 6
|
2017-09-09T18:41:53.000Z
|
2022-02-25T08:11:40.000Z
|
import numpy
import matplotlib.pyplot as plt
fig_convergence = plt.figure(1,figsize=(12,6))
x = numpy.loadtxt('log_deepAI_paper_nonlin_action_long.txt')
plt.subplot(122)
plt.plot(x[:,0])
plt.xlim([0,500])
plt.ylim([-10,200])
plt.xlabel('Steps')
plt.ylabel('Free Action')
plt.axvline(x=230.0,linestyle=':')
plt.axvline(x=250.0,linestyle=':')
plt.axvline(x=270.0,linestyle=':')
ax = plt.subplot(121)
plt.plot(x[:,0])
plt.ylim([-10,200])
ax.axvspan(0, 500, alpha=0.3, color='red')
plt.xlim([0,30000])
plt.xlabel('Steps')
plt.ylabel('Free Action')
fig_convergence.subplots_adjust(left=0.07, bottom=0.1, right=0.95, top=0.95,
wspace=0.2, hspace=0.15)
fig_convergence.savefig('fig_convergence.pdf')
plt.show()
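# Optional sketch (synthetic data, not the published results): writes a stand-in
# 'log_deepAI_paper_nonlin_action_long.txt' with the layout assumed above (free
# action in the first column) so a later run of this script can be tried without
# the original training log. Guarded so an existing log is never overwritten.
import os
if not os.path.exists('log_deepAI_paper_nonlin_action_long.txt'):
    steps = numpy.arange(30000)
    fake_free_action = 200.0 * numpy.exp(-steps / 3000.0)
    numpy.savetxt('log_deepAI_paper_nonlin_action_long.txt',
                  numpy.column_stack([fake_free_action, numpy.zeros_like(fake_free_action)]))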
| 24.09375
| 76
| 0.657588
| 127
| 771
| 3.913386
| 0.496063
| 0.112676
| 0.066398
| 0.036217
| 0.265594
| 0.132797
| 0.132797
| 0
| 0
| 0
| 0
| 0.099849
| 0.142672
| 771
| 31
| 77
| 24.870968
| 0.652042
| 0
| 0
| 0.333333
| 0
| 0
| 0.124514
| 0.050584
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a452c901b5e5a2bc4953164caa1bd099196d19
| 2,938
|
py
|
Python
|
setup.py
|
matiasgrana/nagios_sql
|
7858b852cf539da418a1a289e8c06e386b62287a
|
[
"MIT"
] | null | null | null |
setup.py
|
matiasgrana/nagios_sql
|
7858b852cf539da418a1a289e8c06e386b62287a
|
[
"MIT"
] | 4
|
2017-08-08T13:42:39.000Z
|
2019-11-25T10:29:29.000Z
|
setup.py
|
matiasgrana/nagios_sql
|
7858b852cf539da418a1a289e8c06e386b62287a
|
[
"MIT"
] | 4
|
2019-01-28T13:58:09.000Z
|
2019-11-29T14:01:07.000Z
|
#! python3
# Help from: http://www.scotttorborg.com/python-packaging/minimal.html
# https://docs.python.org/3/distutils/commandref.html#sdist-cmd
# https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# https://docs.python.org/3.4/tutorial/modules.html
# Install it with python setup.py install
# Or use: python setup.py develop (changes to the source files will be
# immediately available)
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
from setuptools import setup, find_packages
import os
from os import path
import rstcheck
exec(open('src/version.py').read())
# __version__ is defined when executing src/version.py
version = __version__
here = path.abspath(path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
requires = [x.strip() for x in f if x.strip()]
def check_readme(file='README.rst'):
"""
    Checks the readme RST file to ensure it will upload to PyPI and be formatted
    correctly.
:param file:
:return:
"""
# Get the long description from the relevant file
with open(file, encoding='utf-8') as f:
readme_content = f.read()
errors = list(rstcheck.check(readme_content))
if errors:
msg = 'There are errors in {}, errors \n {}'.format(file,
errors[0].message)
raise SystemExit(msg)
else:
msg = 'No errors in {}'.format(file)
print(msg)
readme_file = path.join(here, 'README.rst')
# Get the long description from the relevant file
with open(readme_file, encoding='utf-8') as f:
long_description = f.read()
check_readme(readme_file)
# Define setuptools specifications
setup(name='nagios_sql',
version=version,
description='Nagios plugin with sqlchecks',
long_description=long_description, # this is the file README.rst
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: SQL',
'Topic :: System :: Monitoring',
'Topic :: Database :: Database Engines/Servers',
'Topic :: System :: Systems Administration'
],
url='https://github.com/pablodav/nagios_sql',
author='Pablo Estigarribia',
author_email='pablodav@gmail.com',
license='MIT',
packages=find_packages(),
#include_package_data=True,
#package_data={
# 'data': 'src/data/*',
#},
#data_files=[('VERSION', ['src/VERSION'])],
entry_points={
'console_scripts': [
'nagios_sql = src.nagios_sql:main'
]
},
install_requires=requires,
tests_require=['pytest',
'pytest-cov'],
zip_safe=False)
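# Usage note (illustrative): check_readme() defined above can validate any RST
# document before uploading, e.g. check_readme('CHANGELOG.rst') for a hypothetical
# changelog; it raises SystemExit with the first rstcheck error if the file does
# not parse cleanly and prints a confirmation otherwise. The script itself is run
# with 'python setup.py install' or 'python setup.py develop', as noted in the
# comments at the top of this file.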
| 32.285714
| 84
| 0.636147
| 353
| 2,938
| 5.184136
| 0.467422
| 0.040984
| 0.02459
| 0.029508
| 0.105464
| 0.095082
| 0.052459
| 0.052459
| 0.052459
| 0.052459
| 0
| 0.005798
| 0.236896
| 2,938
| 90
| 85
| 32.644444
| 0.810437
| 0.299523
| 0
| 0
| 0
| 0
| 0.314428
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.075472
| 0
| 0.09434
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a4a9fee06a32718975fa561659e922ae3f756e
| 1,838
|
py
|
Python
|
textnn/utils/test/test_progress_iterator.py
|
tongr/TextNN
|
a0294a197d3be284177214e8f019e9fed13dff1a
|
[
"Apache-2.0"
] | 1
|
2019-03-08T12:12:45.000Z
|
2019-03-08T12:12:45.000Z
|
textnn/utils/test/test_progress_iterator.py
|
tongr/TextNN
|
a0294a197d3be284177214e8f019e9fed13dff1a
|
[
"Apache-2.0"
] | 16
|
2019-02-14T11:51:30.000Z
|
2019-06-11T08:25:53.000Z
|
textnn/utils/test/test_progress_iterator.py
|
tongr/TextNN
|
a0294a197d3be284177214e8f019e9fed13dff1a
|
[
"Apache-2.0"
] | null | null | null |
import io
import sys
from textnn.utils import ProgressIterator
# Inspired by https://stackoverflow.com/a/34738440
def capture_sysout(cmd):
capturedOutput = io.StringIO() # Create StringIO object
sys.stdout = capturedOutput # and redirect stdout.
cmd() # Call function.
sys.stdout = sys.__stdout__ # Reset redirect.
return capturedOutput.getvalue() # Now works as before.
def test_progress_iterator():
def progress_generator():
sum(ProgressIterator([1, 2, 3], interval=0, description=""))
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
def test_progress_iterator_with_statement():
def progress_generator():
with ProgressIterator([1,2,3], interval=0, description="") as it:
sum(it)
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
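# Side note (not part of the test suite): the same capture can be written with the
# standard library's contextlib.redirect_stdout, which restores sys.stdout even if
# the wrapped callable raises.
import contextlib
def capture_sysout_ctx(cmd):
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):  # route print() output into the buffer
        cmd()
    return buffer.getvalue()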
| 34.679245
| 76
| 0.541349
| 210
| 1,838
| 4.657143
| 0.319048
| 0.08998
| 0.0409
| 0.04908
| 0.588957
| 0.588957
| 0.588957
| 0.509202
| 0.509202
| 0.509202
| 0
| 0.042674
| 0.235038
| 1,838
| 52
| 77
| 35.346154
| 0.652916
| 0.280196
| 0
| 0.5625
| 0
| 0
| 0.041284
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.15625
| false
| 0
| 0.09375
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a5f31f1b502fe38b7dada2cca91916da3eb320
| 24,973
|
py
|
Python
|
pyvisa_py/highlevel.py
|
Handfeger/pyvisa-py
|
fcfb45895cd44dd922985c3a9d8f3372c8318d63
|
[
"MIT"
] | 1
|
2019-03-25T20:26:16.000Z
|
2019-03-25T20:26:16.000Z
|
pyvisa_py/highlevel.py
|
Handfeger/pyvisa-py
|
fcfb45895cd44dd922985c3a9d8f3372c8318d63
|
[
"MIT"
] | null | null | null |
pyvisa_py/highlevel.py
|
Handfeger/pyvisa-py
|
fcfb45895cd44dd922985c3a9d8f3372c8318d63
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Highlevel wrapper of the VISA Library.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import random
from collections import OrderedDict
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from pyvisa import constants, highlevel, rname
from pyvisa.constants import StatusCode
from pyvisa.typing import VISAEventContext, VISARMSession, VISASession
from pyvisa.util import LibraryPath
from . import sessions
from .common import logger
class PyVisaLibrary(highlevel.VisaLibraryBase):
"""A pure Python backend for PyVISA.
The object is basically a dispatcher with some common functions implemented.
    When a new resource object is requested from pyvisa, the library creates a
    Session object (that knows how to perform low-level communication operations)
    associated with a session handle (a number, usually referred to simply as the
    session).
A call to a library function is handled by PyVisaLibrary if it involves a
resource agnostic function or dispatched to the correct session object
(obtained from the session id).
Importantly, the user is unaware of this. PyVisaLibrary behaves for
the user just as NIVisaLibrary.
"""
    #: Live session object identified by a random session ID
sessions: Dict[int, sessions.Session]
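    # Concretely (illustrative walk-through, resource string hypothetical): open()
    # parses a name such as 'TCPIP0::192.168.0.2::INSTR' with
    # rname.parse_resource_name, picks the matching Session subclass through
    # sessions.Session.get_session_class, and registers the instance under a random
    # integer handle; later calls such as clear(handle) or read_stb(handle) are
    # dispatched through self.sessions[handle] and wrapped by handle_return_value.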
# Try to import packages implementing lower level functionality.
try:
from .serial import SerialSession
logger.debug("SerialSession was correctly imported.")
except Exception as e:
logger.debug("SerialSession was not imported %s." % e)
try:
from .usb import USBRawSession, USBSession
logger.debug("USBSession and USBRawSession were correctly imported.")
except Exception as e:
logger.debug("USBSession and USBRawSession were not imported %s." % e)
try:
from .tcpip import TCPIPInstrSession, TCPIPSocketSession
logger.debug("TCPIPSession was correctly imported.")
except Exception as e:
logger.debug("TCPIPSession was not imported %s." % e)
try:
from .gpib import GPIBSession
logger.debug("GPIBSession was correctly imported.")
except Exception as e:
logger.debug("GPIBSession was not imported %s." % e)
@staticmethod
def get_library_paths() -> Iterable[LibraryPath]:
"""List a dummy library path to allow to create the library."""
return (LibraryPath("py"),)
@staticmethod
def get_debug_info() -> Dict[str, Union[str, List[str], Dict[str, str]]]:
"""Return a list of lines with backend info."""
from . import __version__
d: OrderedDict[str, Union[str, List[str], Dict[str, str]]] = OrderedDict()
d["Version"] = "%s" % __version__
for key, val in sessions.Session.iter_valid_session_classes():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = "Available " + val.get_low_level_info()
for key, issue in sessions.Session.iter_session_classes_issues():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = issue.split("\n")
return d
def _init(self) -> None:
"""Custom initialization code."""
# Map session handle to session object.
self.sessions = {}
def _register(self, obj: object) -> VISASession:
"""Creates a random but unique session handle for a session object.
Register it in the sessions dictionary and return the value.
"""
session = None
while session is None or session in self.sessions:
session = random.randint(1000000, 9999999)
self.sessions[session] = obj
return session
def open(
self,
session: VISARMSession,
resource_name: str,
access_mode: constants.AccessModes = constants.AccessModes.no_lock,
open_timeout: int = constants.VI_TMO_IMMEDIATE,
) -> Tuple[VISASession, StatusCode]:
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
Parameters
----------
session : VISARMSession
Resource Manager session (should always be a session returned from
open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
access_mode : constants.AccessModes, optional
Specifies the mode by which the resource is to be accessed.
open_timeout : int
Specifies the maximum time period (in milliseconds) that this
operation waits before returning an error. constants.VI_TMO_IMMEDIATE
and constants.VI_TMO_INFINITE are used as min and max.
Returns
-------
VISASession
Unique logical identifier reference to a session
StatusCode
Return value of the library call.
"""
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError(
"open_timeout (%r) must be an integer (or compatible type)"
% open_timeout
)
try:
parsed = rname.parse_resource_name(resource_name)
except rname.InvalidResourceName:
return (
VISASession(0),
self.handle_return_value(None, StatusCode.error_invalid_resource_name),
)
cls = sessions.Session.get_session_class(
parsed.interface_type_const, parsed.resource_class
)
sess = cls(session, resource_name, parsed, open_timeout)
return self._register(sess), StatusCode.success
def clear(self, session: VISASession) -> StatusCode:
"""Clears a device.
Corresponds to viClear function of the VISA library.
Parameters
----------
session : typin.VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.clear())
def flush(
self, session: VISASession, mask: constants.BufferOperation
) -> StatusCode:
"""Flush the specified buffers.
The buffers can be associated with formatted I/O operations and/or
serial communication.
Corresponds to viFlush function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mask : constants.BufferOperation
Specifies the action to be taken with flushing the buffer.
The values can be combined using the | operator. However multiple
operations on a single buffer cannot be combined.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.flush(mask))
def gpib_command(
self, session: VISASession, command_byte: bytes
) -> Tuple[int, StatusCode]:
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
command_byte : bytes
Data to write.
Returns
-------
int
Number of written bytes
StatusCode
Return value of the library call.
"""
try:
written, st = self.sessions[session].gpib_command(command_byte)
return written, self.handle_return_value(session, st)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
def assert_trigger(
self, session: VISASession, protocol: constants.TriggerProtocol
) -> StatusCode:
"""Assert software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
protocol : constants.TriggerProtocol
Trigger protocol to use during assertion.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].assert_trigger(protocol)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_send_ifc(self, session: VISASession) -> StatusCode:
"""Pulse the interface clear line (IFC) for at least 100 microseconds.
Corresponds to viGpibSendIFC function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_send_ifc()
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_ren(
self, session: VISASession, mode: constants.RENLineOperation
) -> StatusCode:
"""Controls the state of the GPIB Remote Enable (REN) interface line.
Optionally the remote/local state of the device can also be set.
Corresponds to viGpibControlREN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.RENLineOperation
State of the REN line and optionally the device remote/local state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_ren(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_atn(
self, session: VISASession, mode: constants.ATNLineOperation
) -> StatusCode:
"""Specifies the state of the ATN line and the local active controller state.
Corresponds to viGpibControlATN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.ATNLineOperation
State of the ATN line and optionally the local active controller state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_atn(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_pass_control(
self, session: VISASession, primary_address: int, secondary_address: int
) -> StatusCode:
"""Tell a GPIB device to become controller in charge (CIC).
Corresponds to viGpibPassControl function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
primary_address : int
Primary address of the GPIB device to which you want to pass control.
secondary_address : int
Secondary address of the targeted GPIB device.
If the targeted device does not have a secondary address, this parameter
should contain the value Constants.VI_NO_SEC_ADDR.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].gpib_pass_control(
primary_address, secondary_address
),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def read_stb(self, session: VISASession) -> Tuple[int, StatusCode]:
"""Reads a status byte of the service request.
Corresponds to viReadSTB function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
int
Service request status byte
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
stb, status_code = sess.read_stb()
return stb, self.handle_return_value(session, status_code)
def close(
self, session: Union[VISASession, VISAEventContext, VISARMSession]
) -> StatusCode:
"""Closes the specified session, event, or find list.
Corresponds to viClose function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext, VISARMSession]
Unique logical identifier to a session, event, or resource manager.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
# The RM session directly references the library.
if sess is not self:
return self.handle_return_value(session, sess.close())
else:
return self.handle_return_value(session, StatusCode.success)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def open_default_resource_manager(self) -> Tuple[VISARMSession, StatusCode]:
"""This function returns a session to the Default Resource Manager resource.
Corresponds to viOpenDefaultRM function of the VISA library.
Returns
-------
VISARMSession
Unique logical identifier to a Default Resource Manager session
StatusCode
Return value of the library call.
"""
return (
cast(VISARMSession, self._register(self)),
self.handle_return_value(None, StatusCode.success),
)
def list_resources(
self, session: VISARMSession, query: str = "?*::INSTR"
) -> Tuple[str, ...]:
"""Return a tuple of all connected devices matching query.
Parameters
----------
session : VISARMSession
Unique logical identifier to the resource manager session.
query : str
Regular expression used to match devices.
Returns
-------
Tuple[str, ...]
Resource names of all the connected devices matching the query.
"""
# For each session type, ask for the list of connected resources and
# merge them into a single list.
# HINT: the cast should not be necessary here
resources: List[str] = []
for key, st in sessions.Session.iter_valid_session_classes():
resources += st.list_resources()
return rname.filter(resources, query)
def read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data from device or interface synchronously.
Corresponds to viRead function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the read method of the session object.
try:
data, status_code = self.sessions[session].read(count)
except KeyError:
return (
b"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
return data, self.handle_return_value(session, status_code)
def write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Write data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
Number of bytes actually transferred
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the write method of the session object.
try:
written, status_code = self.sessions[session].write(data)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
return written, self.handle_return_value(session, status_code)
def buffer_read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data through the use of a formatted I/O read buffer.
The data can be read from a device or an interface.
Corresponds to viBufRead function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
StatusCode
Return value of the library call.
"""
return self.read(session, count)
def buffer_write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Writes data to a formatted I/O write buffer synchronously.
Corresponds to viBufWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
Number of written bytes
StatusCode
Return value of the library call.
"""
return self.write(session, data)
def get_attribute(
self,
session: Union[VISASession, VISAEventContext, VISARMSession],
attribute: Union[constants.ResourceAttribute, constants.EventAttribute],
) -> Tuple[Any, StatusCode]:
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
None,
self.handle_return_value(session, StatusCode.error_invalid_object),
)
state, status_code = sess.get_attribute(
cast(constants.ResourceAttribute, attribute)
)
return state, self.handle_return_value(session, status_code)
def set_attribute(
self,
session: VISASession,
attribute: constants.ResourceAttribute,
attribute_state: Any,
) -> StatusCode:
"""Set the state of an attribute.
Corresponds to viSetAttribute function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
attribute : constants.ResourceAttribute
Attribute for which the state is to be modified.
attribute_state : Any
The state of the attribute to be set for the specified object.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].set_attribute(attribute, attribute_state),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def lock(
self,
session: VISASession,
lock_type: constants.Lock,
timeout: int,
requested_key: Optional[str] = None,
) -> Tuple[str, StatusCode]:
"""Establishes an access mode to the specified resources.
Corresponds to viLock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
lock_type : constants.Lock
Specifies the type of lock requested.
timeout : int
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
requested_key : Optional[str], optional
Requested locking key in the case of a shared lock. For an exclusive
lock it should be None.
Returns
-------
str
Key that can then be passed to other sessions to share the lock, or
None for an exclusive lock.
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
key, status_code = sess.lock(lock_type, timeout, requested_key)
return key, self.handle_return_value(session, status_code)
def unlock(self, session: VISASession) -> StatusCode:
"""Relinquish a lock for the specified resource.
Corresponds to viUnlock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.unlock())
def disable_event(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Disable notification for an event type(s) via the specified mechanism(s).
Corresponds to viDisableEvent function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Event type.
mechanism : constants.EventMechanism
Event handling mechanisms to be disabled.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
def discard_events(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Discard event occurrences for a given type and mechanisms in a session.
Corresponds to viDiscardEvents function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be discarded.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
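# Editor's note: hypothetical usage sketch, not part of the original module.
# `lib` stands for an instance of the library class defined above and `session`
# for an already-opened VISASession handle; only methods whose signatures appear
# above are used.
def _example_write_read_close(lib, session):
    written, status = lib.write(session, b"*IDN?\n")  # -> (bytes written, StatusCode)
    data, status = lib.read(session, 1024)            # read up to 1024 bytes
    lib.close(session)                                 # release the session handle
    return data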
| 31.893997
| 88
| 0.610019
| 2,640
| 24,973
| 5.680682
| 0.159848
| 0.019004
| 0.037341
| 0.04901
| 0.4979
| 0.461159
| 0.419084
| 0.386944
| 0.359272
| 0.328332
| 0
| 0.001993
| 0.317022
| 24,973
| 782
| 89
| 31.934783
| 0.877287
| 0.436351
| 0
| 0.408397
| 0
| 0
| 0.036045
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 1
| 0.103053
| false
| 0.015267
| 0.083969
| 0
| 0.354962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a63f951029bec63e4f61cb892764b3e55fdcae
| 13,219
|
py
|
Python
|
detectron/utils/webly_vis.py
|
sisrfeng/NA-fWebSOD
|
49cb75a9a0d557b05968c6b11b0f17a7043f2077
|
[
"Apache-2.0"
] | 23
|
2020-03-30T11:48:33.000Z
|
2022-03-11T06:34:31.000Z
|
detectron/utils/webly_vis.py
|
sisrfeng/NA-fWebSOD
|
49cb75a9a0d557b05968c6b11b0f17a7043f2077
|
[
"Apache-2.0"
] | 9
|
2020-09-28T07:15:16.000Z
|
2022-03-25T08:11:06.000Z
|
detectron/utils/webly_vis.py
|
sisrfeng/NA-fWebSOD
|
49cb75a9a0d557b05968c6b11b0f17a7043f2077
|
[
"Apache-2.0"
] | 10
|
2020-03-30T11:48:34.000Z
|
2021-06-02T06:12:36.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import os
import math
from PIL import Image, ImageDraw, ImageFont
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
def vis_training(cur_iter):
prefix = ''
if cfg.WEBLY.MINING:
prefix = 'mining_'
if not (cfg.WSL.DEBUG or
(cfg.WSL.SAMPLE and cur_iter % cfg.WSL.SAMPLE_ITER == 0)):
return
output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
sample_dir = os.path.join(output_dir, 'webly_sample')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
for gpu_id in range(cfg.NUM_GPUS):
data_ids = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data_ids'))
ims = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data'))
labels_oh = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, 'labels_oh'))
im_score = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'cls_prob'))
roi_score = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred'))
# roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, prefix + 'rois_pred_softmax'))
rois = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + 'rois'))
# anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, 'anchor_argmax'))
preffix = 'iter_' + str(cur_iter) + '_gpu_' + str(gpu_id)
save_im(labels_oh, im_score, ims, cfg.PIXEL_MEANS, preffix, sample_dir)
save_rois(labels_oh, im_score, roi_score, ims, rois, cfg.PIXEL_MEANS,
preffix, '', sample_dir)
# continue
if cfg.WEBLY.ENTROPY:
pass
else:
continue
class_weight = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_class_weight'))
rois_pred_hatE = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_hatE'))
rois_pred_E = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_E'))
y_logN__logy = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_y_logN__logy'))
save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
cfg.PIXEL_MEANS, preffix, '', sample_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy)
def save_im(labels_oh, im_score, ims, pixel_means, prefix, output_dir):
batch_size, num_classes = im_score.shape
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '.png')
cv2.imwrite(file_name, im)
def save_rois(labels_oh, im_score, roi_score, ims, rois, pixel_means, prefix,
suffix, output_dir):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
has_bg = False
num_rois_this = min(500, num_rois)
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
if has_bg:
continue
has_bg = True
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
argsort = np.argsort(-np.abs(roi_score[:, c]))
argsort = argsort[:num_rois_this]
argsort = argsort[::-1]
if im_score[b][c] < 0.1:
scale_p = 1.0
else:
scale_p = 1.0 / roi_score[:, c].max()
for n in range(num_rois_this):
roi = rois[argsort[n]]
if roi[0] != b:
continue
if roi_score[argsort[n]][c] * scale_p < 0.4:
thickness = 3
else:
thickness = 6
jet = gray2jet(roi_score[argsort[n]][c] * scale_p)
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_' +
suffix + '.png')
cv2.imwrite(file_name, im_S)
continue
# NOTE: everything below this `continue` is unreachable legacy/debug code;
# `anchor_argmax` is only defined when the commented-out fetch above is enabled.
num_anchors = anchor_argmax.shape[0]
for n in range(num_rois):
roi = rois[n]
if roi[0] != b:
continue
for a in range(num_anchors):
if anchor_argmax[a][n] == 1.0:
break
jet = gray2jet(1.0 * a / num_anchors)
cv2.rectangle(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_A_' +
suffix + '.png')
cv2.imwrite(file_name, im_A)
def save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
pixel_means, prefix, suffix, output_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
rois_pred_E_sum = np.sum(rois_pred_E, axis=0).reshape(1, -1)
E_sum_norm = np.true_divide(rois_pred_E_sum, y_logN__logy)
E_sum_norm = np.where(E_sum_norm > 1., 1., E_sum_norm)
E_class_weight = 1 - E_sum_norm
for b in range(batch_size):
for c in range(num_classes):
if labels_oh[b][c] == 0.0 and im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
im_hatE = im.copy()
im_E = im.copy()
_NUM = 10
argsort_roi = np.argsort(roi_score[:, c])[::-1]
argsort_hatE = np.argsort(rois_pred_hatE[:, c])[::-1]
argsort_E = np.argsort(rois_pred_E[:, c])[::-1]
if len(argsort_roi) >= _NUM:
_NUM = 10
else:
_NUM = len(argsort_roi)
argsort_roi = argsort_roi[:_NUM][::-1]
argsort_hatE = argsort_hatE[:_NUM][::-1]
argsort_E = argsort_E[:_NUM][::-1]
argsort_hatE = argsort_roi
argsort_E = argsort_roi
scale_p = 1.0 / roi_score[:, c].max()
scale_p = 1.0
for n in range(_NUM):
roi = rois[argsort_roi[n]]
hatE_roi = rois[argsort_hatE[n]]
E_roi = rois[argsort_E[n]]
if roi[0] != b:
continue
# draw roi
jet = gray2jet(roi_score[argsort_roi[n]][c] * scale_p)
bgr = jet
rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
text = "{:.4f}".format(roi_score[argsort_roi[n]][c])
im_S = putText_with_TNR(im_S, int(roi[1]), int(roi[2]), 15,
jet, rgb, text)
if hatE_roi[0] != b:
continue
# draw rois_pred_hatE
# jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_hatE, (hatE_roi[1], hatE_roi[2]),
(hatE_roi[3], hatE_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text hat_E
text = "{:.4f}".format(rois_pred_hatE[argsort_hatE[n]][c])
im_hatE = putText_with_TNR(im_hatE, int(hatE_roi[1]),
int(hatE_roi[2]), 15, jet, rgb,
text)
if E_roi[0] != b:
continue
# draw rois_pred_E
# jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text E
text = "{:.4f}".format(rois_pred_E[argsort_E[n]][c])
im_E = putText_with_TNR(im_E, int(E_roi[1]), int(E_roi[2]), 15,
jet, rgb, text)
# write im_score
text = "{:.4f}".format(im_score[b][c])
im_S = putText_with_TNR(im_S, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(class_weight[b][c])
im_hatE = putText_with_TNR(im_hatE, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(E_class_weight[b][c])
im_E = putText_with_TNR(im_E, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
file_name_roi = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_roi' +
suffix + '.png')
cv2.imwrite(file_name_roi, im_S)
file_name_hatE = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) +
'_hatE' + suffix + '.png')
cv2.imwrite(file_name_hatE, im_hatE)
file_name_E = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_E' +
suffix + '.png')
cv2.imwrite(file_name_E, im_E)
def dump_proto_files(model, output_dir):
"""Save prototxt descriptions of the training network and parameter
initialization network."""
with open(os.path.join(output_dir, model.net.Proto().name), 'w') as fid:
fid.write(str(model.net.Proto()))
with open(os.path.join(output_dir,
model.param_init_net.Proto().name), 'w') as fid:
fid.write(str(model.param_init_net.Proto()))
def gray2jet(f):
# plot short rainbow RGB
a = f / 0.25 # invert and group
X = math.floor(a) # this is the integer part
Y = math.floor(255 * (a - X)) # fractional part from 0 to 255
Z = math.floor(128 * (a - X)) # fractional part from 0 to 128
if X == 0:
r = 0
g = Y
b = 128 - Z
elif X == 1:
r = Y
g = 255
b = 0
elif X == 2:
r = 255
g = 255 - Z
b = 0
elif X == 3:
r = 255
g = 128 - Z
b = 0
elif X == 4:
r = 255
g = 0
b = 0
# opencv is bgr, not rgb
return (b, g, r)
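# Editor's note: small usage sketch (illustration only, never called by the
# pipeline). gray2jet maps a scalar in [0, 1] to a BGR triple along a short
# rainbow: 0.0 -> blue, 0.5 -> yellow, 1.0 -> red.
def _example_gray2jet():
    assert gray2jet(0.0) == (128, 0, 0)    # blue end (BGR order)
    assert gray2jet(0.5) == (0, 255, 255)  # yellow midpoint
    assert gray2jet(1.0) == (0, 0, 255)    # red end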
def putText_with_TNR(img, x, y, size, fontColor, bgColor, string):
thickness = 2
font_scale = 1.1
font = cv2.FONT_HERSHEY_SIMPLEX
s = cv2.getTextSize(string, font, font_scale, thickness)
cv2.rectangle(
img,
(x + thickness, y + thickness),
(x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2),
# (0, 140, 255),
fontColor,
cv2.FILLED,
lineType=cv2.LINE_AA)
position = (x + thickness + 1, y + thickness + s[0][1] + 1)
cv2.putText(img, string, position, font, font_scale, (255, 255, 255),
thickness, cv2.LINE_AA)
return img
# NOTE: the PIL-based rendering below is unreachable because of the early
# `return img` above; it was kept from an earlier Times New Roman font path.
# from OpenCV to PIL
font = "/home/chenzhiwei/Documents/myFonts/timesnewroman.ttf"
img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font = ImageFont.truetype(font, size)
position = (x + 3, y - 2)
draw = ImageDraw.Draw(img_PIL)
offsetx, offsety = font.getoffset(string)
width, height = font.getsize(string)
draw.rectangle((offsetx + x + 2, offsety + y - 3, offsetx + x + width + 3,
offsety + y + height - 3),
fill=bgColor)
draw.text(position, string, font=font, fill=fontColor)
# back to OpenCV type
img_OpenCV = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
return img_OpenCV
| 37.341808
| 87
| 0.504577
| 1,711
| 13,219
| 3.644652
| 0.135593
| 0.028223
| 0.040411
| 0.051956
| 0.550513
| 0.50898
| 0.444997
| 0.36594
| 0.310295
| 0.269885
| 0
| 0.035761
| 0.367501
| 13,219
| 353
| 88
| 37.447592
| 0.710083
| 0.069219
| 0
| 0.305455
| 0
| 0
| 0.031811
| 0.006036
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025455
| false
| 0.003636
| 0.043636
| 0
| 0.083636
| 0.003636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a72547959131b60da1f328cdda0445ca0ed7eb
| 13,740
|
py
|
Python
|
salt/runner.py
|
StepOneInc/salt
|
ee210172c37bf0cee224794cd696b38e288e4073
|
[
"Apache-2.0"
] | 1
|
2016-04-26T03:42:32.000Z
|
2016-04-26T03:42:32.000Z
|
salt/runner.py
|
apergos/salt
|
106c715d495a9c2bd747c8ca75745236b0d7fb41
|
[
"Apache-2.0"
] | null | null | null |
salt/runner.py
|
apergos/salt
|
106c715d495a9c2bd747c8ca75745236b0d7fb41
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import collections
import logging
import time
import sys
import multiprocessing
# Import salt libs
import salt.exceptions
import salt.loader
import salt.minion
import salt.utils
import salt.utils.args
import salt.utils.event
from salt.client import mixins
from salt.output import display_output
from salt.utils.error import raise_error
from salt.utils.event import tagify
import salt.ext.six as six
log = logging.getLogger(__name__)
class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
'''
The interface used by the :command:`salt-run` CLI tool on the Salt Master
It executes :ref:`runner modules <all-salt.runners>` which run on the Salt
Master.
Importing and using ``RunnerClient`` must be done on the same machine as
the Salt Master and it must be done using the same user that the Salt
Master is running as.
Salt's :conf_master:`external_auth` can be used to authenticate calls. The
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
client = 'runner'
tag_prefix = 'run'
def __init__(self, opts):
self.opts = opts
self.functions = salt.loader.runner(opts) # Must be self.functions for mixin to work correctly :-/
self.returners = salt.loader.returners(opts, self.functions)
self.outputters = salt.loader.outputters(opts)
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
def cmd(self, fun, arg, pub_data=None, kwarg=None):
'''
Execute a runner function
.. code-block:: python
>>> opts = salt.config.master_config('/etc/salt/master')
>>> runner = salt.runner.RunnerClient(opts)
>>> runner.cmd('jobs.list_jobs', [])
{
'20131219215650131543': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:56:50.131543',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
'20131219215921857715': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:59:21.857715',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
}
'''
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
'kwarg must be formatted as a dictionary'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
'pub_data must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(arg)
def _append_kwarg(arglist, kwarg):
'''
Append the kwarg dict to the arglist
'''
kwarg['__kwarg__'] = True
arglist.append(kwarg)
if kwarg:
try:
if isinstance(arglist[-1], dict) \
and '__kwarg__' in arglist[-1]:
for key, val in six.iteritems(kwarg):
if key in arglist[-1]:
log.warning(
'Overriding keyword argument {0!r}'.format(key)
)
arglist[-1][key] = val
else:
# No kwargs yet present in arglist
_append_kwarg(arglist, kwarg)
except IndexError:
# arglist is empty, just append
_append_kwarg(arglist, kwarg)
self._verify_fun(fun)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
jid = self.returners[fstr]()
log.debug('Runner starting with jid {0}'.format(jid))
self.event.fire_event({'runner_job': fun}, tagify([jid, 'new'], 'job'))
target = RunnerClient._thread_return
data = {'fun': fun, 'jid': jid, 'args': args, 'kwargs': kwargs}
args = (self, self.opts, data)
ret = jid
if self.opts.get('async', False):
process = multiprocessing.Process(
target=target, args=args
)
process.start()
else:
ret = target(*args)
return ret
@classmethod
def _thread_return(cls, instance, opts, data):
'''
The multiprocessing process calls back here
to stream returns
'''
# Runners modules runtime injection:
# - the progress event system with the correct jid
# - Provide JID if the runner wants to access it directly
done = {}
progress = salt.utils.event.get_runner_event(opts, data['jid']).fire_progress
for func_name, func in instance.functions.items():
if func.__module__ in done:
continue
mod = sys.modules[func.__module__]
mod.__jid__ = data['jid']
mod.__progress__ = progress
done[func.__module__] = mod
ret = instance.functions[data['fun']](*data['args'], **data['kwargs'])
# Sleep for just a moment to let any progress events return
time.sleep(0.1)
ret_load = {'return': ret, 'fun': data['fun'], 'fun_args': data['args']}
# Don't use the invoking process's event socket because it could be closed down by the time we arrive here.
# Create another, for safety's sake.
salt.utils.event.MasterEvent(opts['sock_dir']).fire_event(ret_load, tagify([data['jid'], 'return'], 'runner'))
try:
fstr = '{0}.save_runner_load'.format(opts['master_job_cache'])
instance.returners[fstr](data['jid'], ret_load)
except KeyError:
log.debug(
'The specified returner used for the master job cache '
'"{0}" does not have a save_runner_load function! The results '
'of this runner execution will not be stored.'.format(
opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
if opts.get('async', False):
return data['jid']
else:
return ret
def master_call(self, **kwargs):
'''
Execute a runner function through the master network interface (eauth).
'''
load = kwargs
load['cmd'] = 'runner'
sreq = salt.transport.Channel.factory(self.opts,
crypt='clear',
usage='master_call')
ret = sreq.send(load)
if isinstance(ret, collections.Mapping):
if 'error' in ret:
raise_error(**ret['error'])
return ret
def _reformat_low(self, low):
'''
Format the low data for RunnerClient()'s master_call() function
The master_call function here has a different function signature than
on WheelClient. So extract all the eauth keys and the fun key and
assume everything else is a kwarg to pass along to the runner function
to be called.
'''
auth_creds = dict([(i, low.pop(i)) for i in [
'username', 'password', 'eauth', 'token', 'client',
] if i in low])
reformatted_low = {'fun': low.pop('fun')}
reformatted_low.update(auth_creds)
reformatted_low['kwarg'] = low
return reformatted_low
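def _example_reformat_low_shape():
    # Editor's note: illustrative sketch only (not part of salt); the credential
    # values are made up. It shows the dict shape _reformat_low produces for an
    # eauth runner call: the eauth keys and 'fun' are lifted out, and everything
    # else becomes 'kwarg'.
    low = {'fun': 'jobs.list_jobs', 'username': 'saltdev',
           'password': 'saltdev', 'eauth': 'pam', 'outputter': 'json'}
    expected = {'fun': 'jobs.list_jobs', 'username': 'saltdev',
                'password': 'saltdev', 'eauth': 'pam',
                'kwarg': {'outputter': 'json'}}
    return low, expected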
def cmd_async(self, low):
'''
Execute a runner function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_async({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'pam',
})
'''
reformatted_low = self._reformat_low(low)
return self.master_call(**reformatted_low)
def cmd_sync(self, low, timeout=None):
'''
Execute a runner function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_sync({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'pam',
})
'''
sevent = salt.utils.event.get_event('master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts)
reformatted_low = self._reformat_low(low)
job = self.master_call(**reformatted_low)
ret_tag = tagify('ret', base=job['tag'])
timelimit = time.time() + (timeout or 300)
while True:
ret = sevent.get_event(full=True)
if ret is None:
if time.time() > timelimit:
raise salt.exceptions.SaltClientTimeout(
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
else:
continue
if ret['tag'] == ret_tag:
return ret['data']['return']
class Runner(RunnerClient):
'''
Execute the salt runner interface
'''
def print_docs(self):
'''
Print out the documentation!
'''
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
def run(self):
'''
Execute the runner sequence
'''
ret = {}
if self.opts.get('doc', False):
self.print_docs()
else:
try:
# Run the runner!
jid = super(Runner, self).cmd(
self.opts['fun'], self.opts['arg'], self.opts)
if self.opts.get('async', False):
log.info('Running in async mode. Results of this execution may '
'be collected by attaching to the master event bus or '
'by examining the master job cache, if configured.')
sys.exit(0)
rets = self.get_runner_returns(jid)
else:
rets = [jid]
# Gather the returns
for ret in rets:
if not self.opts.get('quiet', False):
if isinstance(ret, dict) and 'outputter' in ret and ret['outputter'] is not None:
print(self.outputters[ret['outputter']](ret['data']))
else:
salt.output.display_output(ret, '', self.opts)
except salt.exceptions.SaltException as exc:
ret = str(exc)
print(ret)
return ret
log.debug('Runner return: {0}'.format(ret))
return ret
def get_runner_returns(self, jid, timeout=None):
'''
Gather the return data from the event system, break hard when timeout
is reached.
'''
if timeout is None:
timeout = self.opts['timeout'] * 2
timeout_at = time.time() + timeout
last_progress_timestamp = time.time()
while True:
raw = self.event.get_event(timeout, full=True)
time.sleep(0.1)
# If we saw no events in the event bus timeout
# OR
# we have reached the total timeout
# AND
# have not seen any progress events for the length of the timeout.
if raw is None and (time.time() > timeout_at and
time.time() - last_progress_timestamp > timeout):
# Timeout reached
break
try:
if not raw['tag'].split('/')[1] == 'runner' and raw['tag'].split('/')[2] == jid:
continue
elif raw['tag'].split('/')[3] == 'progress' and raw['tag'].split('/')[2] == jid:
last_progress_timestamp = time.time()
yield {'data': raw['data']['data'], 'outputter': raw['data']['outputter']}
elif raw['tag'].split('/')[3] == 'return' and raw['tag'].split('/')[2] == jid:
yield raw['data']['return']
break
# Handle a findjob that might have been kicked off under the covers
elif raw['data']['fun'] == 'saltutil.findjob':
timeout_at = timeout_at + 10
continue
except (IndexError, KeyError):
continue
| 37.135135
| 118
| 0.527365
| 1,496
| 13,740
| 4.737299
| 0.235963
| 0.022577
| 0.011853
| 0.012417
| 0.158036
| 0.111472
| 0.071681
| 0.071681
| 0.071681
| 0.071681
| 0
| 0.012828
| 0.364556
| 13,740
| 369
| 119
| 37.235772
| 0.798878
| 0.252766
| 0
| 0.171296
| 0
| 0
| 0.113062
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050926
| false
| 0.00463
| 0.083333
| 0
| 0.194444
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a8bf88232fd22e170f70f6a4d8e344cbe114aa
| 4,257
|
py
|
Python
|
pong-pg.py
|
s-gv/pong-keras
|
38a0f25ae0e628f357512d085dc957720d83ece2
|
[
"0BSD"
] | null | null | null |
pong-pg.py
|
s-gv/pong-keras
|
38a0f25ae0e628f357512d085dc957720d83ece2
|
[
"0BSD"
] | null | null | null |
pong-pg.py
|
s-gv/pong-keras
|
38a0f25ae0e628f357512d085dc957720d83ece2
|
[
"0BSD"
] | null | null | null |
# Copyright (c) 2019 Sagar Gubbi. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import numpy as np
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from tensorflow.keras.optimizers import RMSprop, Adam
import tensorflow.keras.backend as K
env = gym.make('PongDeterministic-v4')
UP_ACTION = 2
DOWN_ACTION = 3
ACTIONS = [UP_ACTION, DOWN_ACTION]
# Neural net model takes the state and outputs action and value for that state
model = Sequential([
Dense(512, activation='elu', input_shape=(2*6400,)),
Dense(len(ACTIONS), activation='softmax'),
])
model.compile(optimizer=RMSprop(1e-4), loss='sparse_categorical_crossentropy')
gamma = 0.99
# preprocess frames
def prepro(I):
""" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector. http://karpathy.github.io/2016/05/31/rl/ """
if I is None: return np.zeros((6400,))
I = I[35:195] # crop
I = I[::2,::2,0] # downsample by factor of 2
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(float).ravel()  # np.float was removed in NumPy 1.24; the builtin float (float64) is equivalent
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward. http://karpathy.github.io/2016/05/31/rl/ """
discounted_r = np.zeros((len(r),))
running_add = 0
for t in reversed(range(0, len(r))):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
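# Editor's note: worked example of the discounting rule above (sketch only,
# not called by train/test; assumes gamma = 0.99 as defined in this file).
# A non-zero reward marks a Pong game boundary, so the running sum resets there
# and earlier steps receive exponentially discounted credit.
def _example_discount_rewards():
    out = discount_rewards(np.array([0.0, 0.0, 1.0]))
    assert np.allclose(out, [0.99 ** 2, 0.99, 1.0])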
def train():
reward_sums = []
for ep in range(2000):
Xs, ys, rewards = [], [], []
prev_obs, obs = None, env.reset()
for t in range(99000):
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
Xs.append(x)
ys.append(ya)
rewards.append(reward)
#if reward != 0: print(f'Episode {ep} -- step: {t}, ya: {ya}, reward: {reward}')
if done:
Xs = np.array(Xs)
ys = np.array(ys)
discounted_rewards = discount_rewards(rewards)
advantages = (discounted_rewards - discounted_rewards.mean()) / discounted_rewards.std()
print(f'adv: {np.min(advantages):.2f}, {np.max(advantages):.2f}')
model.fit(Xs, ys, sample_weight=advantages, epochs=1, batch_size=1024)
reward_sum = sum(rewards)
reward_sums.append(reward_sum)
avg_reward_sum = sum(reward_sums[-50:]) / len(reward_sums[-50:])
print(f'Episode {ep} -- reward_sum: {reward_sum}, avg_reward_sum: {avg_reward_sum}\n')
if ep % 20 == 0:
model.save_weights('params/model3.h5')
break
def test():
global env
env = gym.wrappers.Monitor(env, './tmp', video_callable=lambda ep_id: True, force=True)
model.load_weights('params/model3.h5')
reward_sum = 0
prev_obs, obs = None, env.reset()
for t in range(99000):
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
#ya = np.argmax(action_probs[0])
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
reward_sum += reward
if reward != 0:
print(f't: {t} -- reward: {reward}')
if done:
print(f't: {t} -- reward_sum: {reward_sum}')
break
def main():
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
test()
else:
train()
if __name__ == '__main__':
main()
| 33.257813
| 116
| 0.597369
| 581
| 4,257
| 4.263339
| 0.375215
| 0.039968
| 0.016149
| 0.021801
| 0.248688
| 0.193783
| 0.176827
| 0.176827
| 0.152604
| 0.152604
| 0
| 0.040645
| 0.271788
| 4,257
| 127
| 117
| 33.519685
| 0.758387
| 0.183463
| 0
| 0.224719
| 0
| 0
| 0.087246
| 0.023188
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.089888
| 0
| 0.168539
| 0.044944
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a8ec1abea9d6f95b972cc7b4d65ddb840ef8b2
| 2,962
|
py
|
Python
|
dexp/cli/dexp_commands/crop.py
|
JoOkuma/dexp
|
6d9003384605b72f387d38b5befa29e4e2246af8
|
[
"BSD-3-Clause"
] | null | null | null |
dexp/cli/dexp_commands/crop.py
|
JoOkuma/dexp
|
6d9003384605b72f387d38b5befa29e4e2246af8
|
[
"BSD-3-Clause"
] | null | null | null |
dexp/cli/dexp_commands/crop.py
|
JoOkuma/dexp
|
6d9003384605b72f387d38b5befa29e4e2246af8
|
[
"BSD-3-Clause"
] | null | null | null |
import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
"--quantile",
"-q",
default=0.99,
type=float,
help="Quantile parameter for lower bound of brightness for thresholding.",
show_default=True,
)
@click.option(
"--reference-channel",
"-rc",
default=None,
help="Reference channel to estimate cropping. If no provided it picks the first one.",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--chunks", "-chk", default=None, help="Dataset chunks dimensions, e.g. (1, 126, 512, 512).")
@click.option(
"--codec",
"-z",
default=DEFAULT_CODEC,
help="Compression codec: zstd for ’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ ",
show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--workers",
"-wk",
default=-4,
help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| ",
show_default=True,
) #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True) #
def crop(
input_paths,
output_path,
channels,
quantile,
reference_channel,
store,
chunks,
codec,
clevel,
overwrite,
workers,
check,
):
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_crop")
channels = _parse_channels(input_dataset, channels)
if reference_channel is None:
reference_channel = input_dataset.channels()[0]
chunks = _parse_chunks(chunks)
with asection(
f"Cropping from: {input_paths} to {output_path} for channels: {channels}, "
f"using channel {reference_channel} as a reference."
):
dataset_crop(
input_dataset,
output_path,
channels=channels,
reference_channel=reference_channel,
quantile=quantile,
store=store,
chunks=chunks,
compression=codec,
compression_level=clevel,
overwrite=overwrite,
workers=workers,
check=check,
)
input_dataset.close()
aprint("Done!")
| 32.549451
| 117
| 0.660365
| 359
| 2,962
| 5.281337
| 0.348189
| 0.063819
| 0.05538
| 0.063291
| 0.082278
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008082
| 0.20628
| 2,962
| 90
| 118
| 32.911111
| 0.798384
| 0.013842
| 0
| 0.13253
| 0
| 0.012048
| 0.274348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012048
| false
| 0
| 0.072289
| 0
| 0.084337
| 0.024096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22a950c4c4a0d6a5d8ae35400f9dc583d0a56a66
| 2,287
|
py
|
Python
|
morse_DMT/write_dipha_file_3d_revise.py
|
YinuoJin/DMT_loss
|
c6e66cb7997b7cd5616156faaf294e350e77c4c2
|
[
"MIT"
] | 1
|
2021-12-06T13:06:55.000Z
|
2021-12-06T13:06:55.000Z
|
morse_DMT/write_dipha_file_3d_revise.py
|
YinuoJin/DMT_loss
|
c6e66cb7997b7cd5616156faaf294e350e77c4c2
|
[
"MIT"
] | null | null | null |
morse_DMT/write_dipha_file_3d_revise.py
|
YinuoJin/DMT_loss
|
c6e66cb7997b7cd5616156faaf294e350e77c4c2
|
[
"MIT"
] | null | null | null |
import sys
from matplotlib import image as mpimg
import numpy as np
import os
DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3
input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]
input_filenames = [name
for name in os.listdir(input_dir)
if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()
image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)
print(nx, ny, nz)
#sys.exit()
im_cube = np.zeros([nx, ny, nz])
i = 0
for name in input_filenames:
sys.stdout.flush()
print(i, name)
fileName = input_dir + "/" + name
im_cube[:, :, i] = mpimg.imread(fileName)
i = i + 1
print('writing dipha output...')
with open(dipha_output_filename, 'wb') as output_file:
# this is needed to verify you are giving dipha a dipha file
np.int64(DIPHA_CONST).tofile(output_file)
# this tells dipha that we are giving an image as input
np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
# number of points
np.int64(nx * ny * nz).tofile(output_file)
# dimension
np.int64(DIM).tofile(output_file)
# pixels in each dimension
np.int64(nx).tofile(output_file)
np.int64(ny).tofile(output_file)
np.int64(nz).tofile(output_file)
# pixel values
for k in range(nz):
sys.stdout.flush()
print('dipha - working on image', k)
for j in range(ny):
for i in range(nx):
val = int(-im_cube[i, j, k]*255)
'''
if val != 0 and val != -1:
print('val check:', val)
'''
np.float64(val).tofile(output_file)
output_file.close()
print('writing vert file')
with open(vert_filename, 'w') as vert_file:
for k in range(nz):
sys.stdout.flush()
print('verts - working on image', k)
for j in range(ny):
for i in range(nx):
vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
vert_file.close()
print(nx, ny, nz)
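# Editor's note: optional sanity-check sketch (not part of the original script).
# It reads back the 7 int64 header fields written above to verify the DIPHA
# layout: magic, file type, point count, dimension, nx, ny, nz.
def _check_dipha_header(path):
    with open(path, 'rb') as f:
        magic, ftype, n_points, dim, hx, hy, hz = np.fromfile(f, dtype=np.int64, count=7)
    assert magic == DIPHA_CONST and ftype == DIPHA_IMAGE_TYPE_CONST
    assert n_points == hx * hy * hz and dim == DIM
    return int(hx), int(hy), int(hz)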
| 29.701299
| 119
| 0.584609
| 335
| 2,287
| 3.865672
| 0.292537
| 0.07722
| 0.098842
| 0.044015
| 0.171429
| 0.135907
| 0.135907
| 0.112741
| 0.112741
| 0.06332
| 0
| 0.025672
| 0.284652
| 2,287
| 76
| 120
| 30.092105
| 0.765892
| 0.082204
| 0
| 0.203704
| 0
| 0
| 0.056168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0.12963
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22aabcb0f1d4d4e04e99859300806fd807e56ef4
| 1,223
|
py
|
Python
|
MetropolisMCMC.py
|
unrealTOM/MC
|
5a4cdf1ee11ef3d438f24dd38e894731103448ac
|
[
"MIT"
] | 4
|
2020-04-11T09:54:27.000Z
|
2021-08-18T07:06:52.000Z
|
MetropolisMCMC.py
|
unrealTOM/MC
|
5a4cdf1ee11ef3d438f24dd38e894731103448ac
|
[
"MIT"
] | null | null | null |
MetropolisMCMC.py
|
unrealTOM/MC
|
5a4cdf1ee11ef3d438f24dd38e894731103448ac
|
[
"MIT"
] | 5
|
2019-01-22T03:47:17.000Z
|
2022-02-14T18:09:07.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import math
def normal(mu,sigma,x): #normal distribution
return 1/(math.pi*2)**0.5/sigma*np.exp(-(x-mu)**2/2/sigma**2)
def eval(x):
return normal(-4,1,x) + normal(4,1,x)
#return 0.3*np.exp(-0.2*x**2)+0.7*np.exp(-0.2*(x-10)**2)
def ref(x_star,x): #normal distribution
return normal(x,10,x_star)
N = [100,500,1000,5000]
fig = plt.figure()
for i in range(4):
X = np.array([])
x = 0.1 #initialize x0 to be 0.1
for j in range(N[i]):
u = np.random.rand()
x_star = np.random.normal(x,10)
A = min(1,eval(x_star)/eval(x)) #*q(x,x_star)/p(x)/q(x_star,x))
if u < A:
x = x_star
X=np.hstack((X,x))
ax = fig.add_subplot(2,2,i+1)
ax.hist(X,bins=100,density=True)
x = np.linspace(-10,20,5000)
#ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant
ax.plot(x,eval(x)/2) #2 approximates the normalizing constant
ax.set_ylim(0,0.35)
ax.text(-9,0.25,'I=%d'%N[i])
fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
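# Editor's note: the Metropolis-Hastings acceptance step used in the loop above,
# factored into a reusable helper (illustrative sketch only). With the symmetric
# N(x, 10) proposal the q-ratio cancels, so only the target-density ratio remains.
def _example_mh_step(x, target=eval, proposal_sd=10):
    x_star = np.random.normal(x, proposal_sd)
    if np.random.rand() < min(1, target(x_star) / target(x)):
        return x_star
    return x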
| 29.829268
| 71
| 0.623058
| 236
| 1,223
| 3.182203
| 0.360169
| 0.046605
| 0.023968
| 0.066578
| 0.245007
| 0.130493
| 0
| 0
| 0
| 0
| 0
| 0.081592
| 0.17825
| 1,223
| 40
| 72
| 30.575
| 0.665672
| 0.277187
| 0
| 0
| 0
| 0
| 0.06865
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0.1
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22ac5683811849c14d8a103b4887cbd79b2ac236
| 9,338
|
py
|
Python
|
core/simulators/carla_scenario_simulator.py
|
RangiLyu/DI-drive
|
f7db2e7b19d70c05184d6d6edae6b7e035a324d7
|
[
"Apache-2.0"
] | null | null | null |
core/simulators/carla_scenario_simulator.py
|
RangiLyu/DI-drive
|
f7db2e7b19d70c05184d6d6edae6b7e035a324d7
|
[
"Apache-2.0"
] | null | null | null |
core/simulators/carla_scenario_simulator.py
|
RangiLyu/DI-drive
|
f7db2e7b19d70c05184d6d6edae6b7e035a324d7
|
[
"Apache-2.0"
] | null | null | null |
import os
from typing import Any, Dict, List, Optional
import carla
from core.simulators.carla_simulator import CarlaSimulator
from core.simulators.carla_data_provider import CarlaDataProvider
from .srunner.scenarios.route_scenario import RouteScenario, SCENARIO_CLASS_DICT
from .srunner.scenariomanager.scenario_manager import ScenarioManager
class CarlaScenarioSimulator(CarlaSimulator):
"""
Carla simulator used to run scenarios.
The simulator loads the configs of the provided scenario and creates the hero actor, npc vehicles, walkers and
world map according to it. The sensors and running status are set up as in the common Carla simulator.
When created, it will set up the Carla client according to the arguments, set basic simulator configurations
used throughout its lifetime, and set some default running configurations.
If no traffic manager port is provided, it will find a random free port on the system.
:Arguments:
- cfg (Dict): Config Dict.
- client (carla.Client, optional): Already established Carla client. Defaults to None.
- host (str, optional): TCP host Carla client link to. Defaults to 'localhost'.
- port (int, optional): TCP port Carla client link to. Defaults to 9000.
- tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None.
- timeout (float, optional): Carla client link timeout. Defaults to 10.0.
:Interfaces:
init, get_state, get_sensor_data, get_navigation, get_information, apply_control, run_step, clean_up
:Properties:
- town_name (str): Current town name.
- hero_player (carla.Actor): hero actor in simulation.
- collided (bool): Whether collided in current episode.
- end_distance (float): Distance to target in current frame.
- end_timeout (float): Timeout for entire route provided by planner.
- total_distance (float): Distance for entire route provided by planner.
- scenario_manager (Any): Scenario Manager instance used to get running state.
"""
config = dict(
town='Town01',
weather='random',
sync_mode=True,
delta_seconds=0.1,
no_rendering=False,
auto_pilot=False,
n_vehicles=0,
n_pedestrians=0,
disable_two_wheels=False,
col_threshold=400,
resolution=1.0,
waypoint_num=20,
obs=list(),
planner=dict(),
aug=None,
verbose=True,
debug=False,
)
def __init__(
self,
cfg: Dict,
client: Optional[carla.Client] = None,
host: str = 'localhost',
port: int = 9000,
tm_port: int = 9050,
timeout: float = 10.0,
**kwargs
) -> None:
"""
Init Carla scenario simulator.
"""
super().__init__(cfg, client, host, port, tm_port, timeout)
self._resolution = self._cfg.resolution
self._scenario = None
self._start_scenario = False
self._manager = ScenarioManager(self._debug, self._sync_mode, self._client_timeout)
self._criteria_status = dict()
def init(self, config: Any) -> None:
"""
Init simulator episode with provided args.
This method takes a scenario configuration instance to set up scenarios in the Carla server. The scenario could
be a single scenario, or a route scenario together with several scenarios encountered while navigating the
route. A scenario manager is used to manage and check the running status and tick scenarios. A local planner is
set to trace the route and generate the target waypoint and road option in each tick. It will set up the world,
map, vehicles and pedestrians according to the provided args and default configs, and reset the running status.
If no collision happens when creating actors, the init will end and return.
:Arguments:
- config (Any): Scenario configuration instance, containing information about the scenarios.
"""
self._scenario_config = config
self.clean_up()
self._set_town(config.town)
self._set_weather(self._weather)
self._blueprints = self._world.get_blueprint_library()
while True:
self.clean_up()
CarlaDataProvider.set_client(self._client)
CarlaDataProvider.set_world(self._world)
CarlaDataProvider.set_traffic_manager_port(self._tm.get_port())
if CarlaDataProvider.get_map().name != config.town and CarlaDataProvider.get_map().name != "OpenDriveMap":
print("WARNING: The CARLA server uses the wrong map: {}".format(CarlaDataProvider.get_map().name))
print("WARNING: This scenario requires to use map: {}".format(config.town))
print("[SIMULATOR] Preparing scenario: " + config.name)
config.n_vehicles = self._n_vehicles
config.disable_two_wheels = self._disable_two_wheels
if "RouteScenario" in config.name:
self._scenario = RouteScenario(
world=self._world, config=config, debug_mode=self._debug, resolution=self._resolution
)
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_route(CarlaDataProvider.get_hero_vehicle_route(), clean=True)
self._total_distance = self._planner.distance_to_goal
self._end_timeout = self._scenario.route_timeout
else:
# select scenario
if config.type in SCENARIO_CLASS_DICT:
scenario_class = SCENARIO_CLASS_DICT[config.type]
ego_vehicles = []
for vehicle in config.ego_vehicles:
ego_vehicles.append(
CarlaDataProvider.request_new_actor(
vehicle.model,
vehicle.transform,
vehicle.rolename,
True,
color=vehicle.color,
actor_category=vehicle.category
)
)
self._scenario = scenario_class(
world=self._world, ego_vehicles=ego_vehicles, config=config, debug_mode=self._debug
)
else:
raise RuntimeError("Scenario '{}' not support!".format(config.type))
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_destination(config.route.data[0], config.route.data[1], clean=True)
self._total_distance = self._planner.distance_to_goal
self._spawn_pedestrians()
if self._ready():
if self._debug:
self._count_actors()
break
def run_step(self) -> None:
"""
Run one simulation step.
This will tick the Carla world and scenarios, and update information for all sensors and measurements.
"""
if not self._start_scenario:
self._manager.start_scenario()
self._start_scenario = True
self._tick += 1
world_snapshot = self._world.get_snapshot()
timestamp = world_snapshot.timestamp
self._timestamp = timestamp.elapsed_seconds
self._manager.tick_scenario(timestamp)
if self._planner is not None:
self._planner.run_step()
self._collided = self._collision_sensor.collided
self._traffic_light_helper.tick()
if self._bev_wrapper is not None:
if CarlaDataProvider._hero_vehicle_route is not None:
self._bev_wrapper.tick()
def get_criteria(self) -> List:
"""
Get the criteria status list of the scenario in the current frame. Only criteria related to the hero actor are counted.
:Returns:
List: Criteria list of scenario.
"""
criterion_list = self._manager.analyze_tick()
for name, actor_id, result, actual_value, expected_value in criterion_list:
if actor_id == self._hero_actor.id:
self._criteria_status.update({name: [result, actual_value, expected_value]})
return self._criteria_status
def end_scenario(self) -> None:
"""
End current scenario. Must be called before ending an episode.
"""
if self._start_scenario:
self._manager.end_scenario()
self._start_scenario = False
def clean_up(self) -> None:
"""
Destroy all actors and sensors in the current world. Clear all messages saved in the simulator and data
provider, and clean up running scenarios. This will NOT destroy the Carla client, so the simulator can use the
same Carla client to start the next episode.
"""
if self._manager is not None:
self._manager.clean_up()
self._criteria_status.clear()
super().clean_up()
@property
def scenario_manager(self) -> Any:
return self._manager
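# Editor's note: hypothetical driving-loop sketch (not part of DI-drive); it only
# shows the intended call order of the methods defined above, with `simulator`
# and `scenario_config` assumed to be constructed elsewhere.
def _example_episode(simulator, scenario_config, n_steps=100):
    simulator.init(scenario_config)
    criteria = {}
    for _ in range(n_steps):
        simulator.run_step()
        criteria = simulator.get_criteria()
    simulator.end_scenario()
    simulator.clean_up()
    return criteria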
| 41.502222
| 119
| 0.624331
| 1,057
| 9,338
| 5.306528
| 0.257332
| 0.019611
| 0.015154
| 0.009093
| 0.116777
| 0.096096
| 0.069531
| 0.058477
| 0.058477
| 0.058477
| 0
| 0.005521
| 0.301671
| 9,338
| 224
| 120
| 41.6875
| 0.854624
| 0.310666
| 0
| 0.104478
| 0
| 0
| 0.032507
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052239
| false
| 0
| 0.052239
| 0.007463
| 0.134328
| 0.029851
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22ad0b38c724e88cb9ecf306aa56fd0fb313ec45
| 3,325
|
py
|
Python
|
features/hdf_features.py
|
DerekYJC/bmi_python
|
7b9cf3f294a33688db24b0863c1035e9cc6999ea
|
[
"Apache-2.0"
] | null | null | null |
features/hdf_features.py
|
DerekYJC/bmi_python
|
7b9cf3f294a33688db24b0863c1035e9cc6999ea
|
[
"Apache-2.0"
] | null | null | null |
features/hdf_features.py
|
DerekYJC/bmi_python
|
7b9cf3f294a33688db24b0863c1035e9cc6999ea
|
[
"Apache-2.0"
] | null | null | null |
'''
HDF-saving features
'''
import time
import tempfile
import random
import traceback
import numpy as np
import fnmatch
import os, sys
import subprocess
from riglib import calibrations, bmi
from riglib.bmi import extractor
from riglib.experiment import traits
import hdfwriter
class SaveHDF(object):
'''
Saves data from registered sources into tables in an HDF file
'''
def init(self):
'''
Secondary init function. See riglib.experiment.Experiment.init()
Prior to starting the task, this 'init' starts an HDFWriter sink.
'''
from riglib import sink
self.sinks = sink.sinks
self.h5file = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)
self.h5file.flush()
self.h5file.close()
self.hdf = sink.sinks.start(self.sink_class, filename=self.h5file.name)
super(SaveHDF, self).init()
@property
def sink_class(self):
'''
Specify the sink class as a function in case future descendant classes want to use a different type of sink
'''
return hdfwriter.HDFWriter
def run(self):
'''
Code to execute immediately prior to the beginning of the task FSM executing, or after the FSM has finished running.
See riglib.experiment.Experiment.run(). This 'run' method stops the HDF sink after the FSM has finished running
'''
try:
super(SaveHDF, self).run()
finally:
self.hdf.stop()
def join(self):
'''
Re-join any spawned process for cleanup
'''
self.hdf.join()
super(SaveHDF, self).join()
def set_state(self, condition, **kwargs):
'''
Save task state transitions to HDF
Parameters
----------
condition: string
Name of new state to transition into. The state name must be a key in the 'status' dictionary attribute of the task
Returns
-------
None
'''
self.hdf.sendMsg(condition)
super(SaveHDF, self).set_state(condition, **kwargs)
def record_annotation(self, msg):
""" Record a user-input annotation """
self.hdf.sendMsg("annotation: " + msg)
super(SaveHDF, self).record_annotation(msg)
print("Saved annotation to HDF: " + msg)
def get_h5_filename(self):
return self.h5file.name
def cleanup(self, database, saveid, **kwargs):
'''
See LogExperiment.cleanup for documentation
'''
super(SaveHDF, self).cleanup(database, saveid, **kwargs)
print("Beginning HDF file cleanup")
print("\tHDF data currently saved to temp file: %s" % self.h5file.name)
try:
print("\tRunning self.cleanup_hdf()")
self.cleanup_hdf()
        except Exception:
            print("\n\n\n\n\nError cleaning up HDF file!")
            traceback.print_exc()
# this 'if' is needed because the remote procedure call to save_data doesn't like kwargs
dbname = kwargs['dbname'] if 'dbname' in kwargs else 'default'
if dbname == 'default':
database.save_data(self.h5file.name, "hdf", saveid)
else:
database.save_data(self.h5file.name, "hdf", saveid, dbname=dbname)
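The SaveHDF feature above relies on cooperative multiple inheritance: each feature overrides lifecycle hooks (init/run/join/cleanup) and delegates via super(), so features compose in MRO order. A self-contained toy illustration of that pattern (all names here are stand-ins, not the real riglib API):

class Experiment(object):
    def run(self):
        print("running task FSM")

class SaveFeature(object):
    def run(self):
        try:
            super(SaveFeature, self).run()   # run the wrapped task
        finally:
            print("stopping sink")           # always clean up, like SaveHDF.run

class Task(SaveFeature, Experiment):
    pass

Task().run()  # prints "running task FSM", then "stopping sink"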
| 31.367925 | 127 | 0.61203 | 401 | 3,325 | 5.0399 | 0.38404 | 0.039584 | 0.047501 | 0.028699 | 0.067293 | 0.067293 | 0.038595 | 0.038595 | 0 | 0 | 0 | 0.004234 | 0.289624 | 3,325 | 105 | 128 | 31.666667 | 0.851397 | 0.294737 | 0 | 0.070175 | 0 | 0 | 0.097816 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140351 | false | 0 | 0.245614 | 0.017544 | 0.438596 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
22ad9d02328e75faf184ffbf1cc357191c9ff796 | 7,979 | py | Python | tf_crnn/libs/infer.py | sunmengnan/city_brain | 478f0b974f4491b4201956f37b83ce6860712bc8 | ["MIT"] | null | null | null | tf_crnn/libs/infer.py | sunmengnan/city_brain | 478f0b974f4491b4201956f37b83ce6860712bc8 | ["MIT"] | null | null | null | tf_crnn/libs/infer.py | sunmengnan/city_brain | 478f0b974f4491b4201956f37b83ce6860712bc8 | ["MIT"] | null | null | null |
import time
import os
import math
import numpy as np
from libs import utils
from libs.img_dataset import ImgDataset
from nets.crnn import CRNN
from nets.cnn.paper_cnn import PaperCNN
import shutil
def calculate_accuracy(predicts, labels):
"""
    :param predicts: decoded prediction results
    :param labels: ground-truth labels
    :return: accuracy
"""
assert len(predicts) == len(labels)
correct_count = 0
for i, p_label in enumerate(predicts):
if p_label == labels[i]:
correct_count += 1
acc = correct_count / len(predicts)
return acc, correct_count
def calculate_edit_distance_mean(edit_distences):
"""
    Compute the mean edit distance, excluding entries where edit_distance == 0
:param edit_distences:
:return:
"""
data = np.array(edit_distences)
data = data[data != 0]
if len(data) == 0:
return 0
return np.mean(data)
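A quick, hypothetical sanity check of the two helpers above:

preds = ['abc', 'abd', 'xyz']
labels = ['abc', 'abc', 'xyz']
acc, correct = calculate_accuracy(preds, labels)       # -> (0.666..., 2)
mean_ed = calculate_edit_distance_mean([0, 0, 2, 4])   # zeros excluded -> 3.0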
def validation(sess, feeds, fetches, dataset, converter, result_dir, name,
step=None, print_batch_info=False, copy_failed=False):
"""
    Saved file name: {step}_{acc_type}_{acc}.txt
    :param sess: tensorflow session
    :param feeds: dict of input placeholders for the network
    :param fetches: ops to evaluate (decoded predictions and edit distances)
    :param dataset: ImgDataset to iterate over
    :param converter: label converter used to decode predictions
    :param result_dir: root directory for result files
    :param name: val, test or infer; used to create a sub dir in result_dir
    :return:
"""
sess.run(dataset.init_op)
img_paths = []
predicts = []
trimed_predicts = []
labels = []
trimed_labels = []
edit_distances = []
total_batch_time = 0
for batch in range(dataset.num_batches):
img_batch, widths, label_batch, batch_labels, batch_img_paths = dataset.get_next_batch(sess)
if len(batch_labels) == 0:
continue
batch_start_time = time.time()
feed = {feeds['inputs']: img_batch,
feeds['labels']: label_batch,
feeds['sequence_length']: PaperCNN.get_sequence_lengths(widths),
feeds['is_training']: False}
try:
batch_predicts, edit_distance, batch_edit_distances = sess.run(fetches, feed)
except Exception:
print(batch_labels)
continue
batch_predicts = [converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in batch_predicts]
trimed_batch_predicts = [utils.remove_all_symbols(txt) for txt in batch_predicts]
trimed_batch_labels = [utils.remove_all_symbols(txt) for txt in batch_labels]
img_paths.extend(batch_img_paths)
predicts.extend(batch_predicts)
labels.extend(batch_labels)
trimed_predicts.extend(trimed_batch_predicts)
trimed_labels.extend(trimed_batch_labels)
edit_distances.extend(batch_edit_distances)
acc, correct_count = calculate_accuracy(batch_predicts, batch_labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_batch_predicts, trimed_batch_labels)
batch_time = time.time() - batch_start_time
total_batch_time += batch_time
if print_batch_info:
print("{:.03f}s [{}/{}] acc: {:.03f}({}/{}), edit_distance: {:.03f}, trim_acc {:.03f}({}/{})"
.format(batch_time, batch, dataset.num_batches,
acc, correct_count, dataset.batch_size,
edit_distance,
trimed_acc, trimed_correct_count, dataset.batch_size))
acc, correct_count = calculate_accuracy(predicts, labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_predicts, trimed_labels)
edit_distance_mean = calculate_edit_distance_mean(edit_distances)
total_edit_distance = sum(edit_distances)
acc_str = "Accuracy: {:.03f} ({}/{}), Trimed Accuracy: {:.03f} ({}/{})" \
"Total edit distance: {:.03f}, " \
"Average edit distance: {:.03f}, Average batch time: {:.03f}" \
.format(acc, correct_count, dataset.size,
trimed_acc, trimed_correct_count, dataset.size,
total_edit_distance, edit_distance_mean, total_batch_time / dataset.num_batches)
print(acc_str)
save_dir = os.path.join(result_dir, name)
utils.check_dir_exist(save_dir)
result_file_path = save_txt_result(save_dir, acc, step, labels, predicts, 'acc',
edit_distances, acc_str)
save_txt_result(save_dir, acc, step, labels, predicts, 'acc', edit_distances,
acc_str, only_failed=True)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances, only_failed=True)
save_txt_4_analyze(save_dir, labels, predicts, 'acc', step)
save_txt_4_analyze(save_dir, trimed_labels, trimed_predicts, 'tacc', step)
# Copy image not all match to a dir
# TODO: we will only save failed imgs for acc
if copy_failed:
failed_infer_img_dir = result_file_path[:-4] + "_failed"
if os.path.exists(failed_infer_img_dir) and os.path.isdir(failed_infer_img_dir):
shutil.rmtree(failed_infer_img_dir)
utils.check_dir_exist(failed_infer_img_dir)
failed_image_indices = []
for i, val in enumerate(edit_distances):
if val != 0:
failed_image_indices.append(i)
for i in failed_image_indices:
img_path = img_paths[i]
img_name = img_path.split("/")[-1]
dst_path = os.path.join(failed_infer_img_dir, img_name)
shutil.copyfile(img_path, dst_path)
failed_infer_result_file_path = os.path.join(failed_infer_img_dir, "result.txt")
with open(failed_infer_result_file_path, 'w', encoding='utf-8') as f:
for i in failed_image_indices:
p_label = predicts[i]
t_label = labels[i]
f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
return acc, trimed_acc, edit_distance_mean, total_edit_distance, correct_count, trimed_correct_count
def save_txt_4_analyze(save_dir, labels, predicts, acc_type, step):
"""
    Save the test set's ground-truth and predicted labels together in one txt file, to make later analysis easier
"""
txt_path = os.path.join(save_dir, '%d_%s_gt_and_pred.txt' % (step, acc_type))
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
f.write("{}__$__{}\n".format(t_label, p_label))
def save_txt_result(save_dir, acc, step, labels, predicts, acc_type,
edit_distances=None, acc_str=None, only_failed=False):
"""
:param acc_type: 'acc' or 'tacc'
:return:
"""
failed_suffix = ''
if only_failed:
failed_suffix = 'failed'
if step is not None:
txt_path = os.path.join(save_dir, '%d_%s_%.3f_%s.txt' % (step, acc_type, acc, failed_suffix))
else:
txt_path = os.path.join(save_dir, '%s_%.3f_%s.txt' % (acc_type, acc, failed_suffix))
print("Write result to %s" % txt_path)
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
all_match = (t_label == p_label)
if only_failed and all_match:
continue
# f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("all match: {}\n".format(1 if all_match else 0))
if edit_distances:
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
if acc_str:
f.write(acc_str + "\n")
return txt_path
| 36.104072 | 105 | 0.628525 | 1,045 | 7,979 | 4.495694 | 0.164593 | 0.040868 | 0.02086 | 0.02533 | 0.386122 | 0.298212 | 0.269689 | 0.258408 | 0.239676 | 0.167305 | 0 | 0.008609 | 0.257551 | 7,979 | 220 | 106 | 36.268182 | 0.784436 | 0.067427 | 0 | 0.158621 | 0 | 0 | 0.081251 | 0.002868 | 0 | 0 | 0 | 0.004545 | 0.006897 | 1 | 0.034483 | false | 0 | 0.062069 | 0 | 0.131034 | 0.041379 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
22ae7c79d1d1030557cb109b5f2d23a5d5fb88a4 | 5,706 | py | Python | modules/templates/RLPPTM/tools/mis.py | nursix/rlpptm | e7b50b2fdf6277aed5f198ca10ad773c5ca0b947 | ["MIT"] | 1 | 2022-03-21T21:58:30.000Z | 2022-03-21T21:58:30.000Z | modules/templates/RLPPTM/tools/mis.py | nursix/rlpptm | e7b50b2fdf6277aed5f198ca10ad773c5ca0b947 | ["MIT"] | null | null | null | modules/templates/RLPPTM/tools/mis.py | nursix/rlpptm | e7b50b2fdf6277aed5f198ca10ad773c5ca0b947 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Helper Script for Mass-Invitation of Participant Organisations
#
# RLPPTM Template Version 1.0
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/tools/mis.py
#
import os
import sys
from core import s3_format_datetime
from templates.RLPPTM.config import SCHOOLS
from templates.RLPPTM.helpers import InviteUserOrg
# Batch limit (set to False to disable)
BATCH_LIMIT = 250
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
log = None
def info(msg):
sys.stderr.write("%s" % msg)
if log:
log.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
if log:
log.write("%s\n" % msg)
# Load models for tables
otable = s3db.org_organisation
gtable = s3db.org_group
mtable = s3db.org_group_membership
utable = s3db.auth_user
oltable = s3db.org_organisation_user
pltable = s3db.pr_person_user
ctable = s3db.pr_contact
timestmp = s3_format_datetime(dtfmt="%Y%m%d%H%M%S")
LOGFILE = os.path.join(request.folder, "private", "mis_%s.log" % timestmp)
# -----------------------------------------------------------------------------
# Invite organisations
#
if not failed:
try:
with open(LOGFILE, "w", encoding="utf-8") as logfile:
log = logfile
join = [mtable.on((mtable.organisation_id == otable.id) & \
(mtable.deleted == False)),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.name == SCHOOLS) & \
(gtable.deleted == False)),
]
query = (otable.deleted == False)
organisations = db(query).select(otable.id,
otable.pe_id,
otable.name,
join = join,
orderby = otable.id,
)
total = len(organisations)
infoln("Total: %s Organisations" % total)
infoln("")
skipped = sent = failures = 0
invite_org = InviteUserOrg.invite_account
for organisation in organisations:
info("%s..." % organisation.name)
# Get all accounts that are linked to this org
organisation_id = organisation.id
join = oltable.on((oltable.user_id == utable.id) & \
(oltable.deleted == False))
left = pltable.on((pltable.user_id == utable.id) & \
(pltable.deleted == False))
query = (oltable.organisation_id == organisation_id)
rows = db(query).select(utable.id,
utable.email,
utable.registration_key,
pltable.pe_id,
join = join,
left = left,
)
if rows:
# There are already accounts linked to this organisation
invited, registered = [], []
for row in rows:
username = row.auth_user.email
if row.pr_person_user.pe_id:
registered.append(username)
else:
invited.append(username)
if registered:
infoln("already registered (%s)." % ", ".join(registered))
else:
infoln("already invited (%s)." % ", ".join(invited))
skipped += 1
continue
# Find email address
query = (ctable.pe_id == organisation.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
contact = db(query).select(ctable.value,
orderby = ctable.priority,
limitby = (0, 1),
).first()
if contact:
email = contact.value
info("(%s)..." % email)
else:
infoln("no email address.")
skipped += 1
continue
error = invite_org(organisation, email, account=None)
if not error:
sent += 1
infoln("invited.")
db.commit()
else:
failures += 1
infoln("invitation failed (%s)." % error)
if BATCH_LIMIT and sent >= BATCH_LIMIT:
infoln("Batch limit (%s) reached" % BATCH_LIMIT)
skipped = total - (sent + failures)
break
infoln("")
infoln("%s invitations sent" % sent)
infoln("%s invitations failed" % failures)
infoln("%s organisations skipped" % skipped)
log = None
except IOError:
infoln("...failed (could not create logfile)")
failed = True
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("PROCESS FAILED - Action rolled back.")
else:
db.commit()
infoln("PROCESS SUCCESSFUL.")
| 35.222222 | 88 | 0.45496 | 508 | 5,706 | 5.033465 | 0.336614 | 0.023465 | 0.015252 | 0.013297 | 0.027376 | 0.013297 | 0 | 0 | 0 | 0 | 0 | 0.007624 | 0.425342 | 5,706 | 161 | 89 | 35.440994 | 0.772187 | 0.119699 | 0 | 0.162393 | 0 | 0 | 0.072615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017094 | false | 0 | 0.042735 | 0 | 0.059829 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
22aeec83fb0e871521d1f1a2e9afa8b18858d4b4 | 728 | py | Python | engine/test_sysctl.py | kingsd041/os-tests | 2ea57cb6f1da534633a4670ccb83d40300989886 | ["Apache-2.0"] | null | null | null | engine/test_sysctl.py | kingsd041/os-tests | 2ea57cb6f1da534633a4670ccb83d40300989886 | ["Apache-2.0"] | null | null | null | engine/test_sysctl.py | kingsd041/os-tests | 2ea57cb6f1da534633a4670ccb83d40300989886 | ["Apache-2.0"] | null | null | null |
# coding = utf-8
# Create date: 2018-11-05
# Author: Hailong
def test_sysctl(ros_kvm_with_paramiko, cloud_config_url):
command = 'sudo cat /proc/sys/kernel/domainname'
feed_back = 'test'
client = ros_kvm_with_paramiko(cloud_config='{url}/test_sysctl.yml'.format(url=cloud_config_url))
stdin, stdout, stderr = client.exec_command(command, timeout=10)
output = stdout.read().decode('utf-8').replace('\n', '')
assert (feed_back == output)
command_b = 'sudo cat /proc/sys/dev/cdrom/debug'
feed_back_b = '1'
stdin, stdout, stderr = client.exec_command(command_b, timeout=10)
output_b = stdout.read().decode('utf-8').replace('\n', '')
client.close()
assert (feed_back_b == output_b)
| 36.4 | 101 | 0.68956 | 107 | 728 | 4.457944 | 0.46729 | 0.067086 | 0.08805 | 0.075472 | 0.42348 | 0.42348 | 0.42348 | 0 | 0 | 0 | 0 | 0.026016 | 0.15522 | 728 | 19 | 102 | 38.315789 | 0.749594 | 0.074176 | 0 | 0 | 0 | 0 | 0.164179 | 0.108955 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
22aeecf51ba4f5585bf276df470496e100ee4eac | 3,310 | py | Python | paprika_sync/core/management/commands/import_recipes_from_file.py | grschafer/paprika-sync | 8b6fcd6246557bb79009fa9355fd4d588fb8ed90 | ["MIT"] | null | null | null | paprika_sync/core/management/commands/import_recipes_from_file.py | grschafer/paprika-sync | 8b6fcd6246557bb79009fa9355fd4d588fb8ed90 | ["MIT"] | null | null | null | paprika_sync/core/management/commands/import_recipes_from_file.py | grschafer/paprika-sync | 8b6fcd6246557bb79009fa9355fd4d588fb8ed90 | ["MIT"] | null | null | null |
import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from paprika_sync.core.models import PaprikaAccount
from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer
from paprika_sync.core.utils import log_start_end
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Import all recipes from file to specified PaprikaAccount'
def add_arguments(self, parser):
parser.add_argument(
'file',
help='Path to json file containing list of all recipes',
)
parser.add_argument(
'--categories-file',
help='Path to json file containing list of all categories',
)
parser.add_argument(
'paprika_account_id',
type=int,
help='ID of PaprikaAccount to import recipes to',
)
parser.add_argument(
'-r', '--remove',
action='store_true',
help="Removes all of account's existing recipes before importing",
)
@log_start_end(logger)
def handle(self, *args, **options):
recipes_file = options['file']
categories_file = options['categories_file']
pa_id = options['paprika_account_id']
wipe_account = options['remove']
logger.info('Starting import for PaprikaAccount id %s from %s, wipe_account=%s', pa_id, recipes_file, wipe_account)
pa = PaprikaAccount.objects.get(id=pa_id)
with open(recipes_file, 'rt') as fin:
recipes = json.load(fin)
logger.info('Found %s recipes to import to %s', len(recipes), pa)
categories = []
if categories_file:
with open(categories_file, 'rt') as fin:
categories = json.load(fin)
logger.info('Found %s categories to import to %s', len(categories), pa)
with transaction.atomic():
if wipe_account:
pa.recipes.all().delete()
pa.categories.all().delete()
for category in categories:
category['paprika_account'] = pa.id
cs = CategorySerializer(data=category)
if cs.is_valid():
cs.save()
else:
logger.warning('Failed to import category %s (%s) due to errors: %s', category['uid'], category['name'], cs.errors)
for recipe in recipes:
# Remove categories if we're not bothering to import them
if not categories:
recipe['categories'] = []
recipe['paprika_account'] = pa.id
rs = RecipeSerializer(data=recipe)
if rs.is_valid():
rs.save()
else:
logger.warning('Failed to import recipe %s (%s) due to errors: %s', recipe['uid'], recipe['name'], rs.errors)
# recipe_field_names = set([f.name for f in Recipe._meta.fields])
# Recipe.objects.create(
# paprika_account=pa,
# **{k: v for k, v in recipe.items() if k in recipe_field_names},
# )
logger.info('Finished recipe import successfully')
# transaction.set_rollback(True)
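As a usage note, the arguments declared above imply an invocation like the following (hypothetical paths and account ID, assuming Django's standard manage.py entry point):

# python manage.py import_recipes_from_file recipes.json 42 \
#     --categories-file categories.json --remove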
| 35.978261 | 135 | 0.578248 | 375 | 3,310 | 4.981333 | 0.298667 | 0.025696 | 0.036403 | 0.030514 | 0.140257 | 0.125268 | 0.110278 | 0.043897 | 0.043897 | 0.043897 | 0 | 0 | 0.322659 | 3,310 | 91 | 136 | 36.373626 | 0.833185 | 0.080665 | 0 | 0.090909 | 0 | 0 | 0.224382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.242424 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
22b29bb3979813975d0a62cdf7e26438790eeb19 | 448 | py | Python | output/models/ms_data/element/elem_q017_xsd/elem_q017.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/element/elem_q017_xsd/elem_q017.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/element/elem_q017_xsd/elem_q017.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | null | null | null |
from dataclasses import dataclass, field
@dataclass
class FooTest:
class Meta:
name = "fooTest"
value: str = field(
init=False,
default="Hello"
)
@dataclass
class Root:
class Meta:
name = "root"
foo_test: str = field(
init=False,
default="Hello",
metadata={
"name": "fooTest",
"type": "Element",
"required": True,
}
)
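A minimal check of the defaults above (plain dataclass behavior, no xsdata machinery assumed):

r = Root()
assert r.foo_test == "Hello"        # init=False fields are fixed at their declared defaults
assert FooTest().value == "Hello"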
| 15.448276 | 40 | 0.5 | 41 | 448 | 5.439024 | 0.560976 | 0.125561 | 0.116592 | 0.152466 | 0.26009 | 0.26009 | 0 | 0 | 0 | 0 | 0 | 0 | 0.386161 | 448 | 28 | 41 | 16 | 0.810909 | 0 | 0 | 0.272727 | 0 | 0 | 0.113839 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
22b2c7ab0a465a4d5e5a4f3cd082436d406520c8 | 43,545 | py | Python | contrib_src/predict.py | modelhub-ai/mic-dkfz-brats | 4522a26442f1e323f97aa45fbd5047bfe9029b2b | ["MIT"] | 1 | 2020-01-09T11:45:26.000Z | 2020-01-09T11:45:26.000Z | contrib_src/predict.py | modelhub-ai/mic-dkfz-brats | 4522a26442f1e323f97aa45fbd5047bfe9029b2b | ["MIT"] | null | null | null | contrib_src/predict.py | modelhub-ai/mic-dkfz-brats | 4522a26442f1e323f97aa45fbd5047bfe9029b2b | ["MIT"] | null | null | null |
import json
import os
from collections import OrderedDict
from copy import deepcopy
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation # resize_softmax_output
from skimage.transform import resize
from torch.optim import lr_scheduler
from torch import nn
import numpy as np
import torch
from scipy.ndimage import binary_fill_holes
'''
This code is not intended to be looked at by anyone. It is messy. It is undocumented.
And the entire training pipeline is missing.
'''
max_num_filters_3d = 320
max_num_filters_2d = 480
join = os.path.join
def load_json(file):
with open(file, 'r') as f:
a = json.load(f)
return a
def resize_image(image, old_spacing, new_spacing, order=3, cval=0):
new_shape = (int(np.round(old_spacing[0]/new_spacing[0]*float(image.shape[0]))),
int(np.round(old_spacing[1]/new_spacing[1]*float(image.shape[1]))),
int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2]))))
if any([i != j for i, j in zip(image.shape, new_shape)]):
res = resize(image, new_shape, order=order, mode='edge', cval=cval)
else:
res = image
return res
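A small numeric example of resize_image (using the numpy and skimage imports above): resampling a 2 mm isotropic volume to 1 mm doubles every axis.

img = np.random.rand(10, 10, 10)
res = resize_image(img, old_spacing=(2, 2, 2), new_spacing=(1, 1, 1))
assert res.shape == (20, 20, 20)   # round(2/1 * 10) per axis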
class ConvDropoutNormNonlin(nn.Module):
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = nn.LeakyReLU(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):
if kwargs is None:
kwargs = {}
if new_shape is not None:
old_shape = np.array(image.shape[-len(new_shape):])
else:
assert shape_must_be_divisible_by is not None
assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))
new_shape = image.shape[-len(shape_must_be_divisible_by):]
old_shape = new_shape
num_axes_nopad = len(image.shape) - len(new_shape)
new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]
if not isinstance(new_shape, np.ndarray):
new_shape = np.array(new_shape)
if shape_must_be_divisible_by is not None:
if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):
shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)
else:
assert len(shape_must_be_divisible_by) == len(new_shape)
for i in range(len(new_shape)):
if new_shape[i] % shape_must_be_divisible_by[i] == 0:
new_shape[i] -= shape_must_be_divisible_by[i]
new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])
difference = new_shape - old_shape
pad_below = difference // 2
pad_above = difference // 2 + difference % 2
pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])
res = np.pad(image, pad_list, mode, **kwargs)
if not return_slicer:
return res
else:
pad_list = np.array(pad_list)
pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]
slicer = list(slice(*i) for i in pad_list)
return res, slicer
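A quick example of pad_nd_image with a divisibility constraint (hypothetical shapes): leading non-spatial axes are left alone and each constrained axis is padded up to the next multiple, split as evenly as possible between the two sides.

x = np.zeros((1, 13, 17))
padded, slicer = pad_nd_image(x, shape_must_be_divisible_by=[8, 8], return_slicer=True)
assert padded.shape == (1, 16, 24)
assert padded[tuple(slicer)].shape == x.shape   # the slicer recovers the original region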
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
self.input_shape_must_be_divisible_by = None
self.conv_op = None
super(NeuralNetwork, self).__init__()
self.inference_apply_nonlin = lambda x:x
def predict_3D(self, x, do_mirroring, num_repeats=1, use_train_mode=False, batch_size=1, mirror_axes=(2, 3, 4),
tiled=False, tile_in_z=True, step=2, patch_size=None, regions_class_order=None, use_gaussian=False,
pad_border_mode="edge", pad_kwargs=None):
"""
:param x: (c, x, y , z)
:param do_mirroring:
:param num_repeats:
:param use_train_mode:
:param batch_size:
:param mirror_axes:
:param tiled:
:param tile_in_z:
:param step:
:param patch_size:
:param regions_class_order:
:param use_gaussian:
:return:
"""
current_mode = self.training
if use_train_mode is not None and use_train_mode:
self.train()
elif use_train_mode is not None and not use_train_mode:
self.eval()
else:
pass
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if self.conv_op == nn.Conv3d:
if tiled:
res = self._internal_predict_3D_3Dconv_tiled(x, num_repeats, batch_size, tile_in_z, step, do_mirroring,
mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_3Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
elif self.conv_op == nn.Conv2d:
if tiled:
res = self._internal_predict_3D_2Dconv_tiled(x, do_mirroring, num_repeats, batch_size, mirror_axes,
step, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_2Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
if use_train_mode is not None:
self.train(current_mode)
return res
def _internal_maybe_mirror_and_pred_3D(self, x, num_repeats, mirror_axes, do_mirroring=True):
with torch.no_grad():
a = torch.zeros(x.shape).float()
if self.get_device() == "cpu":
a = a.cpu()
else:
a = a.cuda(self.get_device())
if do_mirroring:
mirror_idx = 8
else:
mirror_idx = 1
all_preds = []
for i in range(num_repeats):
for m in range(mirror_idx):
data_for_net = np.array(x)
do_stuff = False
if m == 0:
do_stuff = True
pass
if m == 1 and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, ::-1]
if do_stuff:
_ = a.data.copy_(torch.from_numpy(np.copy(data_for_net)))
p = self.inference_apply_nonlin(self(a))
p = p.data.cpu().numpy()
if m == 0:
pass
if m == 1 and (4 in mirror_axes):
p = p[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
p = p[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
p = p[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, ::-1, ::-1]
all_preds.append(p)
return np.vstack(all_preds)
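The method above is test-time augmentation by mirroring: each of the eight cases flips some subset of the spatial axes on the way into the network, then applies the same flip to the prediction on the way out, so all repeats are aligned before they are stacked and averaged. A tiny sketch of why flipping back restores alignment:

vol = np.arange(8).reshape(1, 1, 2, 2, 2)                # (b, c, x, y, z), as used above
flipped = vol[:, :, :, :, ::-1]                          # flip the last spatial axis
assert np.array_equal(flipped[:, :, :, :, ::-1], vol)    # the same flip undoes it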
def _internal_predict_3D_3Dconv(self, x, do_mirroring, num_repeats, min_size=None, BATCH_SIZE=None,
mirror_axes=(2, 3, 4), regions_class_order=None, pad_border_mode="edge",
pad_kwargs=None):
with torch.no_grad():
x, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by)
#x, old_shape = pad_patient_3D_incl_c(x, self.input_shape_must_be_divisible_by, min_size)
new_shp = x.shape
data = np.zeros(tuple([1] + list(new_shp)), dtype=np.float32)
data[0] = x
if BATCH_SIZE is not None:
data = np.vstack([data] * BATCH_SIZE)
stacked = self._internal_maybe_mirror_and_pred_3D(data, num_repeats, mirror_axes, do_mirroring)
slicer = [slice(0, stacked.shape[i]) for i in range(len(stacked.shape) - (len(slicer) - 1))] + slicer[1:]
stacked = stacked[slicer]
uncertainty = stacked.var(0)
bayesian_predictions = stacked
softmax_pred = stacked.mean(0)
if regions_class_order is None:
predicted_segmentation = softmax_pred.argmax(0)
else:
predicted_segmentation_shp = softmax_pred[0].shape
predicted_segmentation = np.zeros(predicted_segmentation_shp)
for i, c in enumerate(regions_class_order):
predicted_segmentation[softmax_pred[i] > 0.5] = c
return predicted_segmentation, bayesian_predictions, softmax_pred, uncertainty
def softmax_helper(x):
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
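For reference, softmax_helper is a numerically stabilized softmax over dim 1 (the max is subtracted before exponentiation); on current PyTorch it should agree with the built-in, as this quick check suggests:

t = torch.randn(2, 3, 4)
assert torch.allclose(softmax_helper(t), torch.softmax(t, dim=1), atol=1e-6)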
class InitWeights_He(object):
def __init__(self, neg_slope=1e-2):
self.neg_slope = neg_slope
def __call__(self, module):
if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
module.weight = nn.init.kaiming_normal_(module.weight, a=1e-2)
if module.bias is not None:
module.bias = nn.init.constant_(module.bias, 0)
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None):
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([ConvDropoutNormNonlin(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[ConvDropoutNormNonlin(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
def soft_dice(net_output, gt, smooth=1., smooth_in_nom=1.):
axes = tuple(range(2, len(net_output.size())))
intersect = sum_tensor(net_output * gt, axes, keepdim=False)
denom = sum_tensor(net_output + gt, axes, keepdim=False)
result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth))).mean()
return result
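In symbols, with $p$ the network output, $g$ the ground truth, and the sums running over the spatial axes, soft_dice returns the negated, smoothed soft Dice coefficient averaged over batch and class:

\[ \mathcal{L} = -\,\mathrm{mean}\left( \frac{2\sum_i p_i g_i + s_{\mathrm{nom}}}{\sum_i (p_i + g_i) + s} \right) \]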
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(ax)
return input
class Generic_UNet_Cotraining(SegmentationNetwork):
def __init__(self, input_channels, base_num_features, num_classes, num_conv_per_stage=2, num_downscale=4,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False):
"""
        Have fun looking at that one. This is my go-to model. I crammed the cotraining code in there somehow, so yeah.
What a mess.
You know what's the best part? No documentation. What a great piece of code.
:param input_channels:
:param base_num_features:
:param num_classes:
:param num_conv_per_stage:
:param num_downscale:
:param feat_map_mul_on_downscale:
:param conv_op:
:param conv_kwargs:
:param norm_op:
:param norm_op_kwargs:
:param dropout_op:
:param dropout_op_kwargs:
:param nonlin:
:param nonlin_kwargs:
:param deep_supervision:
:param dropout_in_localization:
:param final_nonlin:
:param weightInitializer:
:param pool_op_kernel_sizes:
:param upscale_logits:
:param convolutional_pooling:
:param convolutional_upsampling:
"""
super(Generic_UNet_Cotraining, self).__init__()
assert isinstance(num_classes, (list, tuple)), "for cotraining, num_classes must be list or tuple of int"
self.num_classes = num_classes
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p':0.5, 'inplace':True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
if pool_op_kernel_sizes is None:
if conv_op == nn.Conv2d:
pool_op_kernel_sizes = [(2, 2)] * num_downscale
elif conv_op == nn.Conv3d:
pool_op_kernel_sizes = [(2, 2, 2)] * num_downscale
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.final_nonlin = final_nonlin
assert num_conv_per_stage > 1, "this implementation does not support only one conv per stage"
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.do_ds = deep_supervision
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_downscale):
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d-1]
else:
first_stride = None
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
if self.conv_op == nn.Conv3d:
output_features = min(output_features, max_num_filters_3d)
else:
output_features = min(output_features, max_num_filters_2d)
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)))
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
for u in range(num_downscale):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_downscale-1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(nn.Upsample(scale_factor=pool_op_kernel_sizes[-(u+1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u+1)], pool_op_kernel_sizes[-(u+1)], bias=False))
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(nn.ModuleList([conv_op(self.conv_blocks_localization[ds][-1].output_channels, i, 1, 1, 0, 1, 1, False) for i in num_classes]))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_downscale - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(nn.Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl+1]]), mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
self.apply(self.weightInitializer)
self.test_return_output = 0
self.inference = False
def train(self, mode=True):
super(Generic_UNet_Cotraining, self).train(mode)
def eval(self):
super(Generic_UNet_Cotraining, self).eval()
def infer(self, infer):
self.train(False)
self.inference = infer
def forward(self, x):
#input_var = x
skips = []
seg_outputs = []
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
if not self.inference:
seg_outputs.append([self.final_nonlin(self.seg_outputs[u][i](x[(x.shape[0]//len(self.num_classes) * i): (x.shape[0]//len(self.num_classes) * (i+1))])) for i in range(len(self.num_classes))])
else:
seg_outputs.append(self.final_nonlin(self.seg_outputs[u][self.test_return_output](x)))
if self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
class NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE(object):
def __init__(self):
self.preprocessed_data_directory = None
# set through arguments from init
self.experiment_name = "baseline_inspired_by_decathlon 2_regions_cotraining brats dec sd ce"
self.experiment_description = "NetworkTrainerBraTS2018Baseline 2_regions_cotraining brats dec sd ce"
self.output_folder = 'model/params'
self.dataset_directory = None
self.device = 0
self.fold = 0
self.preprocessed_data_directory = None
self.gt_niftis_folder = None
# set in self.initialize()
self.network = None
self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \
self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = None # loaded automatically from plans_file
self.basic_generator_patch_size = self.data_aug_params = self.plans = None
self.was_initialized = False
self.also_val_in_tr_mode = False
self.dataset = None
self.inference_apply_nonlin = nn.Sigmoid()
def initialize(self, training=True):
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.output_folder = os.path.join(self.output_folder, "fold%d" % self.fold)
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.process_plans_file()
if training:
raise NotImplementedError
self.initialize_network_optimizer_and_scheduler()
self.network.inference_apply_nonlin = self.inference_apply_nonlin
self.was_initialized = True
def initialize_network_optimizer_and_scheduler(self):
net_numpool = max(self.net_pool_per_axis)
net_pool_kernel_sizes = []
for s in range(1, net_numpool+1):
this_pool_kernel_sizes = [1, 1, 1]
if self.net_pool_per_axis[0] >= s:
this_pool_kernel_sizes[0] = 2
if self.net_pool_per_axis[1] >= s:
this_pool_kernel_sizes[1] = 2
if len(self.patch_size)>2:
if self.net_pool_per_axis[2] >= s:
this_pool_kernel_sizes[2] = 2
else:
this_pool_kernel_sizes = this_pool_kernel_sizes[:-1]
net_pool_kernel_sizes.append(tuple(this_pool_kernel_sizes))
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.02, 'track_running_stats':False}
dropout_op_kwargs = {'p':0, 'inplace':True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
self.network = Generic_UNet_Cotraining(self.num_input_channels, self.base_num_features, self.num_classes, 2, net_numpool, 2,
conv_op, conv_kwargs, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x:x, InitWeights_He(1e-2),
net_pool_kernel_sizes, True, False, False)
self.optimizer = None
self.lr_scheduler = None
self.network.set_device(self.device)
def process_plans_file(self):
self.batch_size = 2
self.net_pool_per_axis = [4, 4, 4]
self.patch_size = (128, 128, 128)
self.intensity_properties = None
self.normalization_schemes = ["nonCT"] * 4
self.base_num_features = 30
self.num_input_channels = 4
self.do_dummy_2D_aug = False
self.use_mask_for_norm = True
self.only_keep_largest_connected_component = {(0, ): False}
if len(self.patch_size) == 2:
self.threeD = False
elif len(self.patch_size) == 3:
self.threeD = True
else:
raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size))
self.regions = ((1, 2, 3, 4), (2, 3, 4), (2,))
self.regions_class_order = (1, 3, 2)
self.batch_size = 2
self.base_num_features = 30
self.num_classes = (3, 3)
def predict_preprocessed_data_return_softmax(self, data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian):
return self.network.predict_3D(data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian=use_gaussian)[2]
def load_best_checkpoint(self, train=True):
self.load_checkpoint(os.path.join(self.output_folder, "model_best.model"), train=train)
def load_checkpoint(self, fname, train=True):
print("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize()
saved_model = torch.load(fname)
new_state_dict = OrderedDict()
for k, value in saved_model['state_dict'].items():
key = k
new_state_dict[key] = value
self.network.load_state_dict(new_state_dict)
self.epoch = saved_model['epoch']
if train:
optimizer_state_dict = saved_model['optimizer_state_dict']
if optimizer_state_dict is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
if len(saved_model['plot_stuff']) < 9:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob = saved_model['plot_stuff']
self.all_val_eval_metrics_dc_per_sample_std = []
else:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob, self.all_val_eval_metrics_dc_per_sample_std = saved_model['plot_stuff']
self.network.set_device(self.device)
def resize_softmax_output(softmax_output, new_shape, order=3):
'''
Resizes softmax output. Resizes each channel in c separately and fuses results back together
:param softmax_output: c x x x y x z
:param new_shape: x x y x z
:param order:
:return:
'''
tpe = softmax_output.dtype
new_shp = [softmax_output.shape[0]] + list(new_shape)
result = np.zeros(new_shp, dtype=softmax_output.dtype)
for i in range(softmax_output.shape[0]):
result[i] = resize(softmax_output[i].astype(float), new_shape, order, "constant", 0, True)
return result.astype(tpe)
def save_segmentation_nifti_softmax(softmax_output, dct, out_fname, order=3, region_class_order=None):
'''
segmentation must have the same spacing as the original nifti (for now). segmentation may have been cropped out
of the original image
:param segmentation:
:param dct:
:param out_fname:
:return:
'''
old_size = dct.get('size_before_cropping')
bbox = dct.get('brain_bbox')
if bbox is not None:
seg_old_size = np.zeros([softmax_output.shape[0]] + list(old_size))
for c in range(3):
bbox[c][1] = np.min((bbox[c][0] + softmax_output.shape[c+1], old_size[c]))
seg_old_size[:, bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1],
bbox[2][0]:bbox[2][1]] = softmax_output
else:
seg_old_size = softmax_output
segmentation = resize_softmax_output(seg_old_size, np.array(dct['size'])[[2, 1, 0]], order=order)
if region_class_order is None:
segmentation = segmentation.argmax(0)
else:
seg_old_spacing_final = np.zeros(segmentation.shape[1:])
for i, c in enumerate(region_class_order):
seg_old_spacing_final[segmentation[i] > 0.5] = c
segmentation = seg_old_spacing_final
return segmentation.astype(np.uint8)
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
def maybe_mkdir_p(directory):
splits = directory.split("/")[1:]
for i in range(0, len(splits)):
if not os.path.isdir(os.path.join("/", *splits[:i+1])):
os.mkdir(os.path.join("/", *splits[:i+1]))
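maybe_mkdir_p rebuilds an absolute, '/'-separated path one component at a time; on Python 3.2+ the standard library gives the same effect in one call (a drop-in sketch):

def maybe_mkdir_p_stdlib(directory):
    os.makedirs(directory, exist_ok=True)   # creates intermediate dirs, tolerates existing ones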
def convert_labels_back(seg):
new_seg = np.zeros(seg.shape, dtype=seg.dtype)
new_seg[seg == 1] = 2
new_seg[seg == 2] = 4
new_seg[seg == 3] = 1
return new_seg
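convert_labels_back maps the trainer's internal labels back to what appears to be the BraTS label convention (1 -> 2, 2 -> 4, 3 -> 1); a one-line check on a toy array:

assert np.array_equal(convert_labels_back(np.array([0, 1, 2, 3])), np.array([0, 2, 4, 1]))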
def preprocess_image(itk_image, is_seg=False, spacing_target=(1, 0.5, 0.5), brain_mask=None, cval=0):
"""
brain mask must be a numpy array that has the same shape as itk_image's pixel array. This function is not ideal but
gets the job done
:param itk_image:
:param is_seg:
:param spacing_target:
:param brain_mask:
:return:
"""
spacing = np.array(itk_image.GetSpacing())[[2, 1, 0]]
image = sitk.GetArrayFromImage(itk_image).astype(float)
if not is_seg:
if brain_mask is None:
brain_mask = (image!=image[0,0,0]).astype(float)
if np.any([[i!=j] for i, j in zip(spacing, spacing_target)]):
image = resize_image(image, spacing, spacing_target, 3, cval).astype(np.float32)
brain_mask = resize_image(brain_mask.astype(float), spacing, spacing_target, order=0).astype(int)
image[brain_mask==0] = 0
#subtract mean, divide by std. use heuristic masking
image[brain_mask!=0] -= image[brain_mask!=0].mean()
image[brain_mask!=0] /= image[brain_mask!=0].std()
else:
new_shape = (int(np.round(spacing[0] / spacing_target[0] * float(image.shape[0]))),
int(np.round(spacing[1] / spacing_target[1] * float(image.shape[1]))),
int(np.round(spacing[2] / spacing_target[2] * float(image.shape[2]))))
image = resize_segmentation(image, new_shape, 1, cval)
return image
def create_brain_masks(data):
"""
    data must be (b, c, x, y, z). The brain mask is a hole-filled binary mask of the voxels where all sequences are 0
    (this is a heuristic to recover a brain mask from brain-extracted MRI sequences, not an actual brain extraction)
:param data:
:return:
"""
shp = list(data.shape)
brain_mask = np.zeros(shp, dtype=np.float32)
for b in range(data.shape[0]):
for c in range(data.shape[1]):
this_mask = data[b, c] != 0
this_mask = binary_fill_holes(this_mask)
brain_mask[b, c] = this_mask
return brain_mask
def extract_brain_region(image, segmentation, outside_value=0):
brain_voxels = np.where(segmentation != outside_value)
minZidx = int(np.min(brain_voxels[0]))
maxZidx = int(np.max(brain_voxels[0]))
minXidx = int(np.min(brain_voxels[1]))
maxXidx = int(np.max(brain_voxels[1]))
minYidx = int(np.min(brain_voxels[2]))
maxYidx = int(np.max(brain_voxels[2]))
# resize images
resizer = (slice(minZidx, maxZidx), slice(minXidx, maxXidx), slice(minYidx, maxYidx))
return image[resizer], [[minZidx, maxZidx], [minXidx, maxXidx], [minYidx, maxYidx]]
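extract_brain_region crops to the bounding box of non-background voxels. Note that slice(min, max) excludes the voxel at the max index; the downstream caller compensates by recomputing the upper bound from the cropped shape. A toy example with hypothetical data:

seg = np.zeros((5, 5, 5))
seg[1:4, 1:4, 1:4] = 1                           # nonzero block at indices 1..3
img = np.arange(125, dtype=float).reshape(5, 5, 5)
cropped, bbox = extract_brain_region(img, seg)
assert bbox == [[1, 3], [1, 3], [1, 3]]
assert cropped.shape == (2, 2, 2)                # slice(1, 3): index 3 is excluded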
def load_and_preprocess(t1_file, t1km_file, t2_file, flair_file, seg_file=None, bet_file=None, encode_bet_mask_in_seg=False, label_conversion_fn=None):
images = {}
# t1
images["T1"] = sitk.ReadImage(t1_file)
# t1km
images["T1KM"] = sitk.ReadImage(t1km_file)
properties_dict = {
"spacing": images["T1"].GetSpacing(),
"direction": images["T1"].GetDirection(),
"size": images["T1"].GetSize(),
"origin": images["T1"].GetOrigin()
}
# t2
images["T2"] = sitk.ReadImage(t2_file)
# flair
images["FLAIR"] = sitk.ReadImage(flair_file)
if seg_file is not None:
images['seg'] = sitk.ReadImage(seg_file)
if bet_file is not None:
images['bet_mask'] = sitk.ReadImage(bet_file)
else:
t1_npy = sitk.GetArrayFromImage(images["T1"])
mask = create_brain_masks(t1_npy[None])[0].astype(int)
mask = sitk.GetImageFromArray(mask)
mask.CopyInformation(images["T1"])
images['bet_mask'] = mask
try:
images["t1km_sub"] = images["T1KM"] - images["T1"]
except RuntimeError:
tmp1 = sitk.GetArrayFromImage(images["T1KM"])
tmp2 = sitk.GetArrayFromImage(images["T1"])
res = tmp1 - tmp2
res_itk = sitk.GetImageFromArray(res)
res_itk.CopyInformation(images["T1"])
images["t1km_sub"] = res_itk
for k in ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub"]:
images[k] = sitk.Mask(images[k], images['bet_mask'], 0)
bet_numpy = sitk.GetArrayFromImage(images['bet_mask'])
for k in images.keys():
is_seg = (k == "seg") | (k == "bet_mask")
if is_seg:
cval = -1
else:
cval = 0
images[k] = preprocess_image(images[k], is_seg=is_seg,
spacing_target=(1., 1., 1.), brain_mask=np.copy(bet_numpy), cval=cval)
properties_dict['size_before_cropping'] = images["T1"].shape
mask = np.copy(images['bet_mask'])
for k in images.keys():
images[k], bbox = extract_brain_region(images[k], mask, False)
properties_dict['brain_bbox'] = bbox
if (label_conversion_fn is not None) and ("seg" in images.keys()):
images["seg"] = label_conversion_fn(images["seg"])
use_these = ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub", 'seg']
if (not encode_bet_mask_in_seg) or ("seg" not in images.keys()):
use_these.append("bet_mask")
else:
images["seg"][images["bet_mask"] <= 0] = -1
imgs = []
for seq in use_these:
if seq not in images.keys():
imgs.append(np.zeros(images["T1"].shape)[None])
else:
imgs.append(images[seq][None])
all_data = np.vstack(imgs)
return all_data, properties_dict
def segment(t1_file, t1ce_file, t2_file, flair_file, netLoc):
"""
Segments the passed files
"""
trainer = NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE()
trainer.initialize(False)
all_data, dct = load_and_preprocess(t1_file, t1ce_file, t2_file, flair_file, None, None,
True, None)
all_softmax = []
for fold in range(5):
trainer.output_folder = join(netLoc, "%d" % fold)
trainer.load_best_checkpoint(False)
trainer.network.infer(True)
trainer.network.test_return_output = 0
softmax = trainer.predict_preprocessed_data_return_softmax(all_data[:4], True, 1, False, 1, (2, 3, 4), False,
None, None, trainer.patch_size, True)
all_softmax.append(softmax[None])
softmax_consolidated = np.vstack(all_softmax).mean(0)
output = save_segmentation_nifti_softmax(softmax_consolidated, dct,
"tumor_isen2018_class.nii.gz", 1,
trainer.regions_class_order)
return output
| 43.807847 | 206 | 0.610288 | 5,743 | 43,545 | 4.347205 | 0.101515 | 0.018265 | 0.018625 | 0.013619 | 0.409998 | 0.32348 | 0.275375 | 0.2503 | 0.21353 | 0.192101 | 0 | 0.016607 | 0.286439 | 43,545 | 993 | 207 | 43.851964 | 0.786882 | 0.057021 | 0 | 0.247043 | 0 | 0 | 0.033667 | 0.002746 | 0 | 0 | 0 | 0 | 0.007884 | 1 | 0.055191 | false | 0.003942 | 0.015769 | 0.002628 | 0.114323 | 0.001314 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
22b364d4334f94cc1d058ea248dee07fc3c34b86 | 982 | py | Python | plot/finderror.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | ["MIT"] | null | null | null | plot/finderror.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | ["MIT"] | null | null | null | plot/finderror.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | ["MIT"] | null | null | null |
import os
basepath = '/home/archit/scratch/cartpoles/data/hyperparam/cartpole/offline_learning/esarsa-adam/'
dirs = os.listdir(basepath)
string = ''  # accumulates the report of runs whose logs are identical
for dir in dirs:
print(dir)
subbasepath = basepath + dir + '/'
subdirs = os.listdir(subbasepath)
for subdir in subdirs:
print(subdir)
subsubbasepath = subbasepath + subdir + '/'
subsubdirs = os.listdir(subsubbasepath)
string += subsubbasepath + '\n'
content = []
for i in range(0,len(subsubdirs)-1):
for j in range(i+1, len(subsubdirs)):
a = os.system('diff ' + subsubbasepath + subsubdirs[i] + '/log_json.txt ' + subsubbasepath + subsubdirs[j] + '/log_json.txt')
content.append([a, subsubdirs[i], subsubdirs[j]])
filteredcontent = [i for i in content if i[0] == 0]
for i in range(len(filteredcontent)):
string += ' and '.join(filteredcontent[i][1:])
if i != len(filteredcontent) - 1:
string += ', '
string += '\n\n'
f = open('offlinelearningerrors.txt','w')
f.write(string)
f.close()
| 33.862069
| 129
| 0.669043
| 129
| 982
| 5.069767
| 0.395349
| 0.041284
| 0.027523
| 0.033639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008516
| 0.162933
| 982
| 28
| 130
| 35.071429
| 0.787105
| 0
| 0
| 0
| 0
| 0.037037
| 0.160896
| 0.112016
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22b4a1c6f88314760073b0d207d79b3e4653b1cf
| 4,848
|
py
|
Python
|
src/pybacked/zip_handler.py
|
bluePlatinum/pyback
|
1c12a52974232b0482981c12a9af27e52dd2190e
|
[
"MIT"
] | null | null | null |
src/pybacked/zip_handler.py
|
bluePlatinum/pyback
|
1c12a52974232b0482981c12a9af27e52dd2190e
|
[
"MIT"
] | null | null | null |
src/pybacked/zip_handler.py
|
bluePlatinum/pyback
|
1c12a52974232b0482981c12a9af27e52dd2190e
|
[
"MIT"
] | null | null | null |
import os
import shutil
import tempfile
import zipfile
def archive_write(archivepath, data, filename, compression, compressionlevel):
"""
Create a file named filename in the archive and write data to it
:param archivepath: The path to the zip-archive
:type archivepath: str
:param data: The data to be written to the file
:type data: str
:param filename: The filename for the newly created file
:type filename: str
:param compression: The desired compression for the zip-archive
:type compression: int
:param compressionlevel: The desired compression level for the zip-archive
:type compressionlevel: int
:return: void
"""
archive = zipfile.ZipFile(archivepath, mode='a',
compression=compression,
compresslevel=compressionlevel)
archive.writestr(filename, data)
archive.close()
def create_archive(archivepath, filedict, compression, compressionlevel):
"""
    Write filedict to the zip-archive data subdirectory. Checks whether an
    archive at archivepath already exists before writing; if it does, a
    FileExistsError is raised.
:param archivepath: the path to the file
:param filedict: dictionary containing the filepath, filename key-value
pairs
:param compression: desired compression methods (see zipfile documentation)
:param compressionlevel: compression level (see zipfile documentation)
:return: void
"""
if os.path.isfile(archivepath):
raise FileExistsError("Specified file already exists")
else:
archive = zipfile.ZipFile(archivepath, mode='x',
compression=compression,
compresslevel=compressionlevel)
for filepath, filename in filedict.items():
archive.write(filepath, arcname="data/" + filename)
archive.close()
def extract_archdata(archivepath, filename, destination):
"""
    Extract a file from an archive and write it to the destination. If the
    destination path already exists, extract_archdata will not overwrite it but
    will raise a FileExistsError.
:param archivepath: The path to the archive containing the file
:type archivepath: str
:param filename: The archive name of the desired file.
:type filename: str
:param destination: The path at which the extracted file is to be placed.
:type destination: str
:return: void
:rtype: None
"""
# check if destination path already exists
if os.path.exists(destination):
raise FileExistsError("The specified destination is already in use")
archive = zipfile.ZipFile(archivepath, mode='r')
with tempfile.TemporaryDirectory() as tmpdir:
archive.extract(filename, path=tmpdir)
# create directories for the destination
os.makedirs(os.path.dirname(destination), exist_ok=True)
shutil.copy(os.path.abspath(tmpdir + "/" + filename), destination)
def read_bin(archivepath, filelist):
"""
Read a list of files from an archive and return the file data as a
dictionary of filename, data key-value pairs.
:param archivepath: the path to the archive
:param filelist: list of filenames to read
:return: dictionary with filename, data key-value pairs
:rtype: dict
"""
datadict = dict()
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
for filename in filelist:
try:
file = archive.open(filename)
datadict[filename] = file.read().decode()
file.close()
except KeyError:
datadict[filename] = None
archive.close()
return datadict
def read_diff_log(archivepath):
"""
Read the diff-log.csv from a given archive file.
:param archivepath: The path to the zip-archive
:type archivepath: str
:return: The diff-log.csv contents in ascii string form.
:rtype: str
"""
arch = zipfile.ZipFile(archivepath, mode='r')
diff_log_file = arch.open("diff-log.csv")
diff_log_bin = diff_log_file.read()
diff_log = diff_log_bin.decode()
diff_log_file.close()
arch.close()
return diff_log
def zip_extract(archivepath, filelist, extractpath):
"""
Extract a list of files to a specific location
:param archivepath: the path to the zip-archive
:param filelist: list of member filenames to extract
:param extractpath: path for the extracted files
:return: void
"""
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
archive.extractall(path=extractpath, members=filelist)
archive.close()
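# Hedged usage sketch (not part of the original module): the paths are
# hypothetical and the compression constants come from the standard zipfile
# module, as expected by the function signatures above.
# if __name__ == "__main__":
#     files = {"/tmp/notes.txt": "notes.txt"}  # filepath -> archive name
#     create_archive("backup.zip", files,
#                    compression=zipfile.ZIP_DEFLATED, compressionlevel=9)
#     archive_write("backup.zip", "hash,mode\n", "diff-log.csv",
#                   compression=zipfile.ZIP_DEFLATED, compressionlevel=9)
#     print(read_bin("backup.zip", ["data/notes.txt"]))
#     print(read_diff_log("backup.zip"))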
| 33.902098
| 79
| 0.679868
| 584
| 4,848
| 5.607877
| 0.226027
| 0.023511
| 0.034809
| 0.042137
| 0.254656
| 0.169771
| 0.149924
| 0.114809
| 0.103206
| 0.103206
| 0
| 0
| 0.241543
| 4,848
| 142
| 80
| 34.140845
| 0.890672
| 0.442038
| 0
| 0.327586
| 0
| 0
| 0.062986
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.068966
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22b65cc97460c0c9287ab847203def7abf74c5bd
| 1,642
|
py
|
Python
|
kinto/__main__.py
|
s-utsch/kinto
|
5e368849a8ab652a6e1923f44febcf89afd2c78b
|
[
"Apache-2.0"
] | null | null | null |
kinto/__main__.py
|
s-utsch/kinto
|
5e368849a8ab652a6e1923f44febcf89afd2c78b
|
[
"Apache-2.0"
] | null | null | null |
kinto/__main__.py
|
s-utsch/kinto
|
5e368849a8ab652a6e1923f44febcf89afd2c78b
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
from cliquet.scripts import cliquet
from pyramid.scripts import pserve
from pyramid.paster import bootstrap
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto commands")
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='init/start/migrate')
parser_init = subparsers.add_parser('init')
parser_init.set_defaults(which='init')
parser_init.add_argument('--config_file', required=False,
help='Config file may be passed as argument')
parser_migrate = subparsers.add_parser('migrate')
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(which='start')
args = vars(parser.parse_args())
if args['which'] == 'init':
        if args['config_file'] is None:
env = bootstrap('config/kinto.ini')
else:
config_file = format(args['config_file'])
env = bootstrap(config_file)
elif args['which'] == 'migrate':
env = bootstrap('config/kinto.ini')
cliquet.init_schema(env)
elif args['which'] == 'start':
pserve_argv = ['pserve', 'config/kinto.ini', '--reload']
pserve.main(pserve_argv)
if __name__ == "__main__":
main()
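# Hedged usage sketch (not part of the original file): main() accepts an
# argv-style list, so the documented subcommands can also be driven
# programmatically; the config path below is hypothetical.
# main(['init', '--config_file', 'config/kinto.ini'])
# main(['migrate'])
# main(['start'])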
| 34.93617
| 78
| 0.563337
| 168
| 1,642
| 5.315476
| 0.333333
| 0.067189
| 0.06383
| 0.051512
| 0.058231
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0009
| 0.323386
| 1,642
| 46
| 79
| 35.695652
| 0.80288
| 0.010353
| 0
| 0.057143
| 0
| 0
| 0.163681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0.028571
| 0.142857
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22b7e88858264b834f72f09e2cb52dba1a8d0aee
| 3,423
|
py
|
Python
|
tests/unit/media/test_synthesis.py
|
AnantTiwari-Naman/pyglet
|
4774f2889057da95a78785a69372112931e6a620
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/media/test_synthesis.py
|
AnantTiwari-Naman/pyglet
|
4774f2889057da95a78785a69372112931e6a620
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/media/test_synthesis.py
|
AnantTiwari-Naman/pyglet
|
4774f2889057da95a78785a69372112931e6a620
|
[
"BSD-3-Clause"
] | 1
|
2021-09-16T20:47:07.000Z
|
2021-09-16T20:47:07.000Z
|
import os
from ctypes import sizeof
from io import BytesIO
import unittest
from pyglet.media.synthesis import *
local_dir = os.path.dirname(__file__)
test_data_path = os.path.abspath(os.path.join(local_dir, '..', '..', 'data'))
del local_dir
def get_test_data_file(*file_parts):
"""Get a file from the test data directory in an OS independent way.
Supply relative file name as you would in os.path.join().
"""
return os.path.join(test_data_path, *file_parts)
class SynthesisSourceTest:
"""Simple test to check if synthesized sources provide data."""
source_class = None
def test_default(self):
source = self.source_class(1.)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source)
def test_sample_rate_11025(self):
source = self.source_class(1., sample_rate=11025)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source, sample_rate=11025)
def _test_total_duration(self, source):
total_bytes = source.audio_format.bytes_per_second
self._check_audio_data(source, total_bytes, 1.)
def _check_audio_data(self, source, expected_bytes, expected_duration):
data = source.get_audio_data(expected_bytes + 100)
self.assertIsNotNone(data)
self.assertAlmostEqual(expected_bytes, data.length, delta=20)
self.assertAlmostEqual(expected_duration, data.duration)
self.assertIsNotNone(data.data)
self.assertAlmostEqual(expected_bytes, len(data.data), delta=20)
# Should now be out of data
last_data = source.get_audio_data(100)
self.assertIsNone(last_data)
def test_seek_default(self):
source = self.source_class(1.)
self._test_seek(source)
def _test_seek(self, source):
seek_time = .5
bytes_left = source.audio_format.bytes_per_second * .5
source.seek(seek_time)
self._check_audio_data(source, bytes_left, .5)
def _test_generated_bytes(self, source, sample_rate=44800, sample_size=16):
source_name = self.source_class.__name__.lower()
filename = "synthesis_{0}_{1}_{2}_1ch.wav".format(source_name, sample_size, sample_rate)
with open(get_test_data_file('media', filename), 'rb') as f:
# discard the wave header:
loaded_bytes = f.read()[44:]
source.seek(0)
generated_data = source.get_audio_data(source._max_offset)
bytes_buffer = BytesIO(generated_data.data).getvalue()
# Compare a small chunk, to avoid hanging on mismatch:
assert bytes_buffer[:1000] == loaded_bytes[:1000],\
"Generated bytes do not match sample wave file."
class SilenceTest(SynthesisSourceTest, unittest.TestCase):
source_class = Silence
class WhiteNoiseTest(SynthesisSourceTest, unittest.TestCase):
source_class = WhiteNoise
class SineTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sine
class TriangleTest(SynthesisSourceTest, unittest.TestCase):
source_class = Triangle
class SawtoothTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sawtooth
class SquareTest(SynthesisSourceTest, unittest.TestCase):
source_class = Square
class FMTest(SynthesisSourceTest, unittest.TestCase):
source_class = SimpleFM
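# Hedged usage note (not part of the original file): the SynthesisSourceTest
# mixin above is exercised through the concrete TestCase subclasses, so the
# whole module can be run with the standard unittest runner, e.g.
#     python -m unittest tests.unit.media.test_synthesis
# (module path assumed from the repository layout).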
| 32.6
| 96
| 0.706106
| 432
| 3,423
| 5.31713
| 0.30787
| 0.067044
| 0.106661
| 0.124946
| 0.369177
| 0.146278
| 0.107967
| 0.107967
| 0.107967
| 0.075751
| 0
| 0.019802
| 0.20333
| 3,423
| 104
| 97
| 32.913462
| 0.822516
| 0.083845
| 0
| 0.092308
| 0
| 0
| 0.028902
| 0.009313
| 0
| 0
| 0
| 0
| 0.107692
| 1
| 0.123077
| false
| 0
| 0.061538
| 0
| 0.446154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22b916a799056741ecb2a3c045e0fdb664033699
| 11,424
|
py
|
Python
|
Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py
|
aaronwJordan/Lean
|
3486a6de56a739e44af274f421ac302cbbc98f8d
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py
|
aaronwJordan/Lean
|
3486a6de56a739e44af274f421ac302cbbc98f8d
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py
|
aaronwJordan/Lean
|
3486a6de56a739e44af274f421ac302cbbc98f8d
|
[
"Apache-2.0"
] | null | null | null |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Algorithm.Framework")
from System import *
from QuantConnect import *
from QuantConnect.Orders.Fees import ConstantFeeModel
from QuantConnect.Data.UniverseSelection import *
from QuantConnect.Indicators import *
from Selection.FundamentalUniverseSelectionModel import FundamentalUniverseSelectionModel
from datetime import timedelta, datetime
from math import ceil
from itertools import chain
#
# This alpha picks stocks according to Joel Greenblatt's Magic Formula.
# First, each stock is ranked depending on the relative value of the ratio EV/EBITDA. For example, a stock
# that has the lowest EV/EBITDA ratio in the security universe receives a score of one while a stock that has
# the tenth lowest EV/EBITDA score would be assigned 10 points.
#
# Then, each stock is ranked and given a score for the second valuation ratio, Return on Capital (ROC).
# Similarly, a stock that has the highest ROC value in the universe gets one score point.
# The stocks that receive the lowest combined score are chosen for insights.
#
# Source: Greenblatt, J. (2010) The Little Book That Beats the Market
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open
# sourced so the community and client funds can see an example of an alpha.
#
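# Illustrative sketch (not part of the original algorithm) of the combined
# two-ratio scoring described above, with made-up numbers: each stock gets rank
# points for EV/EBITDA (lowest ratio scores 1) and for ROC (highest value
# scores 1), and the lowest combined score is preferred.
#
#   ev_ebitda = {"AAA": 6.0, "BBB": 9.5, "CCC": 12.0}
#   roc = {"AAA": 0.08, "BBB": 0.22, "CCC": 0.15}
#   rank_ev = {s: r for r, s in enumerate(sorted(ev_ebitda, key=ev_ebitda.get), 1)}
#   rank_roc = {s: r for r, s in enumerate(sorted(roc, key=roc.get, reverse=True), 1)}
#   combined = {s: rank_ev[s] + rank_roc[s] for s in ev_ebitda}
#   picks = sorted(combined, key=combined.get)  # lowest combined score first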
class GreenblattMagicFormulaAlgorithm(QCAlgorithmFramework):
''' Alpha Streams: Benchmark Alpha: Pick stocks according to Joel Greenblatt's Magic Formula'''
def Initialize(self):
self.SetStartDate(2018, 1, 1)
self.SetCash(100000)
#Set zero transaction fees
self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0)))
# select stocks using MagicFormulaUniverseSelectionModel
self.SetUniverseSelection(GreenBlattMagicFormulaUniverseSelectionModel())
# Use MagicFormulaAlphaModel to establish insights
self.SetAlpha(RateOfChangeAlphaModel())
# Equally weigh securities in portfolio, based on insights
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
## Set Immediate Execution Model
self.SetExecution(ImmediateExecutionModel())
## Set Null Risk Management Model
self.SetRiskManagement(NullRiskManagementModel())
class RateOfChangeAlphaModel(AlphaModel):
'''Uses Rate of Change (ROC) to create magnitude prediction for insights.'''
def __init__(self, *args, **kwargs):
self.lookback = kwargs['lookback'] if 'lookback' in kwargs else 1
self.resolution = kwargs['resolution'] if 'resolution' in kwargs else Resolution.Daily
self.predictionInterval = Time.Multiply(Extensions.ToTimeSpan(self.resolution), self.lookback)
self.symbolDataBySymbol = {}
def Update(self, algorithm, data):
insights = []
for symbol, symbolData in self.symbolDataBySymbol.items():
if symbolData.CanEmit:
insights.append(Insight.Price(symbol, self.predictionInterval, InsightDirection.Up, symbolData.Return, None))
return insights
def OnSecuritiesChanged(self, algorithm, changes):
# clean up data for removed securities
for removed in changes.RemovedSecurities:
symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None)
if symbolData is not None:
symbolData.RemoveConsolidators(algorithm)
# initialize data for added securities
symbols = [ x.Symbol for x in changes.AddedSecurities ]
history = algorithm.History(symbols, self.lookback, self.resolution)
if history.empty: return
tickers = history.index.levels[0]
for ticker in tickers:
symbol = SymbolCache.GetSymbol(ticker)
if symbol not in self.symbolDataBySymbol:
symbolData = SymbolData(symbol, self.lookback)
self.symbolDataBySymbol[symbol] = symbolData
symbolData.RegisterIndicators(algorithm, self.resolution)
symbolData.WarmUpIndicators(history.loc[ticker])
class SymbolData:
'''Contains data specific to a symbol required by this model'''
def __init__(self, symbol, lookback):
self.Symbol = symbol
self.ROC = RateOfChange('{}.ROC({})'.format(symbol, lookback), lookback)
self.Consolidator = None
self.previous = 0
def RegisterIndicators(self, algorithm, resolution):
self.Consolidator = algorithm.ResolveConsolidator(self.Symbol, resolution)
algorithm.RegisterIndicator(self.Symbol, self.ROC, self.Consolidator)
def RemoveConsolidators(self, algorithm):
if self.Consolidator is not None:
algorithm.SubscriptionManager.RemoveConsolidator(self.Symbol, self.Consolidator)
def WarmUpIndicators(self, history):
for tuple in history.itertuples():
self.ROC.Update(tuple.Index, tuple.close)
@property
def Return(self):
return float(self.ROC.Current.Value)
@property
def CanEmit(self):
if self.previous == self.ROC.Samples:
return False
self.previous = self.ROC.Samples
return self.ROC.IsReady
def __str__(self, **kwargs):
return '{}: {:.2%}'.format(self.ROC.Name, (1 + self.Return)**252 - 1)
class GreenBlattMagicFormulaUniverseSelectionModel(FundamentalUniverseSelectionModel):
'''Defines a universe according to Joel Greenblatt's Magic Formula, as a universe selection model for the framework algorithm.
From the universe QC500, stocks are ranked using the valuation ratios, Enterprise Value to EBITDA (EV/EBITDA) and Return on Assets (ROA).
'''
def __init__(self,
filterFineData = True,
universeSettings = None,
securityInitializer = None):
'''Initializes a new default instance of the MagicFormulaUniverseSelectionModel'''
super().__init__(filterFineData, universeSettings, securityInitializer)
# Number of stocks in Coarse Universe
self.NumberOfSymbolsCoarse = 500
# Number of sorted stocks in the fine selection subset using the valuation ratio, EV to EBITDA (EV/EBITDA)
self.NumberOfSymbolsFine = 20
# Final number of stocks in security list, after sorted by the valuation ratio, Return on Assets (ROA)
self.NumberOfSymbolsInPortfolio = 10
self.lastMonth = -1
self.dollarVolumeBySymbol = {}
self.symbols = []
def SelectCoarse(self, algorithm, coarse):
'''Performs coarse selection for constituents.
The stocks must have fundamental data
The stock must have positive previous-day close price
The stock must have positive volume on the previous trading day'''
month = algorithm.Time.month
if month == self.lastMonth:
return self.symbols
self.lastMonth = month
# The stocks must have fundamental data
# The stock must have positive previous-day close price
# The stock must have positive volume on the previous trading day
filtered = [x for x in coarse if x.HasFundamentalData
and x.Volume > 0
and x.Price > 0]
# sort the stocks by dollar volume and take the top 1000
top = sorted(filtered, key=lambda x: x.DollarVolume, reverse=True)[:self.NumberOfSymbolsCoarse]
self.dollarVolumeBySymbol = { i.Symbol: i.DollarVolume for i in top }
self.symbols = list(self.dollarVolumeBySymbol.keys())
return self.symbols
def SelectFine(self, algorithm, fine):
'''QC500: Performs fine selection for the coarse selection constituents
        The company's headquarters must be in the U.S.
The stock must be traded on either the NYSE or NASDAQ
At least half a year since its initial public offering
The stock's market cap must be greater than 500 million
Magic Formula: Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)'''
# QC500:
        ## The company's headquarters must be in the U.S.
## The stock must be traded on either the NYSE or NASDAQ
## At least half a year since its initial public offering
## The stock's market cap must be greater than 500 million
filteredFine = [x for x in fine if x.CompanyReference.CountryId == "USA"
and (x.CompanyReference.PrimaryExchangeID == "NYS" or x.CompanyReference.PrimaryExchangeID == "NAS")
and (algorithm.Time - x.SecurityReference.IPODate).days > 180
and x.EarningReports.BasicAverageShares.ThreeMonths * x.EarningReports.BasicEPS.TwelveMonths * x.ValuationRatios.PERatio > 5e8]
count = len(filteredFine)
if count == 0: return []
myDict = dict()
percent = float(self.NumberOfSymbolsFine / count)
# select stocks with top dollar volume in every single sector
for key in ["N", "M", "U", "T", "B", "I"]:
value = [x for x in filteredFine if x.CompanyReference.IndustryTemplateCode == key]
value = sorted(value, key=lambda x: self.dollarVolumeBySymbol[x.Symbol], reverse = True)
myDict[key] = value[:ceil(len(value) * percent)]
# stocks in QC500 universe
topFine = list(chain.from_iterable(myDict.values()))[:self.NumberOfSymbolsCoarse]
# Magic Formula:
## Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
## Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)
# sort stocks in the security universe of QC500 based on Enterprise Value to EBITDA valuation ratio
sortedByEVToEBITDA = sorted(topFine, key=lambda x: x.ValuationRatios.EVToEBITDA , reverse=True)
# sort subset of stocks that have been sorted by Enterprise Value to EBITDA, based on the valuation ratio Return on Assets (ROA)
sortedByROA = sorted(sortedByEVToEBITDA[:self.NumberOfSymbolsFine], key=lambda x: x.ValuationRatios.ForwardROA, reverse=False)
        # retrieve list of securities in portfolio
top = sortedByROA[:self.NumberOfSymbolsInPortfolio]
self.symbols = [f.Symbol for f in top]
return self.symbols
| 44.8
| 167
| 0.68785
| 1,307
| 11,424
| 5.996174
| 0.306809
| 0.009187
| 0.009187
| 0.014036
| 0.163073
| 0.147123
| 0.135001
| 0.121475
| 0.110246
| 0.110246
| 0
| 0.0091
| 0.240109
| 11,424
| 254
| 168
| 44.976378
| 0.893676
| 0.370011
| 0
| 0.040984
| 0
| 0
| 0.024721
| 0.011003
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114754
| false
| 0
| 0.081967
| 0.016393
| 0.295082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22be5667afd253d36e99d23282612d6ddbb78c15
| 2,132
|
py
|
Python
|
src/archive/greatcircle.py
|
AuraUAS/aura-core
|
4711521074db72ba9089213e14455d89dc5306c0
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 8
|
2016-08-03T19:35:03.000Z
|
2019-12-15T06:25:05.000Z
|
src/archive/greatcircle.py
|
jarilq/aura-core
|
7880ed265396bf8c89b783835853328e6d7d1589
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 4
|
2018-09-27T15:48:56.000Z
|
2018-11-05T12:38:10.000Z
|
src/archive/greatcircle.py
|
jarilq/aura-core
|
7880ed265396bf8c89b783835853328e6d7d1589
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5
|
2017-06-28T19:15:36.000Z
|
2020-02-19T19:31:24.000Z
|
# From: http://williams.best.vwh.net/avform.htm#GCF
import math
EPS = 0.0001
d2r = math.pi / 180.0
r2d = 180.0 / math.pi
rad2nm = (180.0 * 60.0) / math.pi
nm2rad = 1.0 / rad2nm
nm2meter = 1852
meter2nm = 1.0 / nm2meter
# p1 = (lat1(deg), lon1(deg))
# p2 = (lat2(deg), lon2(deg))
def course_and_dist(p1, p2):
    # this formulation uses positive lon = W (opposite of the usual convention),
    # so we invert the longitude.
lat1 = p1[0] * d2r
lon1 = -p1[1] * d2r
lat2 = p2[0] * d2r
lon2 = -p2[1] * d2r
dist_rad = 2.0 * math.asin(math.sqrt((math.sin((lat1-lat2)/2.0))**2.0 + math.cos(lat1)*math.cos(lat2)*(math.sin((lon1-lon2)/2.0))**2))
# if starting point is on a pole
if math.cos(lat1) < EPS:
# EPS a small number ~ machine precision
if (lat1 > 0.0):
# starting from N pole
tc1_rad = math.pi
else:
# starting from S pole
tc1_rad = 2.0 * math.pi
    # For starting points other than the poles:
    elif dist_rad < 0.000000001:
# about a cm
tc1_rad = 0.0
else:
num1 = math.sin(lat2) - math.sin(lat1)*math.cos(dist_rad)
den1 = math.sin(dist_rad) * math.cos(lat1)
tmp1 = num1 / den1
if tmp1 < -1.0:
#print "CLIPPING TMP1 to -1.0!"
tmp1 = -1.0
if tmp1 > 1.0:
#print "CLIPPING TMP1 to 1.0!"
tmp1 = 1.0
if math.sin(lon2-lon1) < 0.0:
tc1_rad = math.acos(tmp1)
else:
tc1_rad = 2.0 * math.pi - math.acos(tmp1)
dist_nm = dist_rad * rad2nm
dist_m = dist_nm * nm2meter
tc1_deg = tc1_rad * r2d
return (tc1_deg, dist_m)
def project_course_distance(p1, course_deg, dist_m):
lat1 = p1[0] * d2r
lon1 = -p1[1] * d2r
tc = course_deg * d2r
d = dist_m * meter2nm * nm2rad
lat = math.asin(math.sin(lat1)*math.cos(d)+math.cos(lat1)*math.sin(d)*math.cos(tc))
if math.cos(lat) < EPS:
lon = lon1 # endpoint a pole
else:
lon = math.fmod(lon1-math.asin(math.sin(tc)*math.sin(d)/math.cos(lat))+math.pi, 2*math.pi) - math.pi
return (lat*r2d, -lon*r2d)
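# Hedged usage sketch (not part of the original file); the coordinates are
# arbitrary. course_and_dist() returns (true course in degrees, distance in
# meters) and project_course_distance() returns a (lat, lon) tuple in degrees.
# if __name__ == "__main__":
#     crs, dist_m = course_and_dist((53.32, -113.58), (53.30, -113.54))
#     dest = project_course_distance((53.32, -113.58), crs, dist_m)
#     print(crs, dist_m, dest)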
| 29.611111
| 138
| 0.560976
| 349
| 2,132
| 3.355301
| 0.275072
| 0.059778
| 0.023911
| 0.023057
| 0.175918
| 0.119556
| 0.095645
| 0.095645
| 0.061486
| 0.061486
| 0
| 0.104042
| 0.292214
| 2,132
| 71
| 139
| 30.028169
| 0.671968
| 0.202627
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.020833
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22be826c96db32727162b13681b36634865339c6
| 1,195
|
py
|
Python
|
app/__init__.py
|
JoeCare/flask_geolocation_api
|
ad9ea0d22b738a7af8421cc57c972bd0e0fa80da
|
[
"Apache-2.0"
] | null | null | null |
app/__init__.py
|
JoeCare/flask_geolocation_api
|
ad9ea0d22b738a7af8421cc57c972bd0e0fa80da
|
[
"Apache-2.0"
] | 2
|
2021-03-14T03:55:49.000Z
|
2021-03-14T04:01:32.000Z
|
app/__init__.py
|
JoeCare/flask_geolocation_api
|
ad9ea0d22b738a7af8421cc57c972bd0e0fa80da
|
[
"Apache-2.0"
] | null | null | null |
import connexion, os
from connexion.resolver import RestyResolver
from flask import json
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# Globally accessible libraries
db = SQLAlchemy()
mm = Marshmallow()
def init_app():
"""Initialize the Connexion application."""
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
openapi_path = os.path.join(BASE_DIR, "../")
conn_app = connexion.FlaskApp(
__name__, specification_dir=openapi_path, options={
"swagger_ui": True,
"serve_spec": True
}
)
conn_app.add_api("openapi.yaml", resolver=RestyResolver('run'),
strict_validation=True)
# Flask app and getting into app_context
app = conn_app.app
# Load application config
app.config.from_object('config.ProdConfig')
app.json_encoder = json.JSONEncoder
# Initialize Plugins
db.init_app(app)
mm.init_app(app)
with app.app_context():
# Include our Routes/views
import run
# Register Blueprints
# app.register_blueprint(auth.auth_bp)
# app.register_blueprint(admin.admin_bp)
return app
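# Hedged usage sketch (not part of the original module): init_app() returns the
# underlying Flask app, so a local development run could look like this; host
# and port are illustrative.
# if __name__ == "__main__":
#     init_app().run(host="0.0.0.0", port=5000)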
| 26.555556
| 67
| 0.672803
| 141
| 1,195
| 5.468085
| 0.48227
| 0.031128
| 0.02594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23682
| 1,195
| 44
| 68
| 27.159091
| 0.845395
| 0.226778
| 0
| 0
| 0
| 0
| 0.060373
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.230769
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c02d3ee15e860f429769f7b7700c393718fcdc
| 29,893
|
py
|
Python
|
RIPv2-Simulation/Router.py
|
vkmanojk/Networks-VirtualLAN
|
52c6546da611a7a7b9fdea65c567b284664a99b4
|
[
"MIT"
] | null | null | null |
RIPv2-Simulation/Router.py
|
vkmanojk/Networks-VirtualLAN
|
52c6546da611a7a7b9fdea65c567b284664a99b4
|
[
"MIT"
] | null | null | null |
RIPv2-Simulation/Router.py
|
vkmanojk/Networks-VirtualLAN
|
52c6546da611a7a7b9fdea65c567b284664a99b4
|
[
"MIT"
] | null | null | null |
'''
Summary: Program that implements a routing daemon based on the
RIP version 2 protocol from RFC2453.
Usage: python3 Router.py <router_config_file>
Configuration File:
The user supplies a router configuration file of the format:
[Settings]
router-id = <router_number>
input-ports = <input> [, <input>, ...]
outputs = <output>-<metric>-<destination_router>
[, <output>-<metric>-<destination_router>, ...]
where,
router_number: ID of router between 1 - 64000.
input: port number between 1024 - 64000.
        output: port number between 1024 - 64000,
not equal to any inputs.
metric: metric of output between 1 - 16.
destination_router: ID of destination router.
Description:
This program implements a basic RIPv2 routing protocol from RFC2453
for routing computations in computer networks. It takes a configuration
file as shown above and sets up a router with a new socket for each
input-port.
The RIPv2 protocol uses a routing table to keep track of all reachable
routers on the network along with their metric/cost and the direct
next hop router ID along the route to that destination router. However,
it can only send messages to the direct neighbours specified in outputs.
The protocol uses the Bellman-Ford distance vector algorithm to compute
the lowest cost route to each router in the network. If the metric is
16 or greater, the router is considered unreachable.
The routing table initially starts with a single route entry (RTE) for
itself with a metric of zero. The routing table is periodically
    transmitted to each of its direct output ports via an unsolicited
response message as defined in RFC2453 section 3.9.2 and 4. This is
performed on a separate thread so it does not interfere with other
operations
    The router receives messages from other routers by using the Python select()
function which blocks until a message is ready to be read. Once a
message is received the header and contents are validated.
If the message is valid each RTE is processed according to RFC2453
section 3.9.2.
If a new router is found the RTE is added
to the routing table, adding the cost to the metric for the output
the message was received on.
If the RTE already exists, but the metric is smaller, the metric
is updated to the lower metric.
If the lower metric is from a different next hop router, change the
next hop.
If nothing has changed, restart the timeout timer.
If RTE metric >= max metric of 16, mark the entry for
garbage collection and update the metric in the table.
If any change has occurred in the routing table as a result of a
received message, a triggered update (RFC2453 section 3.10.1) is sent
to all outputs with the updated entries. Triggered updates are sent with
a random delay between 1 - 5 seconds to prevent synchronized updates.
Request messages are not implemented in this program.
Timers (all timers are on separate threads) (RFC2453 section 3.8):
Update timer - Periodic unsolicited response message sent to all
outputs. The period is adjusted each time to a random value
between 0.8 * BASE_TIMER and 1.2 * BASE_TIMER to prevent
synchronized updates.
        Timeout - used to check the routing table for RTEs which have
            not been updated within the ROUTE_TIMEOUT interval. If
a router has not been heard from within this time, then set the
metric to the max metric of 16 and start the garbage collection
timer.
Garbage timer - used to check the routing table for RTEs set
for garbage collection. If the timeout >= DELETE_TIMEOUT,
mark the RTE for deletion.
Garbage Collection - used to check the routing table for RTEs
marked for deletion, and removes those entries from the table.
'''
import configparser
import select
import socket
import sys
import time
import threading
import struct
import datetime
from random import randint, randrange
DEBUG = False
HOST = '127.0.0.1' # localhost
BASE_TIMER = 5
MAX_METRIC = 16
ROUTE_TIMEOUT = BASE_TIMER * 6
DELETE_TIMEOUT = BASE_TIMER * 4
AF_INET = 2
# ===========================================================================
# TRANSITIONS
class Transistion():
'''Class Representing a transition between states.'''
def __init__(self, to_state):
self.to_state = to_state
def execute(self):
'''Run the transition functions'''
pass
# ===========================================================================
# STATES
class State():
'''Class Representing a generic state'''
def __init__(self, fsm):
self.fsm = fsm
def enter(self):
'''Execute functions for entering a state'''
pass
def execute(self):
'''Execute functions while in state'''
pass
def exit(self):
'''Execute functions for leaving a state'''
pass
class StartUp(State):
'''Class Representing the Start up state which reads the configuration file
'''
def __init__(self, fsm):
super(StartUp, self).__init__(fsm)
def execute(self):
'''Execute the configuration functions'''
print_message("Loading Configuration File: '"
+ self.fsm.router.config_file + "'")
config = configparser.ConfigParser()
config.read(self.fsm.router.config_file)
self.get_router_id(config)
self.setup_inputs(config)
self.get_outputs(config)
self.setup_routing_table()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
'''Print complete message'''
print_message("Router Setup Complete.")
def get_router_id(self, config):
'''Read the router id number from the configuration file'''
if 1 <= int(config['Settings']['router-id']) <= 64000:
self.fsm.router.router_settings['id'] = \
int(config['Settings']['router-id'])
else:
raise Exception('Invalid Router ID Number')
def get_outputs(self, config):
'''Return a dictionary of outputs containing port, cost and destination
router id from the Configuration file'''
outputs = config['Settings']['outputs'].split(', ')
outputs = [i.split('-') for i in outputs]
self.fsm.router.router_settings['outputs'] = {}
existing_ports = []
for output in outputs:
is_valid_port = 1024 <= int(output[0]) <= 64000 and not \
int(output[0]) in existing_ports
is_valid_cost = 1 <= int(output[1]) < 16
is_valid_id = 1 <= int(output[2]) <= 64000
if is_valid_port and is_valid_cost and is_valid_id:
existing_ports.append(int(output[0]))
self.fsm.router.router_settings['outputs'][int(output[2])] = \
{'metric': int(output[1]),
'port': int(output[0])}
else:
raise Exception('Invalid Outputs')
def setup_inputs(self, config):
'''Create input sockets from the inputs specified in the config file'''
# get inputs from configuration file
ports = config['Settings']['input-ports'].split(', ')
inputs = []
for port in ports:
if 1024 <= int(port) <= 64000 and not int(port) in inputs:
inputs.append(int(port))
else:
raise Exception('Invalid Port Number')
self.fsm.router.router_settings['inputs'] = {}
# create socket for each input port
for port in inputs:
try:
self.fsm.router.router_settings['inputs'][port] = \
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print_message('Socket ' + str(port) + ' Created.')
except socket.error as msg:
print('Failed to create socket. Message: ' + str(msg))
sys.exit()
# bind port to socket
try:
self.fsm.router.router_settings['inputs'][port].bind(
(HOST, port))
print_message('Socket ' + str(port) + ' Bind Complete.')
except socket.error as msg:
print('Failed to create socket. Message ' + str(msg))
sys.exit()
def setup_routing_table(self):
'''Setup routing table with the outputs specified in the config file'''
self.fsm.router.routing_table[self.fsm.router.router_settings['id']] = \
RIPRouteEntry(address=self.fsm.router.router_settings['id'],
nexthop=0,
metric=0,
imported=True)
class Waiting(State):
'''
Class representing the waiting state of the FSM where the router waits
for messages to be received on its input sockets. When a message is
    received the state changes to the ReadMessage state.
'''
def __init__(self, fsm):
super(Waiting, self).__init__(fsm)
def enter(self):
'''Display State entry message'''
print_message("Entering idle state...")
def execute(self):
'''Waits for input sockets to be readable and then changes the state
to process the received message.'''
readable = select.select(
self.fsm.router.router_settings['inputs'].values(), [], [])
if readable[0]:
self.fsm.router.readable_ports = readable[0]
self.fsm.to_transition("toReadMessage")
def exit(self):
'''Display State exit message'''
print_message("Message Received")
class ReadMessage(State):
'''Class representing the state for reading messages received on the input
sockets'''
def __init__(self, fsm):
super(ReadMessage, self).__init__(fsm)
def enter(self):
print_message("Reading Messages...")
def execute(self):
for port in self.fsm.router.readable_ports:
packet = RIPPacket(port.recvfrom(1024)[0])
self.fsm.router.update_routing_table(packet)
if self.fsm.router.route_change:
self.fsm.router.trigger_update()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
print_message("Messages Read.")
# ===========================================================================
# FINITE STATE MACHINE
class RouterFSM():
'''Class representing the Router finite state machine'''
def __init__(self, rip_router):
self.router = rip_router
self.states = {}
self.transitions = {}
self.cur_state = None
self.trans = None
def add_transistion(self, trans_name, transition):
'''Add a new transition to the FSM'''
self.transitions[trans_name] = transition
def add_state(self, state_name, state):
'''Add a new state to the FSM'''
self.states[state_name] = state
def set_state(self, state_name):
'''Set the current state of the FSM'''
self.cur_state = self.states[state_name]
def to_transition(self, to_trans):
'''Set the current transition of the FSM'''
self.trans = self.transitions[to_trans]
def execute(self):
'''Run the FSM'''
if self.trans:
self.cur_state.exit()
self.trans.execute()
self.set_state(self.trans.to_state)
self.cur_state.enter()
self.trans = None
self.cur_state.execute()
# ===========================================================================
# IMPLEMENTATION
class RIPPacket:
'''Class representing a RIP packet containing a header and body as defined
in RFC2453 RIPv2 section 4.'''
def __init__(self, data=None, header=None, rtes=None):
if data:
self._init_from_network(data)
elif header and rtes:
self._init_from_host(header, rtes)
else:
raise ValueError
def __repr__(self):
return "RIPPacket: Command {}, Ver. {}, number of RTEs {}.". \
format(self.header.cmd, self.header.ver, len(self.rtes))
def _init_from_network(self, data):
'''Init for RIPPacket if data is from the network'''
# Packet Validation
datalen = len(data)
if datalen < RIPHeader.SIZE:
raise FormatException
malformed_rtes = (datalen - RIPHeader.SIZE) % RIPRouteEntry.SIZE
if malformed_rtes:
raise FormatException
# Convert bytes in packet to header and RTE data
num_rtes = int((datalen - RIPHeader.SIZE) / RIPRouteEntry.SIZE)
self.header = RIPHeader(data[0:RIPHeader.SIZE])
self.rtes = []
rte_start = RIPHeader.SIZE
rte_end = RIPHeader.SIZE + RIPRouteEntry.SIZE
# Loop over data packet to obtain each RTE
for i in range(num_rtes):
self.rtes.append(RIPRouteEntry(rawdata=data[rte_start:rte_end],
src_id=self.header.src))
rte_start += RIPRouteEntry.SIZE
rte_end += RIPRouteEntry.SIZE
def _init_from_host(self, header, rtes):
'''Init for imported data'''
if header.ver != 2:
raise ValueError("Only Version 2 is supported.")
self.header = header
self.rtes = rtes
def serialize(self):
        '''Return the byte string representing this packet for network
transmission'''
packed = self.header.serialize()
for rte in self.rtes:
packed += rte.serialize()
return packed
class RIPHeader:
'''Class representing the header of a RIP packet'''
FORMAT = "!BBH"
SIZE = struct.calcsize(FORMAT)
TYPE_RESPONSE = 2
VERSION = 2
def __init__(self, rawdata=None, router_id=None):
self.packed = None
if rawdata:
self._init_from_network(rawdata)
elif router_id:
self._init_from_host(router_id)
else:
raise ValueError
def __repr__(self):
return "RIP Header (cmd = {}, ver = {}, src = {})".format(self.cmd,
self.ver,
self.src)
def _init_from_network(self, rawdata):
'''init for data from network'''
header = struct.unpack(self.FORMAT, rawdata)
self.cmd = header[0]
self.ver = header[1]
self.src = header[2]
def _init_from_host(self, router_id):
'''Init for data from host'''
self.cmd = self.TYPE_RESPONSE
self.ver = self.VERSION
self.src = router_id
def serialize(self):
        '''Return the byte string representing this header for network
transmission'''
return struct.pack(self.FORMAT, self.cmd, self.ver, self.src)
class RIPRouteEntry:
'''Class representing a single RIP route entry (RTE)'''
FORMAT = "!HHIII"
SIZE = struct.calcsize(FORMAT)
MIN_METRIC = 0
MAX_METRIC = 16
def __init__(self, rawdata=None, src_id=None, address=None,
nexthop=None, metric=None, imported=False):
self.changed = False
self.imported = imported
self.init_timeout()
if rawdata and src_id != None:
self._init_from_network(rawdata, src_id)
elif address and nexthop != None and metric != None:
self._init_from_host(address, nexthop, metric)
else:
raise ValueError
def __repr__(self):
template = "|{:^11}|{:^10}|{:^11}|{:^15}|{:^10}|{:^13}|"
# Check that timeout is set
if self.timeout == None:
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
str(self.timeout))
else:
timeout = (datetime.datetime.now() - self.timeout).total_seconds()
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
round(timeout, 1))
def _init_from_host(self, address, nexthop, metric):
'''Init for data from host'''
self.afi = AF_INET
self.tag = 0 # not used
self.addr = address
self.nexthop = nexthop
self.metric = metric
def _init_from_network(self, rawdata, src_id):
'''Init for data received from network'''
rte = struct.unpack(self.FORMAT, rawdata)
self.afi = rte[0]
self.tag = rte[1]
self.addr = rte[2]
self.set_nexthop(rte[3])
self.metric = rte[4]
if self.nexthop == 0:
self.nexthop = src_id
# Validation
if not self.MIN_METRIC <= self.metric <= self.MAX_METRIC:
raise FormatException
def init_timeout(self):
'''Initialize the timeout property'''
if self.imported:
self.timeout = None
else:
self.timeout = datetime.datetime.now()
self.garbage = False
self.marked_for_delection = False
def __eq__(self, other):
if self.afi == other.afi and \
self.addr == other.addr and \
self.tag == other.tag and \
self.nexthop == other.nexthop and \
self.metric == other.metric:
return True
else:
return False
def set_nexthop(self, nexthop):
'''Set the nexthop property'''
self.nexthop = nexthop
def serialize(self):
'''Pack entries into typical RIPv2 packet format for sending over the
network. '''
return struct.pack(self.FORMAT, self.afi, self.tag, self.addr,
self.nexthop, self.metric)
class FormatException(Exception):
'''Class representing the Format Exception'''
def __init__(self, message=""):
self.message = message
class Router:
'''Class representing a single router'''
def __init__(self, config_file):
self.fsm = RouterFSM(self)
self.config_file = config_file
# Dictionary of router settings, including router-id, inputs and
# outputs
self.router_settings = {}
self.readable_ports = []
# Dictionary of routing table
self.routing_table = {}
self.route_change = False
# STATES
self.fsm.add_state("StartUp", StartUp(self.fsm))
self.fsm.add_state("Waiting", Waiting(self.fsm))
self.fsm.add_state("ReadMessage", ReadMessage(self.fsm))
# TRANSITIONS
self.fsm.add_transistion("toWaiting", Transistion("Waiting"))
self.fsm.add_transistion("toReadMessage", Transistion("ReadMessage"))
self.fsm.set_state("StartUp")
def execute(self):
'''Run the router's finite state machine'''
self.fsm.execute()
def update_routing_table(self, packet):
'''Update Routing table if new route info exist'''
for rte in packet.rtes:
# ignore RTEs of self
if rte.addr != self.fsm.router.router_settings['id']:
bestroute = self.routing_table.get(rte.addr)
# set nexthop to source router and calculate metric
rte.set_nexthop(packet.header.src)
rte.metric = min(rte.metric +
self.router_settings['outputs'][
packet.header.src]['metric'],
RIPRouteEntry.MAX_METRIC)
                # Route doesn't yet exist
if not bestroute:
# ignore RTEs with a metric of MAX_METRIC
if rte.metric == RIPRouteEntry.MAX_METRIC:
return
# Add new RTE to routing table
rte.changed = True
self.route_change = True
self.routing_table[rte.addr] = rte
print_message("RTE added for Router: " + str(rte.addr))
return
else:
# Route already exists
if rte.nexthop == bestroute.nexthop:
if bestroute.metric != rte.metric:
if bestroute.metric != RIPRouteEntry.MAX_METRIC \
and rte.metric >= RIPRouteEntry.MAX_METRIC:
# mark for garbage collection
bestroute.metric = RIPRouteEntry.MAX_METRIC
bestroute.garbage = True
bestroute.changed = True
self.route_change = True
else:
self.update_route(bestroute, rte)
# Route still exists with same values
elif not bestroute.garbage:
bestroute.init_timeout()
# Lower metric on existing route
elif rte.metric < bestroute.metric:
self.update_route(bestroute, rte)
def update_route(self, bestroute, rte):
'''Update an existing route entry with new route info'''
bestroute.init_timeout()
bestroute.garbage = False
bestroute.changed = True
bestroute.metric = rte.metric
bestroute.nexthop = rte.nexthop
self.route_change = True
print_message("RTE for Router: " + str(rte.addr) +
" updated with metric=" + str(rte.metric) +
", nexthop=" + str(rte.nexthop) + ".")
def print_routing_table(self):
'''Print the routing table to the terminal'''
line = "+-----------+----------+-----------+---------------+----------+-------------+"
print(line)
print(
"| Routing Table (Router "
+ str(self.router_settings['id']) + ") |")
print(line)
print(
"|Router ID | Metric | NextHop | ChangedFlag | Garbage | Timeout(s) |")
print(line)
print(self.routing_table[self.router_settings['id']])
print(
"+===========+==========+===========+===============+==========+=============+")
for entry in self.routing_table:
if entry != self.router_settings['id']:
print(self.routing_table[entry])
print(line)
print('\n')
def trigger_update(self):
'''Send Routing update for only the routes which have changed'''
changed_rtes = []
print_message("Sending Trigger update.")
for rte in self.routing_table.values():
if rte.changed:
changed_rtes.append(rte)
rte.changed = False
self.route_change = False
# send update with random delay between 1 and 5 seconds
delay = randint(1, 5)
        threading.Timer(delay, self.update, [changed_rtes]).start()
def update(self, entries):
'''Send a message to all output ports'''
if self.router_settings != {}:
sock = list(self.router_settings['inputs'].values())[1]
local_header = RIPHeader(router_id=self.router_settings['id'])
for output in self.router_settings['outputs']:
# Split horizon
# Remove RTES for which nexthop == output
split_horizon_entries = []
for entry in entries:
if entry.nexthop != output:
split_horizon_entries.append(entry)
else:
# Poison reverse
# Create new entry to get around some funky referencing
# When doing poisoned_entry = entry
poisoned_entry = RIPRouteEntry(rawdata=None,
src_id=None, address=entry.addr,
nexthop=entry.nexthop, metric= RIPRouteEntry.MAX_METRIC,
imported=entry.imported)
split_horizon_entries.append(poisoned_entry)
# comment out to disable split horizon
packet = RIPPacket(
header=local_header, rtes=split_horizon_entries)
# Uncomment to disable split horizon
# packet = RIPPacket(header=local_header, rtes=entries)
sock.sendto(packet.serialize(),
(HOST,
self.router_settings['outputs'][output]["port"]))
print_message("Message Sent To Router: " + str(output))
def check_timeout(self):
'''Check the current timeout value for each RTE in the routing table.
If the time difference with now is greater than ROUTE_TIMEOUT, then
set the metric to 16 and start the garbage collection timer.'''
print_message("Checking timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.timeout != None and \
(datetime.datetime.now() - rte.timeout).total_seconds() \
>= ROUTE_TIMEOUT:
rte.garbage = True
rte.changed = True
self.route_change = True
rte.metric = RIPRouteEntry.MAX_METRIC
rte.timeout = datetime.datetime.now()
self.print_routing_table()
print_message("Router: " + str(rte.addr) + " timed out.")
def garbage_timer(self):
'''Check the status of the garbage property of each RTE. If true, and
the timeout value difference with now is greater than DELETE_TIMEOUT,
mark it for deletion'''
print_message("Checking garbage timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.garbage:
if (datetime.datetime.now() - rte.timeout).total_seconds() \
>= DELETE_TIMEOUT:
rte.marked_for_delection = True
def garbage_collection(self):
'''Check the routing table for RTE's that are marked for deletion and
remove them.'''
print_message("Collecting Garbage...")
if self.routing_table != {}:
delete_routes = []
for rte in self.routing_table.values():
if rte.marked_for_delection:
delete_routes.append(rte.addr)
print_message("Router: " + str(rte.addr) + " has been " +
"removed from the routing table.")
for entry in delete_routes:
del self.routing_table[entry]
self.print_routing_table()
def timer(self, function, param=None):
'''Start a periodic timer which calls a specified function'''
if param != None:
function(list(param.values()))
period = BASE_TIMER * randrange(8, 12, 1) / 10
else:
period = BASE_TIMER
function()
threading.Timer(period, self.timer, [function, param]).start()
def start_timers(self):
'''Start the timers on separate threads'''
self.timer(self.update, param=self.routing_table)
self.timer(self.check_timeout)
self.timer(self.garbage_timer)
self.timer(self.garbage_collection)
def main_loop(self):
'''Start the main loop for the program.'''
while True:
self.execute()
# RUN THE PROGRAM
def print_message(message):
'''Print the given message with the current time before it'''
if DEBUG:
print("[" + time.strftime("%H:%M:%S") + "]: " + message)
def main():
'''Main function to run the program.'''
if __name__ == "__main__":
router = Router(str(sys.argv[-1]))
router.start_timers()
router.main_loop()
main()
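# Hedged example (not part of the original file) of a configuration file in the
# format documented by the module docstring; the file name and numbers are
# arbitrary but within the documented ranges:
#
#   [Settings]
#   router-id = 1
#   input-ports = 6110, 6201, 7345
#   outputs = 5000-1-2, 5002-5-6, 5004-8-7
#
# Run with:  python3 Router.py router1.ini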
| 35.12691
| 95
| 0.544174
| 3,247
| 29,893
| 4.894672
| 0.13243
| 0.031712
| 0.016359
| 0.011955
| 0.217517
| 0.134713
| 0.085006
| 0.063361
| 0.052098
| 0.041402
| 0
| 0.010031
| 0.356371
| 29,893
| 850
| 96
| 35.168235
| 0.816008
| 0.287091
| 0
| 0.224444
| 0
| 0
| 0.067423
| 0.010024
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135556
| false
| 0.008889
| 0.031111
| 0.004444
| 0.233333
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c090ce75cc118c533814274bbfc243abbfc79a
| 5,669
|
py
|
Python
|
atlaselectrophysiology/extract_files.py
|
alowet/iblapps
|
9be936cd6806153dde0cbff1b6f2180191de3aeb
|
[
"MIT"
] | null | null | null |
atlaselectrophysiology/extract_files.py
|
alowet/iblapps
|
9be936cd6806153dde0cbff1b6f2180191de3aeb
|
[
"MIT"
] | null | null | null |
atlaselectrophysiology/extract_files.py
|
alowet/iblapps
|
9be936cd6806153dde0cbff1b6f2180191de3aeb
|
[
"MIT"
] | null | null | null |
from ibllib.io import spikeglx
import numpy as np
import ibllib.dsp as dsp
from scipy import signal
from ibllib.misc import print_progress
from pathlib import Path
import alf.io as aio
import logging
import ibllib.ephys.ephysqc as ephysqc
from phylib.io import alf
_logger = logging.getLogger('ibllib')
RMS_WIN_LENGTH_SECS = 3
WELCH_WIN_LENGTH_SAMPLES = 1024
def rmsmap(fbin, spectra=True):
"""
Computes RMS map in time domain and spectra for each channel of Neuropixel probe
    :param fbin: binary file in spike glx format (will look for attached metadata)
    :type fbin: str or pathlib.Path
    :param spectra: whether to compute the power spectrum (only needed for lfp data)
:type: bool
:return: a dictionary with amplitudes in channeltime space, channelfrequency space, time
and frequency scales
"""
    if not isinstance(fbin, spikeglx.Reader):
        sglx = spikeglx.Reader(fbin)
    else:
        sglx = fbin
rms_win_length_samples = 2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS))
    # the window generator will generate window indices
wingen = dsp.WindowGenerator(ns=sglx.ns, nswin=rms_win_length_samples, overlap=0)
# pre-allocate output dictionary of numpy arrays
win = {'TRMS': np.zeros((wingen.nwin, sglx.nc)),
'nsamples': np.zeros((wingen.nwin,)),
'fscale': dsp.fscale(WELCH_WIN_LENGTH_SAMPLES, 1 / sglx.fs, one_sided=True),
'tscale': wingen.tscale(fs=sglx.fs)}
win['spectral_density'] = np.zeros((len(win['fscale']), sglx.nc))
# loop through the whole session
for first, last in wingen.firstlast:
D = sglx.read_samples(first_sample=first, last_sample=last)[0].transpose()
# remove low frequency noise below 1 Hz
D = dsp.hp(D, 1 / sglx.fs, [0, 1])
iw = wingen.iw
win['TRMS'][iw, :] = dsp.rms(D)
win['nsamples'][iw] = D.shape[1]
if spectra:
# the last window may be smaller than what is needed for welch
if last - first < WELCH_WIN_LENGTH_SAMPLES:
continue
# compute a smoothed spectrum using welch method
_, w = signal.welch(D, fs=sglx.fs, window='hanning', nperseg=WELCH_WIN_LENGTH_SAMPLES,
detrend='constant', return_onesided=True, scaling='density',
axis=-1)
win['spectral_density'] += w.T
# print at least every 20 windows
if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0:
print_progress(iw, wingen.nwin)
return win
def extract_rmsmap(fbin, out_folder=None, spectra=True):
"""
Wrapper for rmsmap that outputs _ibl_ephysRmsMap and _ibl_ephysSpectra ALF files
    :param fbin: binary file in spike glx format (will look for attached metadata)
    :param out_folder: folder in which to store output ALF files. Default uses the folder in which
        the `fbin` file lives.
    :param spectra: whether to compute the power spectrum (only needed for lfp data)
:type: bool
:return: None
"""
_logger.info(f"Computing QC for {fbin}")
sglx = spikeglx.Reader(fbin)
# check if output ALF files exist already:
if out_folder is None:
out_folder = Path(fbin).parent
else:
out_folder = Path(out_folder)
alf_object_time = f'_iblqc_ephysTimeRms{sglx.type.upper()}'
alf_object_freq = f'_iblqc_ephysSpectralDensity{sglx.type.upper()}'
# crunch numbers
rms = rmsmap(fbin, spectra=spectra)
# output ALF files, single precision with the optional label as suffix before extension
if not out_folder.exists():
out_folder.mkdir()
tdict = {'rms': rms['TRMS'].astype(np.single), 'timestamps': rms['tscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_time, dico=tdict)
if spectra:
fdict = {'power': rms['spectral_density'].astype(np.single),
'freqs': rms['fscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_freq, dico=fdict)
def _sample2v(ap_file):
"""
Convert raw ephys data to Volts
"""
md = spikeglx.read_meta_data(ap_file.with_suffix('.meta'))
s2v = spikeglx._conversion_sample2v_from_meta(md)
return s2v['ap'][0]
def ks2_to_alf(ks_path, bin_path, out_path, bin_file=None, ampfactor=1, label=None, force=True):
"""
Convert Kilosort 2 output to ALF dataset for single probe data
:param ks_path:
:param bin_path: path of raw data
:param out_path:
:return:
"""
m = ephysqc.phy_model_from_ks2_path(ks2_path=ks_path, bin_path=bin_path, bin_file=bin_file)
ephysqc.spike_sorting_metrics_ks2(ks_path, m, save=True)
ac = alf.EphysAlfCreator(m)
ac.convert(out_path, label=label, force=force, ampfactor=ampfactor)
def extract_data(ks_path, ephys_path, out_path):
efiles = spikeglx.glob_ephys_files(ephys_path)
for efile in efiles:
if efile.get('ap') and efile.ap.exists():
ks2_to_alf(ks_path, ephys_path, out_path, bin_file=efile.ap,
ampfactor=_sample2v(efile.ap), label=None, force=True)
extract_rmsmap(efile.ap, out_folder=out_path, spectra=False)
if efile.get('lf') and efile.lf.exists():
extract_rmsmap(efile.lf, out_folder=out_path)
# if __name__ == '__main__':
#
# ephys_path = Path('C:/Users/Mayo/Downloads/raw_ephys_data')
# ks_path = Path('C:/Users/Mayo/Downloads/KS2')
# out_path = Path('C:/Users/Mayo/Downloads/alf')
# extract_data(ks_path, ephys_path, out_path)
| 40.492857
| 99
| 0.657259
| 793
| 5,669
| 4.523329
| 0.319042
| 0.030109
| 0.026763
| 0.023418
| 0.167828
| 0.154725
| 0.126011
| 0.126011
| 0.107611
| 0.107611
| 0
| 0.008792
| 0.237608
| 5,669
| 139
| 100
| 40.784173
| 0.821148
| 0.28894
| 0
| 0.053333
| 0
| 0
| 0.073294
| 0.022388
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.226667
| 0.026667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c0aad467733eae25b9c32e9a7eb9d1b86f8921
| 9,955
|
py
|
Python
|
examples/basics/visuals/line_prototype.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | 2,617
|
2015-01-02T07:52:18.000Z
|
2022-03-29T19:31:15.000Z
|
examples/basics/visuals/line_prototype.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | 1,674
|
2015-01-01T00:36:08.000Z
|
2022-03-31T19:35:56.000Z
|
examples/basics/visuals/line_prototype.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | 719
|
2015-01-10T14:25:00.000Z
|
2022-03-02T13:24:56.000Z
|
# -*- coding: utf-8 -*-
# vispy: gallery 10
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import sys
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.filters import Clipper, ColorFilter
from vispy.visuals.shaders import MultiProgram
from vispy.visuals.collections import PointCollection
from vispy.visuals.transforms import STTransform
from vispy.scene import SceneCanvas
from vispy.scene.visuals import create_visual_node
class LineVisual(visuals.Visual):
"""Example of a very simple GL-line visual.
This shows the minimal set of methods that need to be reimplemented to
make a new visual class.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
vcode = """
attribute vec2 a_pos;
void main() {
gl_Position = $transform(vec4(a_pos, 0., 1.));
gl_PointSize = 10.;
}
"""
fcode = """
void main() {
gl_FragColor = $color;
}
"""
visuals.Visual.__init__(self, vcode=vcode, fcode=fcode)
self.pos_buf = gloo.VertexBuffer()
# The Visual superclass contains a MultiProgram, which is an object
# that behaves like a normal shader program (you can assign shader
# code, upload values, set template variables, etc.) but internally
# manages multiple ModularProgram instances, one per view.
# The MultiProgram is accessed via the `shared_program` property, so
# the following modifications to the program will be applied to all
# views:
self.shared_program['a_pos'] = self.pos_buf
self.shared_program.frag['color'] = color
self._need_upload = False
# Visual keeps track of draw mode, index buffer, and GL state. These
# are shared between all views.
self._draw_mode = 'line_strip'
self.set_gl_state('translucent', depth_test=False)
if pos is not None:
self.set_data(pos)
def set_data(self, pos):
self._pos = pos
self._need_upload = True
def _prepare_transforms(self, view=None):
view.view_program.vert['transform'] = view.transforms.get_transform()
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._need_upload:
# Note that pos_buf is shared between all views, so we have no need
# to use the *view* argument in this example. This will be true
# for most visuals.
self.pos_buf.set_data(self._pos)
self._need_upload = False
class PointVisual(LineVisual):
"""Another simple visual class.
Due to the simplicity of these example classes, it was only necessary to
subclass from LineVisual and set the draw mode to 'points'. A more
fully-featured PointVisual class might not follow this approach.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
LineVisual.__init__(self, pos, color)
self._draw_mode = 'points'
class PlotLineVisual(visuals.CompoundVisual):
"""An example compound visual that draws lines and points.
To the user, the compound visual behaves exactly like a normal visual--it
has a transform system, draw() and bounds() methods, etc. Internally, the
compound visual automatically manages proxying these transforms and methods
to its sub-visuals.
"""
def __init__(self, pos=None, line_color=(1, 1, 1, 1),
point_color=(1, 1, 1, 1)):
self._line = LineVisual(pos, color=line_color)
self._point = PointVisual(pos, color=point_color)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
class PointCollectionVisual(visuals.Visual):
"""Thin wrapper around a point collection.
Note: This is currently broken!
"""
def __init__(self):
prog = MultiProgram(vcode='', fcode='')
self.points = PointCollection("agg", color="shared", program=prog)
visuals.Visual.__init__(self, program=prog)
def _prepare_draw(self, view):
if self.points._need_update:
self.points._update()
self._draw_mode = self.points._mode
self._index_buffer = self.points._indices_buffer
def append(self, *args, **kwargs):
self.points.append(*args, **kwargs)
def _prepare_transforms(self, view=None):
pass
@property
def color(self):
return self.points['color']
@color.setter
def color(self, c):
self.points['color'] = c
class PanZoomTransform(STTransform):
def __init__(self, canvas=None, aspect=None, **kwargs):
self._aspect = aspect
self.attach(canvas)
STTransform.__init__(self, **kwargs)
def attach(self, canvas):
""" Attach this tranform to a canvas """
self._canvas = canvas
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move)
def on_mouse_move(self, event):
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
self.move(dxy)
elif button == 2:
center = event.press_event.pos
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
def on_mouse_wheel(self, event):
self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
canvas = app.Canvas(keys='interactive', size=(900, 600), show=True,
title="Visual Canvas")
pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32')
pos[0] = [0, 0]
# Make a line visual
line = LineVisual(pos=pos)
line.transforms.canvas = canvas
line.transform = STTransform(scale=(2, 1), translate=(20, 20))
panzoom = PanZoomTransform(canvas)
line.transforms.scene_transform = panzoom
panzoom.changed.connect(lambda ev: canvas.update())
# Attach color filter to all views (current and future) of the visual
line.attach(ColorFilter((1, 1, 0.5, 0.7)))
# Attach a clipper just to this view. The Clipper filter requires a
# transform that maps from the framebuffer coordinate system to the
# clipping coordinates.
tr = line.transforms.get_transform('framebuffer', 'canvas')
line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line)
# Make a view of the line that will draw its shadow
shadow = line.view()
shadow.transforms.canvas = canvas
shadow.transform = STTransform(scale=(2, 1), translate=(25, 25))
shadow.transforms.scene_transform = panzoom
shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow)
tr = shadow.transforms.get_transform('framebuffer', 'canvas')
shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow)
# And make a second view of the line with different clipping bounds
view = line.view()
view.transforms.canvas = canvas
view.transform = STTransform(scale=(2, 0.5), translate=(450, 150))
tr = view.transforms.get_transform('framebuffer', 'canvas')
view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view)
# Make a compound visual
plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3))
plot.transforms.canvas = canvas
plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1))
tr = plot.transforms.get_transform('framebuffer', 'canvas')
plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot)
# And make a view on the compound
view2 = plot.view()
view2.transforms.canvas = canvas
view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400))
tr = view2.transforms.get_transform('framebuffer', 'canvas')
view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2)
# And a shadow for the view
shadow2 = plot.view()
shadow2.transforms.canvas = canvas
shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405))
shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2)
tr = shadow2.transforms.get_transform('framebuffer', 'canvas')
shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
# Example of a collection visual
collection = PointCollectionVisual()
collection.transforms.canvas = canvas
collection.transform = STTransform(translate=(750, 150))
collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)),
itemsize=5000)
collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1)
shadow3 = collection.view()
shadow3.transforms.canvas = canvas
shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152))
shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3)
# tr = shadow3.transforms.get_transform('framebuffer', 'canvas')
# shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
order = [shadow, line, view, plot, shadow2, view2, shadow3, collection]
@canvas.connect
def on_draw(event):
canvas.context.clear((0.3, 0.3, 0.3, 1.0))
for v in order:
v.draw()
def on_resize(event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1])
canvas.context.set_viewport(*vp)
for v in order:
v.transforms.configure(canvas=canvas, viewport=vp)
canvas.events.resize.connect(on_resize)
on_resize(None)
Line = create_visual_node(LineVisual)
canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True)
v = canvas2.central_widget.add_view(margin=10)
v.border_color = (1, 1, 1, 1)
v.bgcolor = (0.3, 0.3, 0.3, 1)
v.camera = 'panzoom'
line2 = Line(pos, parent=v.scene)
def mouse(ev):
print(ev)
v.events.mouse_press.connect(mouse)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
| 34.209622
| 79
| 0.668609
| 1,366
| 9,955
| 4.762079
| 0.237921
| 0.005842
| 0.004612
| 0.035511
| 0.161722
| 0.096849
| 0.066103
| 0.052575
| 0.040584
| 0.022752
| 0
| 0.039934
| 0.210146
| 9,955
| 290
| 80
| 34.327586
| 0.787359
| 0.239076
| 0
| 0.069767
| 0
| 0
| 0.065089
| 0.002959
| 0
| 0
| 0
| 0
| 0
| 1
| 0.110465
| false
| 0.005814
| 0.052326
| 0.005814
| 0.197674
| 0.005814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c0e10976672b4523dad7b6dd7cde8c3d5b7c7b
| 6,272
|
py
|
Python
|
util/util.py
|
harshitAgr/vess2ret
|
5702175bcd9ecde34d4fedab45a7cd2878a0184c
|
[
"MIT"
] | 111
|
2017-01-30T17:49:15.000Z
|
2022-03-28T05:53:51.000Z
|
util/util.py
|
engineerlion/vess2ret
|
5702175bcd9ecde34d4fedab45a7cd2878a0184c
|
[
"MIT"
] | 19
|
2017-03-06T10:28:16.000Z
|
2020-12-09T12:25:22.000Z
|
util/util.py
|
engineerlion/vess2ret
|
5702175bcd9ecde34d4fedab45a7cd2878a0184c
|
[
"MIT"
] | 46
|
2017-02-10T18:39:25.000Z
|
2022-03-05T21:39:46.000Z
|
"""Auxiliary methods."""
import os
import json
from errno import EEXIST
import numpy as np
import seaborn as sns
import cPickle as pickle
import matplotlib.pyplot as plt
sns.set()
DEFAULT_LOG_DIR = 'log'
ATOB_WEIGHTS_FILE = 'atob_weights.h5'
D_WEIGHTS_FILE = 'd_weights.h5'
class MyDict(dict):
"""
    Dictionary that allows access to elements with dot notation.
ex:
>> d = MyDict({'key': 'val'})
>> d.key
'val'
>> d.key2 = 'val2'
>> d
{'key2': 'val2', 'key': 'val'}
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
def convert_to_rgb(img, is_binary=False):
"""Given an image, make sure it has 3 channels and that it is between 0 and 1."""
if len(img.shape) != 3:
raise Exception("""Image must have 3 dimensions (channels x height x width). """
"""Given {0}""".format(len(img.shape)))
img_ch, _, _ = img.shape
if img_ch != 3 and img_ch != 1:
raise Exception("""Unsupported number of channels. """
"""Must be 1 or 3, given {0}.""".format(img_ch))
imgp = img
if img_ch == 1:
imgp = np.repeat(img, 3, axis=0)
if not is_binary:
imgp = imgp * 127.5 + 127.5
imgp /= 255.
return np.clip(imgp.transpose((1, 2, 0)), 0, 1)
def compose_imgs(a, b, is_a_binary=True, is_b_binary=False):
"""Place a and b side by side to be plotted."""
ap = convert_to_rgb(a, is_binary=is_a_binary)
bp = convert_to_rgb(b, is_binary=is_b_binary)
if ap.shape != bp.shape:
raise Exception("""A and B must have the same size. """
"""{0} != {1}""".format(ap.shape, bp.shape))
# ap.shape and bp.shape must have the same size here
h, w, ch = ap.shape
composed = np.zeros((h, 2*w, ch))
composed[:, :w, :] = ap
composed[:, w:, :] = bp
return composed
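# A small usage sketch (hypothetical arrays; assumes channels-first images as expected above):
# a = np.zeros((1, 64, 64))                    # binary mask with a single channel
# b = np.random.uniform(-1, 1, (3, 64, 64))    # generated image scaled to [-1, 1]
# side_by_side = compose_imgs(a, b, is_a_binary=True, is_b_binary=False)
# plt.imshow(side_by_side); plt.axis('off'); plt.show()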
def get_log_dir(log_dir, expt_name):
"""Compose the log_dir with the experiment name."""
if log_dir is None:
raise Exception('log_dir can not be None.')
if expt_name is not None:
return os.path.join(log_dir, expt_name)
return log_dir
def mkdir(mypath):
"""Create a directory if it does not exist."""
try:
os.makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(mypath):
pass
else:
raise
def create_expt_dir(params):
"""Create the experiment directory and return it."""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
# Create directories if they do not exist
mkdir(params.log_dir)
mkdir(expt_dir)
# Save the parameters
json.dump(params, open(os.path.join(expt_dir, 'params.json'), 'wb'),
indent=4, sort_keys=True)
return expt_dir
def plot_loss(loss, label, filename, log_dir):
"""Plot a loss function and save it in a file."""
plt.figure(figsize=(5, 4))
plt.plot(loss, label=label)
plt.legend()
plt.savefig(os.path.join(log_dir, filename))
plt.clf()
def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None,
is_a_binary=True, is_b_binary=False):
"""Log losses and atob results."""
log_dir = get_log_dir(log_dir, expt_name)
# Save the losses for further inspection
pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb'))
###########################################################################
# PLOT THE LOSSES #
###########################################################################
plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir)
plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir)
plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir)
plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir)
###########################################################################
# PLOT THE A->B RESULTS #
###########################################################################
plt.figure(figsize=(10, 6))
for i in range(N*N):
a, _ = next(it_val)
bp = atob.predict(a)
img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary)
plt.subplot(N, N, i+1)
plt.imshow(img)
plt.axis('off')
plt.savefig(os.path.join(log_dir, 'atob.png'))
plt.clf()
# Make sure all the figures are closed.
plt.close('all')
def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Save the weights of the models into a file."""
log_dir = get_log_dir(log_dir, expt_name)
models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True)
models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True)
def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights into the corresponding models."""
log_dir = get_log_dir(log_dir, expt_name)
atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE))
d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE))
def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights of the model m."""
log_dir = get_log_dir(log_dir, expt_name)
m.load_weights(os.path.join(log_dir, weights_file))
def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the losses of the given experiment."""
log_dir = get_log_dir(log_dir, expt_name)
losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb'))
return losses
def load_params(params):
"""
Load the parameters of an experiment and return them.
The params passed as argument will be merged with the new params dict.
If there is a conflict with a key, the params passed as argument prevails.
"""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb'))
# Update the loaded parameters with the current parameters. This will
# override conflicting keys as expected.
expt_params.update(params)
return expt_params
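# A minimal usage sketch (hypothetical experiment name; assumes `params` behaves like MyDict):
# params = MyDict({'log_dir': DEFAULT_LOG_DIR, 'expt_name': 'my_experiment'})
# expt_dir = create_expt_dir(params)     # creates log/my_experiment and saves params.json
# params = load_params(params)           # later: reload saved params, current values win on conflict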
| 30.745098
| 89
| 0.603795
| 921
| 6,272
| 3.908795
| 0.229099
| 0.086667
| 0.033333
| 0.046667
| 0.300833
| 0.269722
| 0.262222
| 0.221944
| 0.139722
| 0.058889
| 0
| 0.011366
| 0.228476
| 6,272
| 203
| 90
| 30.896552
| 0.732589
| 0.213967
| 0
| 0.084906
| 0
| 0
| 0.083747
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113208
| false
| 0.009434
| 0.066038
| 0
| 0.273585
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c1ccef20d9d7a1d41049e783b9575459b18d70
| 834
|
py
|
Python
|
services/apiRequests.py
|
CakeCrusher/voon-video_processing
|
6ecaacf4e36baa72d713a92101b445885b3d95ef
|
[
"MIT"
] | null | null | null |
services/apiRequests.py
|
CakeCrusher/voon-video_processing
|
6ecaacf4e36baa72d713a92101b445885b3d95ef
|
[
"MIT"
] | null | null | null |
services/apiRequests.py
|
CakeCrusher/voon-video_processing
|
6ecaacf4e36baa72d713a92101b445885b3d95ef
|
[
"MIT"
] | null | null | null |
from github import Github
def parseGithubURL(url):
splitURL = url.split('/')
owner = splitURL[3]
repo = splitURL[4]
return {
"owner": owner,
"repo": repo
}
def fetchRepoFiles(owner, repo):
files = []
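    # Note: the access token below is hard-coded in the original source; in practice it is
    # safer to read it from an environment variable, e.g. os.environ.get('GITHUB_TOKEN')
    # (hypothetical variable name).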
g = Github('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')
repo = g.get_repo(f'{owner}/{repo}')
contents = repo.get_contents('')
while contents:
file_content = contents.pop(0)
if file_content.type == 'dir':
contents.extend(repo.get_contents(file_content.path))
else:
files.append(file_content.path)
return files
# parsedUrl = parseGithubURL('https://github.com/CakeCrusher/restock_emailer')
# filePaths = fetchRepoFiles(parsedUrl['owner'], parsedUrl['repo'])
# files = [path.split('/')[-1] for path in filePaths]
# print(files)
| 29.785714
| 78
| 0.642686
| 93
| 834
| 5.666667
| 0.473118
| 0.083491
| 0.056926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015291
| 0.215827
| 834
| 27
| 79
| 30.888889
| 0.79052
| 0.248201
| 0
| 0
| 0
| 0
| 0.107717
| 0.064309
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c3df00575427d7293f54af4b1eb86f32f1ea11
| 995
|
py
|
Python
|
utils/tricks.py
|
HouchangX-AI/Dialog-Solution
|
1f68f847d9c9c4a46ef0b5fc6a78014402a4dd7a
|
[
"MIT"
] | 3
|
2020-03-12T06:28:01.000Z
|
2020-03-27T20:15:53.000Z
|
utils/tricks.py
|
HouchangX-AI/Dialog-Solution
|
1f68f847d9c9c4a46ef0b5fc6a78014402a4dd7a
|
[
"MIT"
] | null | null | null |
utils/tricks.py
|
HouchangX-AI/Dialog-Solution
|
1f68f847d9c9c4a46ef0b5fc6a78014402a4dd7a
|
[
"MIT"
] | 2
|
2020-03-19T02:47:37.000Z
|
2021-12-14T02:26:40.000Z
|
#-*- coding: utf-8 -*-
import codecs
import random
from utils.global_names import GlobalNames, get_file_path
def modify_tokens(tokens):
new_tokens = []
pos = 0
len_ = len(tokens)
while pos < len_:
if tokens[pos] == "[":
if pos+2 < len_ and tokens[pos+2] == "]":
token = "".join(tokens[pos:pos+3])
new_tokens.append(token)
pos += 3
elif pos+3 < len_ and tokens[pos+3] == "]":
if tokens[pos+2].isdigit():
tokens[pos+2] = "_digit_"
token = "".join(tokens[pos:pos+4])
new_tokens.append(token)
pos += 4
else:
pos += 1
else:
new_tokens.append(tokens[pos])
pos += 1
return new_tokens
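# Example of the intended behaviour (hypothetical placeholder tokens; assumes bracketed slots
# such as "[", "x", "]" should be fused into a single token):
# modify_tokens(["[", "x", "]", "hello"])     -> ["[x]", "hello"]
# modify_tokens(["[", "x", "7", "]", "hi"])   -> ["[x_digit_]", "hi"]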
def length_weight(corpus, orders, length_limit=6):
for idx, _ in enumerate(orders):
if len(corpus[idx]) > length_limit:
return idx
return 0
| 26.184211
| 57
| 0.501508
| 119
| 995
| 4.033613
| 0.386555
| 0.16875
| 0.0625
| 0.0625
| 0.183333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025723
| 0.374874
| 995
| 37
| 58
| 26.891892
| 0.745981
| 0.021106
| 0
| 0.2
| 0
| 0
| 0.010277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.1
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c76b57ffb3eeb2695ac101001d7de50b9a816d
| 4,344
|
py
|
Python
|
facetools/test/testcases.py
|
bigsassy/django-facetools
|
aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c
|
[
"MIT"
] | 2
|
2018-01-24T20:41:27.000Z
|
2019-06-27T13:24:18.000Z
|
facetools/test/testcases.py
|
bigsassy/django-facetools
|
aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c
|
[
"MIT"
] | null | null | null |
facetools/test/testcases.py
|
bigsassy/django-facetools
|
aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c
|
[
"MIT"
] | null | null | null |
import types
import django.test.testcases
from django.conf import settings
from facetools.models import TestUser
from facetools.common import _create_signed_request
from facetools.test import TestUserNotLoaded
from facetools.signals import sync_facebook_test_user, setup_facebook_test_client
from facetools.common import _get_facetools_test_fixture_name
class FacebookTestCaseMixin(object):
"""
TestCase which makes it possible to test views when the FacebookMiddleware
and SyncFacebookUser middlewares are activated. Must use the Client
attached to this object (i.e. self.client).
"""
facebook_test_user = None
def set_client_signed_request(self, facebook_id, access_token):
"""
Allow code to configure the test client so it has a signed request
of the specified test user for each request
"""
setup_facebook_test_client.send(sender=None, client=self.client, signed_request=_create_signed_request(
settings.FACEBOOK_APPLICATION_SECRET_KEY, facebook_id, oauth_token=access_token))
def _pre_setup(self):
if self.facebook_test_user:
if type(self.facebook_test_user) not in [str, unicode]:
raise Exception("facebook_test_user variable must be a string (found a %s)" % type(self.facebook_test_user))
app_name = get_app_name_from_test_case(type(self).__module__)
facetools_fixture_name = _get_facetools_test_fixture_name(app_name)
if not hasattr(self, 'fixtures'):
self.fixtures = []
if facetools_fixture_name not in self.fixtures:
self.fixtures.append(facetools_fixture_name)
super(FacebookTestCaseMixin, self)._pre_setup()
# Make sure anybody that needs to sync their models loaded from fixtures
# has a chance to do so now that the refreshed user test data is available.
try:
for test_user in TestUser.objects.all():
sync_facebook_test_user.send(sender=None, test_user=test_user)
self.test_user = TestUser.objects.get(name=self.facebook_test_user)
self.set_client_signed_request(self.test_user.facebook_id, self.test_user.access_token)
except TestUser.DoesNotExist:
raise TestUserNotLoaded("Test user %s hasn't been loaded via the %s fixture (did you run sync_facebook_test_users?)" %
(self.facebook_test_user, facetools_fixture_name))
else:
super(FacebookTestCaseMixin, self)._pre_setup()
def get_app_name_from_test_case(module_path_string):
"""
    Gets the Django app from the __class__ attribute of a TestCase in a Django app.
    module_path_string should look something like this: 'facetools_tests.tests.test_test_module'
"""
packages = module_path_string.split(".")
try:
tests_location = packages.index("tests")
except ValueError:
raise ValueError("Couldn't find tests module in %s (are you running this test from tests.py or a tests package in your Django app?)" % module_path_string)
if tests_location == 0:
raise ValueError("Facetools doesn't support Django app's with a name of 'tests', or it failed to find the Django app name out of %s" % module_path_string)
app_name = packages[tests_location - 1]
if app_name not in settings.INSTALLED_APPS:
raise ValueError("Facetools didn't find %s among INSTALLED_APPS. (app name pulled from %s)" % (app_name, module_path_string))
return app_name
# -----------------------------------------------------------------------------
# Test Cases
# -----------------------------------------------------------------------------
class FacebookTransactionTestCase(FacebookTestCaseMixin, django.test.testcases.TransactionTestCase):
def _pre_setup(self):
super(FacebookTransactionTestCase, self)._pre_setup()
class FacebookTestCase(FacebookTestCaseMixin, django.test.testcases.TestCase):
def _pre_setup(self):
super(FacebookTestCase, self)._pre_setup()
if 'LiveServerTestCase' in dir(django.test.testcases):
class FacebookLiveServerTestCase(FacebookTestCaseMixin, django.test.testcases.LiveServerTestCase):
def _pre_setup(self):
super(FacebookLiveServerTestCase, self)._pre_setup()
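# A minimal usage sketch (hypothetical app and test-user names; assumes sync_facebook_test_users
# has been run so the facetools test fixture exists):
#
# class CanvasViewTest(FacebookTestCase):
#     facebook_test_user = 'Test User Alpha'
#
#     def test_canvas_page(self):
#         response = self.client.get('/canvas/')
#         self.assertEqual(response.status_code, 200)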
| 51.714286
| 162
| 0.69268
| 539
| 4,344
| 5.328386
| 0.300557
| 0.047354
| 0.050139
| 0.034819
| 0.116295
| 0.01532
| 0
| 0
| 0
| 0
| 0
| 0.000579
| 0.20419
| 4,344
| 83
| 163
| 52.337349
| 0.8302
| 0.179328
| 0
| 0.142857
| 0
| 0.035714
| 0.136794
| 0.007456
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22c82577ce9bb70304bc0ff3dee27fa81b62e25c
| 564
|
py
|
Python
|
homework_08/calc_fitness.py
|
ufpa-organization-repositories/evolutionary-computing
|
e16786f9619e2b357b94ab91ff3a7b352e6a0d92
|
[
"MIT"
] | null | null | null |
homework_08/calc_fitness.py
|
ufpa-organization-repositories/evolutionary-computing
|
e16786f9619e2b357b94ab91ff3a7b352e6a0d92
|
[
"MIT"
] | null | null | null |
homework_08/calc_fitness.py
|
ufpa-organization-repositories/evolutionary-computing
|
e16786f9619e2b357b94ab91ff3a7b352e6a0d92
|
[
"MIT"
] | null | null | null |
def calc_fitness(pop):
from to_decimal import to_decimal
from math import sin, sqrt
for index, elem in enumerate(pop):
        # only assign a fitness value to chromosomes that do not yet have one
# print(elem[0], elem[1])
x = to_decimal(elem[0])
y = to_decimal(elem[1])
# x = elem[0]
# y = elem[1]
f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
pop[index] = [f6, elem]
return 0
# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao)
| 25.636364
| 87
| 0.546099
| 90
| 564
| 3.355556
| 0.4
| 0.119205
| 0.092715
| 0.02649
| 0.033113
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069479
| 0.285461
| 564
| 21
| 88
| 26.857143
| 0.679901
| 0.326241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22cc9cf5c82866cdbb6751a30f5964a624debd38
| 2,753
|
py
|
Python
|
ch05/ch05-02-timeseries.py
|
alexmalins/kagglebook
|
260f6634b6bbaa94c2e989770e75dc7101f5c614
|
[
"BSD-3-Clause"
] | 13
|
2021-02-20T08:57:28.000Z
|
2022-03-31T12:47:08.000Z
|
ch05/ch05-02-timeseries.py
|
Tharunkumar01/kagglebook
|
260f6634b6bbaa94c2e989770e75dc7101f5c614
|
[
"BSD-3-Clause"
] | null | null | null |
ch05/ch05-02-timeseries.py
|
Tharunkumar01/kagglebook
|
260f6634b6bbaa94c2e989770e75dc7101f5c614
|
[
"BSD-3-Clause"
] | 2
|
2021-07-15T03:56:39.000Z
|
2021-07-29T00:53:54.000Z
|
# ---------------------------------
# Prepare the data etc.
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target values, and test_x is the test data
# stored in pandas DataFrames and Series (numpy arrays also used)
train = pd.read_csv('../input/sample-data/train_preprocessed.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed.csv')
# As time-series data assume a period variable is set that changes with time
train_x['period'] = np.arange(0, len(train_x)) // (len(train_x) // 4)
train_x['period'] = np.clip(train_x['period'], 0, 3)
test_x['period'] = 4
# -----------------------------------
# Hold-out method for time-series data
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Here for within the training data period 3 is used for validation and periods 0 to 2 are used for training
is_tr = train_x['period'] < 3
is_va = train_x['period'] == 3
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# -----------------------------------
# Cross validation for time-series data (use method that follows time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] < va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# (For reference) Using TimeSeriesSplit() function is difficult as only the order of the data can be used
from sklearn.model_selection import TimeSeriesSplit
tss = TimeSeriesSplit(n_splits=4)
for tr_idx, va_idx in tss.split(train_x):
tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
# -----------------------------------
# Cross validation for time-series data (method to simply partition by time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [0, 1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] != va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
| 43.698413
| 108
| 0.653106
| 468
| 2,753
| 3.638889
| 0.196581
| 0.07751
| 0.063418
| 0.035232
| 0.530241
| 0.51145
| 0.431591
| 0.431591
| 0.431591
| 0.431591
| 0
| 0.01458
| 0.152924
| 2,753
| 62
| 109
| 44.403226
| 0.715695
| 0.519797
| 0
| 0.333333
| 0
| 0
| 0.120862
| 0.065435
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22cf451d04e0bf782f9148035e8ed296f046dac4
| 2,152
|
py
|
Python
|
python-scripts/plot_delay.py
|
GayashanNA/my-scripts
|
d865e828c833d6b54c787ce9475da512f8488278
|
[
"Apache-2.0"
] | null | null | null |
python-scripts/plot_delay.py
|
GayashanNA/my-scripts
|
d865e828c833d6b54c787ce9475da512f8488278
|
[
"Apache-2.0"
] | null | null | null |
python-scripts/plot_delay.py
|
GayashanNA/my-scripts
|
d865e828c833d6b54c787ce9475da512f8488278
|
[
"Apache-2.0"
] | null | null | null |
import csv
import matplotlib.pyplot as plt
import time
PLOT_PER_WINDOW = False
WINDOW_LENGTH = 60000
BINS = 1000
delay_store = {}
perwindow_delay_store = {}
plotting_delay_store = {}
filename = "output-large.csv"
# filename = "output.csv"
# filename = "output-medium.csv"
# filename = "output-small.csv"
# filename = "output-tiny.csv"
with open(filename, "rU") as dataFile:
csvreader = csv.reader(dataFile)
for row in csvreader:
if len(row) > 2 and str(row[0]).isdigit():
delay_store[long(row[1])] = long(row[2])
window_begin = min(delay_store.keys())
window_end = max(delay_store.keys())
if PLOT_PER_WINDOW:
window_end = window_begin + WINDOW_LENGTH
# find the time delays that are within the window of choice
for (tapp, delay) in delay_store.iteritems():
if window_begin <= tapp <= window_end:
perwindow_delay_store[tapp] = delay
plotting_delay_store = perwindow_delay_store
else:
plotting_delay_store = delay_store
# the histogram of the data
n, bins, patches = plt.hist(plotting_delay_store.values(), BINS, histtype='stepfilled',
normed=True, cumulative=False, facecolor='blue', alpha=0.9)
# plt.axhline(y=0.95, color='red', label='0.95')
max_delay = max(plotting_delay_store.values())
min_delay = min(plotting_delay_store.values())
count = len(plotting_delay_store.values())
# format epoch time to date time to be shown in the plot figure
window_begin_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_begin / 1000))
window_end_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_end / 1000))
title = "Window begin: %s\n" % window_begin_in_datetime
title += "Window end: %s\n" % window_end_in_datetime
# title += "Window length: %dms\n" % WINDOW_LENGTH
title += "Window length: ~%dmins\n" % ((window_end - window_begin)/60000)
title += "Maximum delay: %dms\n" % max_delay
title += "Minimum delay: %dms\n" % min_delay
title += "Count: %d" % count
# start plotting
plt.xlabel('Delay (ms)')
plt.ylabel('Probability')
plt.grid(True)
plt.legend()
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
plt.show()
| 33.107692
| 98
| 0.703067
| 320
| 2,152
| 4.540625
| 0.353125
| 0.110117
| 0.086717
| 0.06607
| 0.104611
| 0.064694
| 0.064694
| 0.064694
| 0.064694
| 0.064694
| 0
| 0.019835
| 0.156599
| 2,152
| 64
| 99
| 33.625
| 0.780716
| 0.171933
| 0
| 0
| 0
| 0
| 0.110672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22cfe37b118c380f98097dbe5e6dfaa75be99d71
| 427
|
py
|
Python
|
video/rest/compositionhooks/delete-hook/delete-hook.6.x.py
|
afeld/api-snippets
|
d77456c387c9471d36aa949e2cf785d8a534a370
|
[
"MIT"
] | 3
|
2020-05-05T10:01:02.000Z
|
2021-02-06T14:23:13.000Z
|
video/rest/compositionhooks/delete-hook/delete-hook.6.x.py
|
afeld/api-snippets
|
d77456c387c9471d36aa949e2cf785d8a534a370
|
[
"MIT"
] | null | null | null |
video/rest/compositionhooks/delete-hook/delete-hook.6.x.py
|
afeld/api-snippets
|
d77456c387c9471d36aa949e2cf785d8a534a370
|
[
"MIT"
] | null | null | null |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'
client = Client(api_key_sid, api_key_secret)
did_delete = client.video\
.compositionHooks('HKXXXX')\
.delete()
if(did_delete):
print('Composition removed')
| 28.466667
| 72
| 0.709602
| 58
| 427
| 5
| 0.568966
| 0.103448
| 0.124138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201405
| 427
| 14
| 73
| 30.5
| 0.85044
| 0.295082
| 0
| 0
| 0
| 0
| 0.167785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22d06d326dbc942db8f36ca27ac8dc094685d70b
| 6,924
|
py
|
Python
|
advesarial_text/data/data_utils_test.py
|
slowy07/tensorflow-model-research
|
48ba4ba6240452eb3e3350fe7099f2b045acc530
|
[
"MIT"
] | null | null | null |
advesarial_text/data/data_utils_test.py
|
slowy07/tensorflow-model-research
|
48ba4ba6240452eb3e3350fe7099f2b045acc530
|
[
"MIT"
] | null | null | null |
advesarial_text/data/data_utils_test.py
|
slowy07/tensorflow-model-research
|
48ba4ba6240452eb3e3350fe7099f2b045acc530
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from data import data_utils
data = data_utils
class SequenceWrapperTest(tf.test.TestCase):
def testDefaultTimesteps(self):
seq = data.SequenceWrapper()
t1 = seq.add_timestep()
_ = seq.add_timestep()
self.assertEqual(len(seq), 2)
self.assertEqual(t1.weight, 0.0)
self.assertEqual(t1.label, 0)
self.assertEqual(t1.token, 0)
def testSettersAndGetters(self):
ts = data.SequenceWrapper().add_timestep()
ts.set_token(3)
ts.set_label(4)
ts.set_weight(2.0)
self.assertEqual(ts.token, 3)
self.assertEqual(ts.label, 4)
self.assertEqual(ts.weight, 2.0)
def testTimestepIteration(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(0)
seq.add_timestep().set_token(1)
seq.add_timestep().set_token(2)
for i, ts in enumerate(seq):
self.assertEqual(ts.token, i)
def testFillsSequenceExampleCorrectly(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(1).set_label(2).set_weight(3.0)
seq.add_timestep().set_token(10).set_label(20).set_weight(30.0)
seq_ex = seq.seq
fl = seq_ex.feature_lists.feature_list
fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature
fl_label = fl[data.SequenceWrapper.F_LABEL].feature
fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature
_ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]]
self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10])
self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20])
self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0])
class DataUtilsTest(tf.test.TestCase):
def testSplitByPunct(self):
output = data.split_by_punct(
"hello! world, i've been\nwaiting\tfor\ryou for.a long time"
)
expected = [
"hello",
"world",
"i",
"ve",
"been",
"waiting",
"for",
"you",
"for",
"a",
"long",
"time",
]
self.assertListEqual(output, expected)
def _buildDummySequence(self):
seq = data.SequenceWrapper()
for i in range(10):
seq.add_timestep().set_token(i)
return seq
def testBuildLMSeq(self):
seq = self._buildDummySequence()
lm_seq = data.build_lm_sequence(seq)
for i, ts in enumerate(lm_seq):
            # For end of sequence, the token and label should be the same, and the weight
# should be 0.0.
if i == len(lm_seq) - 1:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i)
self.assertEqual(ts.weight, 0.0)
else:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i + 1)
self.assertEqual(ts.weight, 1.0)
def testBuildSAESeq(self):
seq = self._buildDummySequence()
sa_seq = data.build_seq_ae_sequence(seq)
self.assertEqual(len(sa_seq), len(seq) * 2 - 1)
# Tokens should be sequence twice, minus the EOS token at the end
for i, ts in enumerate(sa_seq):
self.assertEqual(ts.token, seq[i % 10].token)
# Weights should be len-1 0.0's and len 1.0's.
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].weight, 0.0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].weight, 1.0)
# Labels should be len-1 0's, and then the sequence
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].label, 0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token)
def testBuildLabelSeq(self):
seq = self._buildDummySequence()
eos_id = len(seq) - 1
label_seq = data.build_labeled_sequence(seq, True)
for i, ts in enumerate(label_seq[:-1]):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testBuildBidirLabelSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
label_seq = data.build_labeled_sequence(bidir_seq, True)
for (i, ts), j in zip(enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testReverseSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
for i, ts in enumerate(reversed(reverse_seq[:-1])):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = reverse_seq[-1]
eos_id = len(seq) - 1
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testBidirSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
for (i, ts), j in zip(enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = bidir_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testLabelGain(self):
seq = self._buildDummySequence()
label_seq = data.build_labeled_sequence(seq, True, label_gain=True)
for i, ts in enumerate(label_seq):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 1)
self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3)
if __name__ == "__main__":
tf.test.main()
| 36.0625
| 88
| 0.608897
| 912
| 6,924
| 4.449561
| 0.135965
| 0.158945
| 0.096353
| 0.069
| 0.566289
| 0.517003
| 0.4793
| 0.453425
| 0.418679
| 0.383933
| 0
| 0.024405
| 0.272097
| 6,924
| 191
| 89
| 36.251309
| 0.780754
| 0.034806
| 0
| 0.37013
| 0
| 0
| 0.016175
| 0.003445
| 0
| 0
| 0
| 0
| 0.337662
| 1
| 0.084416
| false
| 0
| 0.032468
| 0
| 0.136364
| 0.006494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22d0f53b1d93eab616a976b47567e50595d96288
| 3,546
|
py
|
Python
|
LipSDP/solve_sdp.py
|
revbucket/LipSDP
|
39f2ffe65cb656440e055e4e86a750bc7e77e357
|
[
"MIT"
] | 1
|
2021-07-21T12:19:01.000Z
|
2021-07-21T12:19:01.000Z
|
LipSDP/solve_sdp.py
|
revbucket/LipSDP
|
39f2ffe65cb656440e055e4e86a750bc7e77e357
|
[
"MIT"
] | null | null | null |
LipSDP/solve_sdp.py
|
revbucket/LipSDP
|
39f2ffe65cb656440e055e4e86a750bc7e77e357
|
[
"MIT"
] | null | null | null |
import argparse
import numpy as np
import matlab.engine
from scipy.io import savemat
import os
from time import time
def main(args):
    start_time = time()
    # directory containing this script, used below to locate the bundled MATLAB code
    file_dir = os.path.dirname(os.path.abspath(__file__))
    eng = matlab.engine.start_matlab()
eng.addpath(os.path.join(file_dir, 'matlab_engine'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/weight_utils'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/error_messages'))
eng.addpath(os.path.join(file_dir, r'examples/saved_weights'))
network = {
'alpha': matlab.double([args.alpha]),
'beta': matlab.double([args.beta]),
'weight_path': args.weight_path,
}
lip_params = {
'formulation': args.form,
'split': matlab.logical([args.split]),
'parallel': matlab.logical([args.parallel]),
'verbose': matlab.logical([args.verbose]),
'split_size': matlab.double([args.split_size]),
'num_neurons': matlab.double([args.num_neurons]),
'num_workers': matlab.double([args.num_workers]),
'num_dec_vars': matlab.double([args.num_decision_vars])
}
L = eng.solve_LipSDP(network, lip_params, nargout=1)
if lip_params['verbose']:
print(f'LipSDP-{args.form.capitalize()} gives a Lipschitz constant of %.03f' % L)
print('Total time %.03f' % (time() - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--form',
default='neuron',
const='neuron',
nargs='?',
choices=('neuron', 'network', 'layer', 'network-rand', 'network-dec-vars'),
help='LipSDP formulation to use')
parser.add_argument('-v', '--verbose',
action='store_true',
help='prints CVX output from solve if supplied')
parser.add_argument('--alpha',
type=float,
default=0,
nargs=1,
help='lower bound for slope restriction bound')
parser.add_argument('--beta',
type=float,
default=1,
nargs=1,
                        help='upper bound for slope restriction')
parser.add_argument('--num-neurons',
type=int,
default=100,
nargs=1,
help='number of neurons to couple for LipSDP-Network-rand formulation')
parser.add_argument('--split',
action='store_true',
help='splits network into subnetworks for more efficient solving if supplied')
parser.add_argument('--parallel',
action='store_true',
help='parallelizes solving for split formulations if supplied')
parser.add_argument('--split-size',
type=int,
default=2,
nargs=1,
help='number of layers in each subnetwork for splitting formulations')
parser.add_argument('--num-workers',
type=int,
default=0,
nargs=1,
help='number of workers for parallelization of splitting formulations')
parser.add_argument('--num-decision-vars',
type=int,
default=10,
nargs=1,
help='specify number of decision variables to be used for LipSDP')
parser.add_argument('--weight-path',
type=str,
required=True,
nargs=1,
help='path of weights corresponding to trained neural network model')
args = parser.parse_args()
if args.parallel is True and args.num_workers[0] < 1:
raise ValueError('When you use --parallel, --num-workers must be an integer >= 1.')
if args.split is True and args.split_size[0] < 1:
raise ValueError('When you use --split, --split-size must be an integer >= 1.')
main(args)
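    # Example invocation from the command line (hypothetical weight file name; assumes the MATLAB
    # engine for Python and the bundled matlab_engine code are installed):
    #   python solve_sdp.py --form neuron --weight-path examples/saved_weights/mnist_weights.mat --verbose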
| 30.834783
| 91
| 0.631416
| 447
| 3,546
| 4.888143
| 0.306488
| 0.045309
| 0.085584
| 0.029291
| 0.263616
| 0.179863
| 0.142334
| 0.105263
| 0.092449
| 0.092449
| 0
| 0.009956
| 0.235195
| 3,546
| 114
| 92
| 31.105263
| 0.795723
| 0
| 0
| 0.222222
| 0
| 0
| 0.334179
| 0.030175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011111
| false
| 0
| 0.066667
| 0
| 0.077778
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22d1e9715d6acd537e633072609ca037ec95ec12
| 805
|
py
|
Python
|
stockprophet/__init__.py
|
chihyi-liao/stockprophet
|
891c91b2a446e3bd30bb56b88be3874d7dda1b8d
|
[
"BSD-3-Clause"
] | 1
|
2021-11-15T13:07:19.000Z
|
2021-11-15T13:07:19.000Z
|
stockprophet/__init__.py
|
chihyi-liao/stockprophet
|
891c91b2a446e3bd30bb56b88be3874d7dda1b8d
|
[
"BSD-3-Clause"
] | null | null | null |
stockprophet/__init__.py
|
chihyi-liao/stockprophet
|
891c91b2a446e3bd30bb56b88be3874d7dda1b8d
|
[
"BSD-3-Clause"
] | 1
|
2021-09-15T09:25:39.000Z
|
2021-09-15T09:25:39.000Z
|
from stockprophet.cli import entry_point
from stockprophet.crawler import (
init_stock_type, init_stock_category
)
from stockprophet.db import init_db
from .utils import read_db_settings
def preprocessing() -> bool:
result = False
# noinspection PyBroadException
try:
db_config = read_db_settings()
if not db_config:
print("config.ini 找不到 'database' 區段")
return result
except Exception:
print("無法讀取或解析config.ini")
return result
# noinspection PyBroadException
try:
init_db(db_config)
init_stock_category()
init_stock_type()
result = True
except Exception as e:
print("無法連線資料庫: %s" % (str(e), ))
return result
def main():
if preprocessing():
entry_point()
| 22.361111
| 49
| 0.645963
| 92
| 805
| 5.445652
| 0.456522
| 0.071856
| 0.051896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277019
| 805
| 35
| 50
| 23
| 0.860825
| 0.073292
| 0
| 0.185185
| 0
| 0
| 0.07537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.148148
| 0
| 0.333333
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22d23a29cb139320e7b38591cd284a89f2406142
| 475
|
py
|
Python
|
6/6.2.py
|
Hunter1753/adventofcode
|
962df52af01f6ab575e8f00eb2d1c1335dba5430
|
[
"CC0-1.0"
] | 1
|
2020-12-08T21:53:19.000Z
|
2020-12-08T21:53:19.000Z
|
6/6.2.py
|
Hunter1753/adventofcode
|
962df52af01f6ab575e8f00eb2d1c1335dba5430
|
[
"CC0-1.0"
] | null | null | null |
6/6.2.py
|
Hunter1753/adventofcode
|
962df52af01f6ab575e8f00eb2d1c1335dba5430
|
[
"CC0-1.0"
] | null | null | null |
def setIntersectionCount(group):
return len(set.intersection(*group))
groupList = []
tempGroup = []
with open("./6/input.txt") as inputFile:
for line in inputFile:
line = line.replace("\n","")
if len(line) > 0:
tempGroup.append(set(line))
else:
groupList.append(tempGroup)
tempGroup = []
if len(tempGroup) > 0:
groupList.append(tempGroup)
groupList = list(map(setIntersectionCount,groupList))
print("{} common options in groups".format(sum(groupList)))
| 25
| 59
| 0.703158
| 58
| 475
| 5.758621
| 0.568966
| 0.02994
| 0.143713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007335
| 0.138947
| 475
| 19
| 59
| 25
| 0.809291
| 0
| 0
| 0.25
| 0
| 0
| 0.088235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0.0625
| 0.125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22d2adc9a61d389ca50d1c98a9058e597ec58a82
| 2,964
|
py
|
Python
|
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
|
ZichaoGuo/PaddleSlim
|
2550fb4ec86aee6155c1c8a2c9ab174e239918a3
|
[
"Apache-2.0"
] | 926
|
2019-12-16T05:06:56.000Z
|
2022-03-31T07:22:10.000Z
|
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
|
ZichaoGuo/PaddleSlim
|
2550fb4ec86aee6155c1c8a2c9ab174e239918a3
|
[
"Apache-2.0"
] | 327
|
2019-12-16T06:04:31.000Z
|
2022-03-30T11:08:18.000Z
|
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
|
ZichaoGuo/PaddleSlim
|
2550fb4ec86aee6155c1c8a2c9ab174e239918a3
|
[
"Apache-2.0"
] | 234
|
2019-12-16T03:12:08.000Z
|
2022-03-27T12:59:39.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
import numpy as np
from paddleslim.nas import GPNAS
# Demo of using GP-NAS in Track 2 of the [CVPR 2021 NAS competition](https://www.cvpr21-nas.com/competition)
# [CVPR 2021 NAS competition Track 2 AI Studio page](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en)
# [AI studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958)
# The demo is based on GP-NAS (Gaussian Process based Neural Architecture Search), PaddleSlim's own NAS algorithm
# An improved version built on this demo could earn double the prize money
def preprare_trainning_data(file_name, t_flag):
    ## t_flag == 1: use all of the training data
    ## t_flag == 2: use half of the training data
with open(file_name, 'r') as f:
arch_dict = json.load(f)
Y_all = []
X_all = []
for sub_dict in arch_dict.items():
Y_all.append(sub_dict[1]['acc'] * 100)
X_all.append(np.array(sub_dict[1]['arch']).T.reshape(4, 16)[2])
X_all, Y_all = np.array(X_all), np.array(Y_all)
X_train, Y_train, X_test, Y_test = X_all[0::t_flag], Y_all[
0::t_flag], X_all[1::t_flag], Y_all[1::t_flag]
return X_train, Y_train, X_test, Y_test
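# A small illustration of the t_flag split used above (hypothetical call; with t_flag=2 every other
# architecture goes to the training split and the remaining half to the test split):
# X_tr, Y_tr, X_te, Y_te = preprare_trainning_data('./datasets/Track2_stage1_trainning.json', 2)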
if __name__ == '__main__':
stage1_file = './datasets/Track2_stage1_trainning.json'
stage2_file = './datasets/Track2_stage2_few_show_trainning.json'
X_train_stage1, Y_train_stage1, X_test_stage1, Y_test_stage1 = preprare_trainning_data(
stage1_file, 1)
X_train_stage2, Y_train_stage2, X_test_stage2, Y_test_stage2 = preprare_trainning_data(
stage2_file, 2)
gpnas = GPNAS()
w = gpnas.get_initial_mean(X_test_stage1, Y_test_stage1)
init_cov = gpnas.get_initial_cov(X_train_stage1)
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict(
X_test_stage2))
print('RMSE trainning on stage1 testing on stage2:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
gpnas.get_posterior_mean(X_train_stage2[0::3], Y_train_stage2[0::3])
gpnas.get_posterior_mean(X_train_stage2[1::3], Y_train_stage2[1::3])
gpnas.get_posterior_cov(X_train_stage2[1::3], Y_train_stage2[1::3])
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict_jiont(
X_test_stage2, X_train_stage2[::1], Y_train_stage2[::1]))
print('RMSE using stage1 as prior:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
| 44.909091
| 103
| 0.721323
| 476
| 2,964
| 4.218487
| 0.340336
| 0.054781
| 0.035857
| 0.025896
| 0.233068
| 0.204183
| 0.182271
| 0.155378
| 0.133466
| 0.133466
| 0
| 0.039612
| 0.165317
| 2,964
| 65
| 104
| 45.6
| 0.772029
| 0.345816
| 0
| 0.102564
| 0
| 0
| 0.090292
| 0.045407
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.102564
| 0
| 0.153846
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22d789885783516e44018b1a27dcbc9e0ec012e0
| 6,443
|
py
|
Python
|
pymemcache/client/retrying.py
|
liquidpele/pymemcache
|
0001f94a06b91078ed7b7708729ef0d1aaa73a68
|
[
"Apache-2.0"
] | null | null | null |
pymemcache/client/retrying.py
|
liquidpele/pymemcache
|
0001f94a06b91078ed7b7708729ef0d1aaa73a68
|
[
"Apache-2.0"
] | null | null | null |
pymemcache/client/retrying.py
|
liquidpele/pymemcache
|
0001f94a06b91078ed7b7708729ef0d1aaa73a68
|
[
"Apache-2.0"
] | null | null | null |
""" Module containing the RetryingClient wrapper class. """
from time import sleep
def _ensure_tuple_argument(argument_name, argument_value):
"""
Helper function to ensure the given arguments are tuples of Exceptions (or
subclasses), or can at least be converted to such.
Args:
argument_name: str, name of the argument we're checking, only used for
raising meaningful exceptions.
        argument_value: any, the argument itself.
Returns:
tuple[Exception]: A tuple with the elements from the argument if they are
valid.
Exceptions:
ValueError: If the argument was not None, tuple or Iterable.
ValueError: If any of the elements of the argument is not a subclass of
Exception.
"""
# Ensure the argument is a tuple, set or list.
if argument_value is None:
return tuple()
elif not isinstance(argument_value, (tuple, set, list)):
raise ValueError("%s must be either a tuple, a set or a list." % argument_name)
# Convert the argument before checking contents.
argument_tuple = tuple(argument_value)
# Check that all the elements are actually inherited from Exception.
# (Catchable)
if not all([issubclass(arg, Exception) for arg in argument_tuple]):
raise ValueError(
"%s is only allowed to contain elements that are subclasses of "
"Exception." % argument_name
)
return argument_tuple
class RetryingClient(object):
"""
Client that allows retrying calls for the other clients.
"""
def __init__(
self, client, attempts=2, retry_delay=0, retry_for=None, do_not_retry_for=None
):
"""
Constructor for RetryingClient.
Args:
client: Client|PooledClient|HashClient, inner client to use for
performing actual work.
attempts: optional int, how many times to attempt an action before
failing. Must be 1 or above. Defaults to 2.
retry_delay: optional int|float, how many seconds to sleep between
each attempt.
Defaults to 0.
retry_for: optional None|tuple|set|list, what exceptions to
allow retries for. Will allow retries for all exceptions if None.
Example:
`(MemcacheClientError, MemcacheUnexpectedCloseError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
do_not_retry_for: optional None|tuple|set|list, what
                exceptions should not be retried. Will not block retries for any
Exception if None.
Example:
`(IOError, MemcacheIllegalInputError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
Exceptions:
ValueError: If `attempts` is not 1 or above.
ValueError: If `retry_for` or `do_not_retry_for` is not None, tuple or
Iterable.
ValueError: If any of the elements of `retry_for` or
`do_not_retry_for` is not a subclass of Exception.
ValueError: If there is any overlap between `retry_for` and
`do_not_retry_for`.
"""
if attempts < 1:
raise ValueError(
"`attempts` argument must be at least 1. "
"Otherwise no attempts are made."
)
self._client = client
self._attempts = attempts
self._retry_delay = retry_delay
self._retry_for = _ensure_tuple_argument("retry_for", retry_for)
self._do_not_retry_for = _ensure_tuple_argument(
"do_not_retry_for", do_not_retry_for
)
# Verify no overlap in the go/no-go exception collections.
for exc_class in self._retry_for:
if exc_class in self._do_not_retry_for:
raise ValueError(
'Exception class "%s" was present in both `retry_for` '
"and `do_not_retry_for`. Any exception class is only "
"allowed in a single argument." % repr(exc_class)
)
# Take dir from the client to speed up future checks.
self._client_dir = dir(self._client)
def _retry(self, name, func, *args, **kwargs):
"""
Workhorse function, handles retry logic.
Args:
name: str, Name of the function called.
func: callable, the function to retry.
*args: args, array arguments to pass to the function.
**kwargs: kwargs, keyword arguments to pass to the function.
"""
for attempt in range(self._attempts):
try:
result = func(*args, **kwargs)
return result
except Exception as exc:
# Raise the exception to the caller if any of the following is met:
# - We've used the last attempt.
# - self._retry_for is set, and we do not match.
# - self._do_not_retry_for is set, and we do match.
# - name is not actually a member of the client class.
if (
attempt >= self._attempts - 1
or (self._retry_for and not isinstance(exc, self._retry_for))
or (
self._do_not_retry_for
and isinstance(exc, self._do_not_retry_for)
)
or name not in self._client_dir
):
raise exc
# Sleep and try again.
sleep(self._retry_delay)
# This is the real magic soup of the class, we catch anything that isn't
# strictly defined for ourselves and pass it on to whatever client we've
# been given.
def __getattr__(self, name):
return lambda *args, **kwargs: self._retry(
name, self._client.__getattribute__(name), *args, **kwargs
)
# We implement these explicitly because they're "magic" functions and won't
# get passed on by __getattr__.
def __dir__(self):
return self._client_dir
# These magics are copied from the base client.
def __setitem__(self, key, value):
self.set(key, value, noreply=True)
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __delitem__(self, key):
self.delete(key, noreply=True)
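A minimal usage sketch for RetryingClient, assuming a pymemcache-style base Client; the server address, TTL and exception class below are illustrative, not taken from this file:
from pymemcache.client.base import Client
from pymemcache.exceptions import MemcacheUnexpectedCloseError

base_client = Client(("localhost", 11211))
retrying = RetryingClient(
    base_client,
    attempts=3,                                 # each call may be tried up to 3 times
    retry_delay=0.01,                           # sleep 10 ms between attempts
    retry_for=(MemcacheUnexpectedCloseError,),  # only retry dropped connections
)

# Calls are proxied to the inner client via __getattr__ and retried on failure.
retrying.set("greeting", "hello", expire=60)
print(retrying.get("greeting"))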
| 35.994413
| 87
| 0.60298
| 800
| 6,443
| 4.68375
| 0.26875
| 0.055511
| 0.034694
| 0.045103
| 0.193488
| 0.14625
| 0.119295
| 0.09581
| 0.09581
| 0.057112
| 0
| 0.002091
| 0.331833
| 6,443
| 178
| 88
| 36.196629
| 0.868293
| 0.465622
| 0
| 0.072464
| 0
| 0
| 0.114125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115942
| false
| 0
| 0.014493
| 0.028986
| 0.231884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22d92edfa8963f3c42a5dc829d7d8e2eae0773ab
| 461
|
py
|
Python
|
8.1.py
|
HuaichenOvO/EIE3280HW
|
e1424abb8baf715a4e9372e2ca6b0bed1e62f3d6
|
[
"MIT"
] | null | null | null |
8.1.py
|
HuaichenOvO/EIE3280HW
|
e1424abb8baf715a4e9372e2ca6b0bed1e62f3d6
|
[
"MIT"
] | null | null | null |
8.1.py
|
HuaichenOvO/EIE3280HW
|
e1424abb8baf715a4e9372e2ca6b0bed1e62f3d6
|
[
"MIT"
] | null | null | null |
import numpy as np
import numpy.linalg as lg
A_mat = np.matrix([
[0, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 1, 1, 0]
])
eigen = lg.eig(A_mat)  # returns (eigenvalues, eigenvectors): 5 eigenvalues with linearly independent eigenvectors
vec = eigen[1][:, 0]  # the eigenvector (column) paired with eigen[0][0]
value = eigen[0][0]  # the corresponding eigenvalue (note: eig() does not sort its output)
print(vec)
print(A_mat * vec)
print(value * vec)
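Because eig() does not sort its output, a small verification sketch that explicitly picks the largest eigenvalue and checks A v = λ v (for this symmetric 0/1 matrix the eigenvalues come back real):
idx = np.argmax(eigen[0])                # locate the largest eigenvalue explicitly
lam = eigen[0][idx]
v = eigen[1][:, idx]
print(np.allclose(A_mat @ v, lam * v))   # expected: True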
| 20.043478
| 87
| 0.566161
| 83
| 461
| 3.108434
| 0.337349
| 0.062016
| 0.046512
| 0.046512
| 0.077519
| 0.054264
| 0.054264
| 0
| 0
| 0
| 0
| 0.093093
| 0.277657
| 461
| 22
| 88
| 20.954545
| 0.681682
| 0.305857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22dbcb72dc9b6914e75bad92c8d92d61083088a7
| 6,145
|
py
|
Python
|
src/automata_learning_with_policybank/Traces.py
|
logic-and-learning/AdvisoRL
|
3bbd741e681e6ea72562fec142d54e9d781d097d
|
[
"MIT"
] | 4
|
2021-02-04T17:33:07.000Z
|
2022-01-24T10:29:39.000Z
|
src/automata_learning_with_policybank/Traces.py
|
logic-and-learning/AdvisoRL
|
3bbd741e681e6ea72562fec142d54e9d781d097d
|
[
"MIT"
] | null | null | null |
src/automata_learning_with_policybank/Traces.py
|
logic-and-learning/AdvisoRL
|
3bbd741e681e6ea72562fec142d54e9d781d097d
|
[
"MIT"
] | null | null | null |
import os
class Traces:
def __init__(self, positive=None, negative=None):
# Avoid shared mutable default arguments: a default set() would be shared across instances.
self.positive = positive if positive is not None else set()
self.negative = negative if negative is not None else set()
"""
IG: at the moment we add a trace only if it ends in an event.
Should we be more restrictive, e.g. consider xxx the same as xxxxxxxxxx (where x is an empty event '')?
Recent suggestion (from the meeting): ignore empty events altogether and do not consider them as events at all (neither for
execution, nor for learning).
"""
def _should_add(self, trace, i):
prefixTrace = trace[:i]
return prefixTrace[-1] != ''
def _get_prefixes(self, trace, up_to_limit = None):
if up_to_limit is None:
up_to_limit = len(trace)
all_prefixes = set()
for i in range(1, up_to_limit+1):
if self._should_add(trace, i):
all_prefixes.add(trace[:i])
return all_prefixes
def symbol_to_trace(self,symbols):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(letters, numbers))
traces = list()
for symbol in symbols:
traces.append(dictionary.get(symbol))
return tuple(traces)
def trace_to_symbol(self,traces):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
symbols = list()
for trace in traces:
symbols.append(dictionary.get(trace))
return tuple(symbols)
def rm_trace_to_symbol(self,rm_file):
file = rm_file
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
with open(file) as f:
content = f.readlines()
lines = []
for line in content:
end = 0
begin = 1 #initialize values based on what won't enter the loops; initial values irrelevant
number = 0 #random, had to initialize
if line != content[0]:
number = str()
check = 0
count=0
for character in line:
if ((check==1) & (character=="'")): #looks for second quotation
check = 10 #end search
end = count-1
elif (character == "'"): #looks for first quotation
check = 1
begin = count+1
elif (check==1):
number += character
count = count+1
symbol = dictionary.get(int(number))
#symbol = symbol + '&!n'
line = list(line) #necessary for use of pop,insert
if end==begin+1:
line.pop(end)
line.pop(begin)
line.insert(begin,symbol)
elif end==begin:
line.pop(begin)
line.insert(begin,symbol)
lines.append(line)
with open(rm_file, 'w') as f:
for line in lines:
for item in line:
f.write(str(item))
def fix_rmfiles(self,rmfile):
file = rmfile
with open(file) as f:
content = f.readlines()
final_state = str()
for line in content:
if line != content[0]:
brackets = 0
commas = 0
state = str()
next_state = str()
for character in line:
if (character == "(") & (brackets == 0):
brackets = 1
elif brackets == 1:
if character == "(":
brackets = 2
elif brackets == 2:
if character == "1":
final_state = next_state
print(final_state)
if ((commas == 0) & (brackets == 1)):
if character == ",":
commas = 1
else:
state += character
elif ((commas == 1) & (brackets == 1)):
if character == ",":
commas = 2
else:
next_state += character
# with open(rmfile, 'w') as f:
# for line in content:
# for item in line:
# f.write(str(item))
# f.write("\n")
# writethis = "(" + str(final_state) + "," + str(final_state) + ",'True',ConstantRewardFunction(0))"
# f.write(writethis)
"""
when adding a trace, it additionally adds all prefixes as negative traces
"""
def add_trace(self, trace, reward, learned):
trace = tuple(trace)
if reward > 0:
self.positive.add(trace)
# | is a set union operator
#if learned==0:
self.negative |= self._get_prefixes(trace, len(trace)-1)
else:
#if learned == 0:
self.negative |= self._get_prefixes(trace)
# else:
# self.negative.add(trace)
def export_traces(self, filename):
parent_path = os.path.dirname(filename)
os.makedirs(parent_path,exist_ok=True)
with open(filename, "w") as output_file:
output_file.write("POSITIVE:")
for trace in self.positive:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
output_file.write("\nNEGATIVE:")
for trace in self.negative:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
def __repr__(self):
return repr(self.positive) + "\n\n" + repr(self.negative)
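A small usage sketch of the prefix bookkeeping described above (the trace and reward values are made up):
t = Traces(positive=set(), negative=set())
t.add_trace(('a', 'b', 'c'), reward=1, learned=0)
print(t.positive)   # {('a', 'b', 'c')}
print(t.negative)   # proper prefixes recorded as negatives: {('a',), ('a', 'b')}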
| 36.577381
| 127
| 0.479414
| 687
| 6,145
| 4.195051
| 0.245997
| 0.024289
| 0.031228
| 0.015267
| 0.252255
| 0.220333
| 0.211312
| 0.188411
| 0.148161
| 0.119015
| 0
| 0.012098
| 0.408137
| 6,145
| 167
| 128
| 36.796407
| 0.780313
| 0.091782
| 0
| 0.27907
| 0
| 0
| 0.012908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077519
| false
| 0
| 0.007752
| 0.007752
| 0.139535
| 0.007752
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22dbf84787aba6cdbf21c855e5dcbb4cff617bd6
| 1,758
|
py
|
Python
|
example/comp/urls.py
|
edwilding/django-comments-xtd
|
c3a335b6345b52c75cce69c66b7cf0ef72439d35
|
[
"BSD-2-Clause"
] | null | null | null |
example/comp/urls.py
|
edwilding/django-comments-xtd
|
c3a335b6345b52c75cce69c66b7cf0ef72439d35
|
[
"BSD-2-Clause"
] | null | null | null |
example/comp/urls.py
|
edwilding/django-comments-xtd
|
c3a335b6345b52c75cce69c66b7cf0ef72439d35
|
[
"BSD-2-Clause"
] | 1
|
2021-06-01T20:35:25.000Z
|
2021-06-01T20:35:25.000Z
|
import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
if django.VERSION[:2] > (1, 9):
from django.views.i18n import JavaScriptCatalog
else:
from django.views.i18n import javascript_catalog
from django_comments_xtd import LatestCommentFeed
from django_comments_xtd.views import XtdCommentListView
from comp import views
admin.autodiscover()
urlpatterns = [
url(r'^$', views.HomepageView.as_view(), name='homepage'),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
url(r'^articles/', include('comp.articles.urls')),
url(r'^quotes/', include('comp.quotes.urls')),
url(r'^comments/', include('django_comments_xtd.urls')),
url(r'^comments/$', XtdCommentListView.as_view(
content_types=["articles.article", "quotes.quote"],
paginate_by=10, page_range=5),
name='comments-xtd-list'),
url(r'^feeds/comments/$', LatestCommentFeed(), name='comments-feed'),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
if django.VERSION[:2] > (1, 9):
urlpatterns.append(
url(r'^jsi18n/$', JavaScriptCatalog.as_view(),
name='javascript-catalog')
)
else:
js_info_dict = {
'packages': ('django_comments_xtd',)
}
urlpatterns.append(
url(r'^jsi18n/$', javascript_catalog, js_info_dict,
name='javascript-catalog')
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^rosetta/', include('rosetta.urls'))]
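A quick sketch of resolving the named routes above, assuming it is run inside this example project (e.g. from a manage.py shell); the reverse() import path differs between Django versions:
try:
    from django.urls import reverse               # Django >= 1.10
except ImportError:
    from django.core.urlresolvers import reverse  # older Django

print(reverse('homepage'))            # '/'
print(reverse('comments-xtd-list'))   # '/comments/'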
| 31.392857
| 77
| 0.67463
| 210
| 1,758
| 5.528571
| 0.319048
| 0.041344
| 0.05857
| 0.027562
| 0.120586
| 0.031008
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.175768
| 1,758
| 55
| 78
| 31.963636
| 0.786749
| 0
| 0
| 0.177778
| 0
| 0
| 0.209329
| 0.025597
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22df9e5579ccb8577b1f37196d5e862a47aa496e
| 1,026
|
py
|
Python
|
tests/basic/test_basic.py
|
kopp/python-astar
|
642dd4bcef9829776614dc0f12681ac94634a3bc
|
[
"BSD-3-Clause"
] | 133
|
2017-05-05T03:40:13.000Z
|
2022-03-30T06:37:23.000Z
|
src/test/basic/basic.py
|
ReznicencuBogdan/python-astar
|
48d1caedd6e839c51315555f85ced567f7f166a7
|
[
"BSD-3-Clause"
] | 6
|
2019-01-17T20:46:34.000Z
|
2021-12-23T22:59:57.000Z
|
src/test/basic/basic.py
|
ReznicencuBogdan/python-astar
|
48d1caedd6e839c51315555f85ced567f7f166a7
|
[
"BSD-3-Clause"
] | 61
|
2017-03-17T14:05:34.000Z
|
2022-02-18T21:27:40.000Z
|
import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)], 'D': [('B', 20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n == n2:
return d
def cost(n, goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
| 30.176471
| 87
| 0.522417
| 136
| 1,026
| 3.823529
| 0.433824
| 0.011538
| 0.057692
| 0.057692
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033083
| 0.351852
| 1,026
| 33
| 88
| 31.090909
| 0.748872
| 0.191033
| 0
| 0
| 0
| 0
| 0.026482
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.181818
| false
| 0
| 0.090909
| 0.045455
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22e090fdaf1d3e3871f2d87d1370e0c27a711e78
| 2,623
|
py
|
Python
|
potions.py
|
abdza/skyrim_formulas
|
bf6be3c82715cfde89810d6e6183c95a55a4414c
|
[
"MIT"
] | null | null | null |
potions.py
|
abdza/skyrim_formulas
|
bf6be3c82715cfde89810d6e6183c95a55a4414c
|
[
"MIT"
] | null | null | null |
potions.py
|
abdza/skyrim_formulas
|
bf6be3c82715cfde89810d6e6183c95a55a4414c
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
import csv
def intersect(list1,list2):
list3 = [ value for value in list1 if value in list2]
return list3
def category(list1,effects):
cat = 'Good'
good = 0
bad = 0
for ing in list1:
if effects[ing]=='Good':
good += 1
else:
bad += 1
if bad==0:
return 'Potion'
elif good==0:
return 'Poison'
else:
return 'Downside'
effects = {}
ingredients = {}
print("Formulating formulas")
with open('ingredients.csv') as csvfile:
aff = csv.reader(csvfile, delimiter=',')
for row in aff:
if row[0] not in effects.keys():
effects[row[0]] = row[1]
with open('skyrim-ingredients.csv', newline='') as csvfile:
ingre = csv.reader(csvfile, delimiter=',')
for row in ingre:
if row[0] not in ingredients.keys():
ingredients[row[0]] = [row[1],row[2],row[3],row[4]]
multieffects = {}
for ce in effects:
curing = []
for ing in ingredients:
if ce in ingredients[ing]:
curing.append(ing)
for k,curi in enumerate(curing):
for i in range(k+1,len(curing)):
cureff = intersect(ingredients[curi],ingredients[curing[i]])
cureff.sort()
if len(cureff)>1:
if curi>curing[i]:
curname = curing[i] + ':' + curi
else:
curname = curi + ':' + curing[i]
multieffects[curname] = cureff
finallist = {}
for me in multieffects:
curing = me.split(":")
for ing in ingredients:
if ing!=curing[0] and ing!=curing[1]:
eff1 = intersect(ingredients[curing[0]],ingredients[ing])
eff2 = intersect(ingredients[curing[1]],ingredients[ing])
if len(eff1)>0 or len(eff2)>0:
tmpname = [ val for val in curing ]
tmpname.append(ing)
tmpname.sort()
finalname = ":".join(tmpname)
finallist[finalname] = list(set(multieffects[me] + eff1 + eff2))
finallist[finalname].sort()
with open('formulas.csv',mode='w') as formula_file:
formula_writer = csv.writer(formula_file, delimiter=',')
formula_writer.writerow(['Category','Ingredient 1','Ingredient 2','Ingredient 3','Effect 1','Effect 2','Effect 3','Effect 4','Effect 5'])
for fl in finallist:
formula_writer.writerow([category(finallist[fl],effects)] + fl.split(":") + finallist[fl])
for fl in multieffects:
formula_writer.writerow([category(multieffects[fl],effects)] + fl.split(":") + [''] + multieffects[fl])
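An illustrative sketch of the intersect and category helpers defined at the top of this file (the effect names and the effects_demo mapping are made up):
effects_demo = {'Restore Health': 'Good', 'Fortify Smithing': 'Good', 'Damage Stamina': 'Bad'}
print(intersect(['Restore Health', 'Damage Stamina'], ['Damage Stamina', 'Fortify Smithing']))  # ['Damage Stamina']
print(category(['Restore Health', 'Fortify Smithing'], effects_demo))  # 'Potion' (no bad effects)
print(category(['Restore Health', 'Damage Stamina'], effects_demo))    # 'Downside' (mixed effects)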
| 31.60241
| 141
| 0.569577
| 318
| 2,623
| 4.679245
| 0.264151
| 0.010753
| 0.016129
| 0.058468
| 0.087366
| 0.044355
| 0.044355
| 0
| 0
| 0
| 0
| 0.024638
| 0.28822
| 2,623
| 82
| 142
| 31.987805
| 0.772362
| 0.006481
| 0
| 0.072464
| 0
| 0
| 0.073321
| 0.008445
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0.014493
| 0
| 0.101449
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22e2114d0da96fc447264d248b0ab2d8a5d86656
| 3,469
|
py
|
Python
|
Tests/Methods/Mesh/Interpolation/test_interpolation.py
|
harshasunder-1/pyleecan
|
32ae60f98b314848eb9b385e3652d7fc50a77420
|
[
"Apache-2.0"
] | 2
|
2020-08-28T14:54:55.000Z
|
2021-03-13T19:34:45.000Z
|
Tests/Methods/Mesh/Interpolation/test_interpolation.py
|
harshasunder-1/pyleecan
|
32ae60f98b314848eb9b385e3652d7fc50a77420
|
[
"Apache-2.0"
] | null | null | null |
Tests/Methods/Mesh/Interpolation/test_interpolation.py
|
harshasunder-1/pyleecan
|
32ae60f98b314848eb9b385e3652d7fc50a77420
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from unittest import TestCase
from pyleecan.Classes.CellMat import CellMat
from pyleecan.Classes.MeshSolution import MeshSolution
from pyleecan.Classes.PointMat import PointMat
from pyleecan.Classes.MeshMat import MeshMat
from pyleecan.Classes.ScalarProductL2 import ScalarProductL2
from pyleecan.Classes.Interpolation import Interpolation
from pyleecan.Classes.RefSegmentP1 import RefSegmentP1
from pyleecan.Classes.FPGNSeg import FPGNSeg
@pytest.mark.MeshSol
class unittest_real_points(TestCase):
""" Tests for interpolation method"""
def test_line(self):
DELTA = 1e-10
mesh = MeshMat()
mesh.cell["line"] = CellMat(nb_pt_per_cell=2)
mesh.point = PointMat()
mesh.point.add_point(np.array([0, 0]))
mesh.point.add_point(np.array([1, 0]))
mesh.point.add_point(np.array([0, 1]))
mesh.point.add_point(np.array([2, 3]))
mesh.point.add_point(np.array([3, 3]))
mesh.add_cell(np.array([0, 1]), "line")
mesh.add_cell(np.array([0, 2]), "line")
mesh.add_cell(np.array([1, 2]), "line")
c_line = mesh.cell["line"]
c_line.interpolation = Interpolation()
c_line.interpolation.ref_cell = RefSegmentP1()
c_line.interpolation.scalar_product = ScalarProductL2()
c_line.interpolation.gauss_point = FPGNSeg()
meshsol = MeshSolution()
meshsol.mesh = [mesh]
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.array([1, 1])
sol = [1]
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(test_field)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.ones(
(2, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = np.ones((120, 3))
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(2)["line"]
test_pt = np.array([0.6, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[0, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = 0.6 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(1)["line"]
test_pt = np.array([0, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[1, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = 0.4 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
| 38.544444
| 86
| 0.618622
| 475
| 3,469
| 4.406316
| 0.170526
| 0.043478
| 0.072623
| 0.040612
| 0.615862
| 0.603918
| 0.533206
| 0.496894
| 0.492117
| 0.492117
| 0
| 0.040381
| 0.243298
| 3,469
| 89
| 87
| 38.977528
| 0.756952
| 0.054194
| 0
| 0.315068
| 0
| 0
| 0.053806
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 1
| 0.013699
| false
| 0
| 0.150685
| 0
| 0.178082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22e2925cc3811ca52e0058f9e3c1868295f2875f
| 13,863
|
py
|
Python
|
lib/models.py
|
ecarg/grace
|
8c1540116c07648f7d8852ee5e9edff33b6ae2f6
|
[
"BSD-2-Clause"
] | 7
|
2017-11-20T03:30:46.000Z
|
2021-06-10T15:33:07.000Z
|
lib/models.py
|
ecarg/grace
|
8c1540116c07648f7d8852ee5e9edff33b6ae2f6
|
[
"BSD-2-Clause"
] | 47
|
2017-09-08T07:02:42.000Z
|
2017-11-04T13:50:50.000Z
|
lib/models.py
|
ecarg/grace
|
8c1540116c07648f7d8852ee5e9edff33b6ae2f6
|
[
"BSD-2-Clause"
] | 2
|
2018-10-19T05:05:23.000Z
|
2019-10-31T06:27:24.000Z
|
# -*- coding: utf-8 -*-
"""
Pytorch models
__author__ = 'Jamie (krikit@naver.com)'
__copyright__ = 'No copyright. Just copyleft!'
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
###########
# imports #
###########
import torch
import torch.nn as nn
from embedder import Embedder
from pos_models import PosTagger, FnnTagger, CnnTagger # pylint: disable=unused-import
#############
# Ner Class #
#############
class Ner(nn.Module):
"""
named entity recognizer pytorch model
"""
def __init__(self, embedder, encoder, decoder):
"""
* embedder (Embedder)
[sentence_len, context_len] => [sentence_len, context_len, embed_dim]
* encoder (nn.Module)
[sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
* decoder (nn.Module)
[sentence_len, hidden_dim] => [sentence_len, n_tags],
"""
super().__init__()
self.embedder = embedder
self.encoder = encoder
self.decoder = decoder
assert isinstance(embedder, Embedder)
assert isinstance(encoder, nn.Module)
assert isinstance(decoder, nn.Module)
def forward(self, sentence, gazet, pos, words): #pylint: disable=arguments-differ
# [sentence_len, context_len] => [sentence_len, context_len, embed_dim]
sentence_embed = self.embedder(sentence, gazet, pos, words)
# [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
hidden = self.encoder(sentence_embed)
# [sentence_len, hidden_dim] => [sentence_len, n_tags]
predicted_tags = self.decoder(hidden)
return predicted_tags
def save(self, path):
"""
모델을 저장하는 메소드
:param path: 경로
"""
if torch.cuda.is_available():
self.cpu()
torch.save(self, str(path))
if torch.cuda.is_available():
self.cuda()
@classmethod
def load(cls, path):
"""
저장된 모델을 로드하는 메소드
:param path: 경로
:return: 모델 클래스 객체
"""
model = torch.load(str(path))
if torch.cuda.is_available():
model.cuda()
return model
#################
# Encoder Class #
#################
class Fnn5(nn.Module):
"""
2-Layer Full-Connected Neural Networks
"""
def __init__(self, context_len=21, in_dim=50, hidden_dim=500):
super(Fnn5, self).__init__()
self.context_len = context_len
self.hidden_dim = hidden_dim
self.out_dim = hidden_dim
self.net = nn.Sequential(
nn.Linear(context_len*in_dim, hidden_dim),
)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, in_dim]
Return:
x: [sentence_len, out_dim]
"""
sentence_len = x.size(0)
x = x.view(sentence_len, -1) # [sentence_len, context_len x in_dim]
x = self.net(x) # [sentence_len, out_dim]
return x
class Cnn7(nn.Module):
"""
ConvNet kernels=[2,3,4,5] + Fully-Connected
"""
def __init__(self, in_dim=50, hidden_dim=500):
"""
"""
super(Cnn7, self).__init__()
self.in_dim = in_dim
self.hidden_dim = hidden_dim
self.out_dim = in_dim * 4
self.conv2 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 4
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 2
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 1
)
self.conv3 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=3), # 1
)
self.conv4 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 4
nn.ReLU(),
nn.Conv1d(in_dim, in_dim, kernel_size=4), # 1
)
self.conv5 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=1), # 1
)
def forward(self, x): #pylint: disable=arguments-differ
"""
Args:
x: [sentence_length, context_len, in_dim]
Return:
x: [sentence_length, in_dim * 4]
"""
# [sentence_length, in_dim, context_len]
x = x.transpose(1, 2)
conv2 = self.conv2(x).squeeze(-1) # [sentence_len, in_dim]
conv3 = self.conv3(x).squeeze(-1) # [sentence_len, in_dim]
conv4 = self.conv4(x).squeeze(-1) # [sentence_len, in_dim]
conv5 = self.conv5(x).squeeze(-1) # [sentence_len, in_dim]
# [sentence_len, in_dim * 4]
out = torch.cat([conv2, conv3, conv4, conv5], dim=1)
return out
class Cnn8(nn.Module):
"""
9-layer Conv NN + Batch Norm + Residual
"""
def __init__(self, context_len=21, in_dim=64, hidden_dim=None):
super(Cnn8, self).__init__()
self.context_len = context_len
# conv block 64
self.conv_block1_1 = self.conv_block(in_dim, 2, False)
self.conv_block1_2_1 = self.conv_block(in_dim, 1, False)
self.conv_block1_2_2 = self.conv_block(in_dim, 1, True)
self.pool1 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 128
self.conv_block2_1 = self.conv_block(in_dim*2, 2, False)
self.conv_block2_2_1 = self.conv_block(in_dim*2, 1, False)
self.conv_block2_2_2 = self.conv_block(in_dim*2, 1, True)
self.pool2 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 256
self.conv_block3_1 = self.conv_block(in_dim*4, 2, False)
self.conv_block3_2_1 = self.conv_block(in_dim*4, 1, False)
self.conv_block3_2_2 = self.conv_block(in_dim*4, 1, True)
self.pool3 = nn.MaxPool1d(kernel_size=2)
# conv block 512
self.conv_block4_1 = self.conv_block(in_dim*8, 2, False)
self.conv_block4_2_1 = self.conv_block(in_dim*8, 1, False)
self.conv_block4_2_2 = self.conv_block(in_dim*8, 1, True)
self.pool4 = nn.MaxPool1d(kernel_size=3)
self.out_dim = in_dim*16
@classmethod
def conv_block(cls, in_dim=64, depth=2, double=True):
"""
Args:
[batch_size, dim, length]
Return:
[batch_size, dim*2, length] if double=True
[batch_size, dim, length] if double=False
"""
out_dim = in_dim
layers = []
for i in range(depth):
if double:
if i == depth - 1:
out_dim = in_dim * 2
layers.append(nn.Conv1d(in_dim, out_dim, kernel_size=3, padding=1))
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def forward(self, sentence):#pylint: disable=arguments-differ
"""
Args:
sentence: [sentence_len, context_len, embed_dim]
Return:
logit: [batch_size, out_dim]
"""
# [sentence_len, embed_dim, context_len]
x = sentence.transpose(1, 2)
# conv block 64
x = self.conv_block1_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_2(x) # [batch, in_dim*2, 21]
x = self.pool1(x) # [batch, in_dim*2, 11]
# conv block 128
x = self.conv_block2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_2(x) # [batch, in_dim*4, 11]
x = self.pool2(x) # [batch, in_dim*4, 6]
# conv block 256
x = self.conv_block3_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_2(x) # [batch, in_dim*8, 6]
x = self.pool3(x) # [batch, in_dim*8, 3]
# conv block 512
x = self.conv_block4_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_2(x) # [batch, in_dim*16, 3]
x = self.pool4(x) # [batch_size, in_dim*16, 1]
x = x.squeeze(-1) # [batch, in_dim*16]
return x
class RnnEncoder(nn.Module):
"""
RNN Encoder Module
"""
def __init__(self, context_len=21, in_dim=1024, out_dim=1024,
num_layers=2, cell='gru'):
super(RnnEncoder, self).__init__()
self.hidden_dim = out_dim // 2
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, input_size]
Return:
x: [sentence_len, hidden_size]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sequence_len, context_len, input_size]
# =>[sentence_len, context_len, hidden_size x 2]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x[:, 10, :]  # keep only the centre position of the 21-wide context window
return x
#################
# Decoder Class #
#################
class FCDecoder(nn.Module):
"""
Fully-Connected Decoder
"""
def __init__(self, in_dim, hidden_dim, n_tags):
super(FCDecoder, self).__init__()
self.net = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_dim, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
return self.net(x)
class RnnDecoder(nn.Module):
"""
RNN-based Decoder
"""
def __init__(self, in_dim=1024, hidden_dim=512, n_tags=11,
num_layers=2, cell='gru'):
super(RnnDecoder, self).__init__()
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
self.out = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(hidden_dim * 2, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sentence_len, batch=1, input_size]
x = x.unsqueeze(1)
# x: [sentence_len, batch=1, hidden_size x 2]
# h_n: [num_layers * 2, batch=1, hidden_size]
# c_n: [num_layers * 2, batch=1, hidden_size]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x.squeeze(1)
# [sequence_len, n_tags]
x = self.out(x)
return x
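A shape-level sketch showing how one encoder above can be paired with a decoder; the dimensions are illustrative, and the full Ner wrapper would additionally need an Embedder:
import torch

sentence_len, context_len, embed_dim, n_tags = 7, 21, 50, 11
encoder = Fnn5(context_len=context_len, in_dim=embed_dim, hidden_dim=500)
decoder = FCDecoder(in_dim=encoder.out_dim, hidden_dim=500, n_tags=n_tags)

x = torch.randn(sentence_len, context_len, embed_dim)  # [sentence_len, context_len, embed_dim]
hidden = encoder(x)                                     # [sentence_len, 500]
tags = decoder(hidden)                                  # [sentence_len, n_tags]
print(hidden.shape, tags.shape)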
| 31.506818
| 89
| 0.549592
| 1,832
| 13,863
| 3.899017
| 0.100437
| 0.066499
| 0.02352
| 0.0238
| 0.630687
| 0.586448
| 0.542349
| 0.461851
| 0.413692
| 0.405012
| 0
| 0.041397
| 0.320421
| 13,863
| 439
| 90
| 31.578588
| 0.716803
| 0.246916
| 0
| 0.457627
| 0
| 0
| 0.002685
| 0
| 0
| 0
| 0
| 0
| 0.012712
| 1
| 0.072034
| false
| 0
| 0.025424
| 0
| 0.165254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22e5c3b42de15feed5e29aa272f135d23d064ab1
| 1,274
|
py
|
Python
|
setup.py
|
edulix/apscheduler
|
8030e0fc7e1845a15861e649988cc73a1aa624ec
|
[
"MIT"
] | null | null | null |
setup.py
|
edulix/apscheduler
|
8030e0fc7e1845a15861e649988cc73a1aa624ec
|
[
"MIT"
] | null | null | null |
setup.py
|
edulix/apscheduler
|
8030e0fc7e1845a15861e649988cc73a1aa624ec
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import os.path
try:
from setuptools import setup
extras = dict(zip_safe=False, test_suite='nose.collector', tests_require=['nose'])
except ImportError:
from distutils.core import setup
extras = {}
import apscheduler
here = os.path.dirname(__file__)
readme_path = os.path.join(here, 'README.rst')
readme = open(readme_path).read()
setup(
name='APScheduler',
version=apscheduler.release,
description='In-process task scheduler with Cron-like capabilities',
long_description=readme,
author='Alex Gronholm',
author_email='apscheduler@nextday.fi',
url='http://pypi.python.org/pypi/APScheduler/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3'
],
keywords='scheduling cron',
license='MIT',
packages=('apscheduler', 'apscheduler.jobstores', 'apscheduler.triggers', 'apscheduler.triggers.cron'),
)
| 31.073171
| 107
| 0.663265
| 139
| 1,274
| 6
| 0.589928
| 0.136691
| 0.179856
| 0.093525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010763
| 0.197802
| 1,274
| 40
| 108
| 31.85
| 0.805284
| 0.010204
| 0
| 0
| 0
| 0
| 0.466243
| 0.054011
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.147059
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22e9a24e177b5cc9ead771b6359f5209ebe42377
| 543
|
py
|
Python
|
run.py
|
matthewyoung28/macmentum
|
af1a26903e25b4a4f278388d7be1e638e071c0a8
|
[
"MIT"
] | null | null | null |
run.py
|
matthewyoung28/macmentum
|
af1a26903e25b4a4f278388d7be1e638e071c0a8
|
[
"MIT"
] | null | null | null |
run.py
|
matthewyoung28/macmentum
|
af1a26903e25b4a4f278388d7be1e638e071c0a8
|
[
"MIT"
] | null | null | null |
import os
import sys
import random
def get_next_wallpaper(curr_path):
lst_dir = os.listdir(curr_path)
rand_index = random.randint(0, len(lst_dir) - 1)
return lst_dir[rand_index]
def get_wall_dir():
return "/Users/MYOUNG/Pictures/mmt"
def main():
script = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file '"
path = get_wall_dir()
file = get_next_wallpaper(path)
# print("FILE = ", file)
script = script + path + "/" + file
# print("SCRIPT = ", script)
os.system(script)
main()
| 18.724138
| 93
| 0.662983
| 77
| 543
| 4.493506
| 0.519481
| 0.052023
| 0.092486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00464
| 0.206262
| 543
| 28
| 94
| 19.392857
| 0.798144
| 0.090239
| 0
| 0
| 0
| 0
| 0.201232
| 0.053388
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.1875
| 0.0625
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22eae5e579a412e845c5851038ebc3ce5e3c9735
| 2,099
|
py
|
Python
|
noxfile.py
|
dolfno/mlops_demo
|
52a04525f1655a32d45002384a972a1920fd517a
|
[
"MIT"
] | null | null | null |
noxfile.py
|
dolfno/mlops_demo
|
52a04525f1655a32d45002384a972a1920fd517a
|
[
"MIT"
] | null | null | null |
noxfile.py
|
dolfno/mlops_demo
|
52a04525f1655a32d45002384a972a1920fd517a
|
[
"MIT"
] | null | null | null |
"""Automated CI tools to run with Nox"""
import nox
from nox import Session
locations = "src", "noxfile.py", "docs/conf.py"
nox.options.sessions = "lint", "tests"
@nox.session(python="3.9")
def tests(session: Session) -> None:
"""Run tests with nox"""
session.run("poetry", "install", external=True)
session.run("pytest", "--cov")
@nox.session(python="3.9")
def lint(session: Session) -> None:
"""Run linting with nox"""
session.install(
"flake8",
"flake8-annotations",
"flake8-bandit",
"flake8-black",
"flake8-bugbear",
"flake8-docstrings",
"flake8-import-order",
)
args = session.posargs or locations
session.run("flake8", *args)
@nox.session(python="3.9")
def black(session: Session) -> None:
"""Run black with nox"""
session.install("black")
args = session.posargs or locations
session.run("black", *args, "--line-length=120")
@nox.session(python="3.9")
def pytype(session: Session) -> None:
"""Run the static type checker."""
args = session.posargs or ["--disable=import-error", *locations]
session.install("pytype")
session.run("pytype", *args)
package = "hypermodern_python"
@nox.session(python=["3.9"])
def typeguard(session: Session) -> None:
"""Run typeguard for type checking with nox"""
args = session.posargs or ["-m", "not e2e"]
session.run("poetry", "install", "--no-dev", external=True)
session.install("pytest", "pytest-mock", "typeguard")
session.run("pytest", f"--typeguard-packages={package}", *args)
@nox.session(python="3.9")
def docs(session: Session) -> None:
"""Build the documentation."""
session.run("poetry", "install", "--no-dev", external=True)
session.install("sphinx", "sphinx-autodoc-typehints")
session.run("sphinx-build", "docs", "docs/_build")
@nox.session(python="3.9")
def coverage(session: Session) -> None:
"""Upload coverage data."""
session.install("coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
| 28.364865
| 68
| 0.636494
| 260
| 2,099
| 5.130769
| 0.303846
| 0.082459
| 0.083958
| 0.089205
| 0.255622
| 0.255622
| 0.176912
| 0.08096
| 0.08096
| 0.08096
| 0
| 0.015508
| 0.170557
| 2,099
| 73
| 69
| 28.753425
| 0.750718
| 0.100048
| 0
| 0.208333
| 0
| 0
| 0.262987
| 0.041126
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145833
| false
| 0
| 0.083333
| 0
| 0.229167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22eecf1d05ffdd487202a1266800927ab92af76d
| 1,098
|
py
|
Python
|
src/framework/tracing.py
|
davidhozic/Discord-Shiller
|
ff22bb1ceb7b4128ee0d27f3c9c9dd0a5279feb9
|
[
"MIT"
] | 12
|
2022-02-20T20:50:24.000Z
|
2022-03-24T17:15:15.000Z
|
src/framework/tracing.py
|
davidhozic/Discord-Shiller
|
ff22bb1ceb7b4128ee0d27f3c9c9dd0a5279feb9
|
[
"MIT"
] | 3
|
2022-02-21T15:17:43.000Z
|
2022-03-17T22:36:23.000Z
|
src/framework/tracing.py
|
davidhozic/discord-advertisement-framework
|
ff22bb1ceb7b4128ee0d27f3c9c9dd0a5279feb9
|
[
"MIT"
] | 1
|
2022-03-31T01:04:01.000Z
|
2022-03-31T01:04:01.000Z
|
"""
~ Tracing ~
This module contains functions and classes
related to the console debug log, or trace.
"""
from enum import Enum, auto
import time
__all__ = (
"TraceLEVELS",
"trace"
)
m_use_debug = None
class TraceLEVELS(Enum):
"""
Info: Trace levels for debug output
"""
NORMAL = 0
WARNING = auto()
ERROR = auto()
def trace(message: str,
level: TraceLEVELS = TraceLEVELS.NORMAL):
""""
Name : trace
Param:
- message : str = Trace message
- level : TraceLEVELS = Level of the trace
"""
if m_use_debug:
timestruct = time.localtime()
timestamp = "Date: {:02d}.{:02d}.{:04d} Time:{:02d}:{:02d}"
timestamp = timestamp.format(timestruct.tm_mday,
timestruct.tm_mon,
timestruct.tm_year,
timestruct.tm_hour,
timestruct.tm_min)
l_trace = f"{timestamp}\nTrace level: {level.name}\nMessage: {message}\n"
print(l_trace)
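A usage sketch; m_use_debug is normally configured elsewhere in the framework, so it is switched on by hand here for illustration:
if __name__ == "__main__":
    m_use_debug = True  # module-level switch read by trace()
    trace("Framework starting", TraceLEVELS.NORMAL)
    trace("Channel not found", TraceLEVELS.WARNING)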
| 25.534884
| 81
| 0.528233
| 112
| 1,098
| 5.044643
| 0.526786
| 0.106195
| 0.031858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015782
| 0.365209
| 1,098
| 42
| 82
| 26.142857
| 0.794835
| 0.222222
| 0
| 0
| 0
| 0
| 0.152585
| 0.027743
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.304348
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f2bda6c50ac4fe1d32522345090972ebb7ad66
| 728
|
py
|
Python
|
sunkit_image/__init__.py
|
jeffreypaul15/sunkit-image
|
0987db8fcd38c79a83d7d890e407204e63a05c4f
|
[
"BSD-2-Clause-NetBSD",
"BSD-2-Clause"
] | null | null | null |
sunkit_image/__init__.py
|
jeffreypaul15/sunkit-image
|
0987db8fcd38c79a83d7d890e407204e63a05c4f
|
[
"BSD-2-Clause-NetBSD",
"BSD-2-Clause"
] | null | null | null |
sunkit_image/__init__.py
|
jeffreypaul15/sunkit-image
|
0987db8fcd38c79a83d7d890e407204e63a05c4f
|
[
"BSD-2-Clause-NetBSD",
"BSD-2-Clause"
] | null | null | null |
"""
sunkit-image
============
An image processing toolbox for Solar Physics.
* Homepage: https://sunpy.org
* Documentation: https://sunkit-image.readthedocs.io/en/latest/
"""
import sys
from .version import version as __version__ # NOQA
# Enforce Python version check during package import.
__minimum_python_version__ = "3.7"
class UnsupportedPythonError(Exception):
"""
Running on an unsupported version of Python.
"""
if sys.version_info < tuple(int(val) for val in __minimum_python_version__.split(".")):
# This has to be .format to keep backwards compatibility.
raise UnsupportedPythonError(
"sunkit_image does not support Python < {}".format(__minimum_python_version__)
)
__all__ = []
| 23.483871
| 87
| 0.717033
| 88
| 728
| 5.613636
| 0.670455
| 0.105263
| 0.121457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0033
| 0.167582
| 728
| 30
| 88
| 24.266667
| 0.811881
| 0.443681
| 0
| 0
| 0
| 0
| 0.118421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f32d963c063df45b4e85b0c4f01e4ea1ea6369
| 26,004
|
py
|
Python
|
app/view.py
|
lucasblazzi/stocker
|
52cdec481ed84a09d97369ee4da229e169f99f51
|
[
"MIT"
] | null | null | null |
app/view.py
|
lucasblazzi/stocker
|
52cdec481ed84a09d97369ee4da229e169f99f51
|
[
"MIT"
] | null | null | null |
app/view.py
|
lucasblazzi/stocker
|
52cdec481ed84a09d97369ee4da229e169f99f51
|
[
"MIT"
] | null | null | null |
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
class View:
def __init__(self, st):
self.st = st
self.st.set_page_config(layout='wide')
self.side_bar = st.sidebar
def show_message(self, location, _type, message):
if location == "sb":
component = self.side_bar
else:
component = self.st
if _type == "success":
component.success(message)
elif _type == "error":
component.error(message)
elif _type == "warning":
component.warning(message)
elif _type == "info":
component.info(message)
def login(self):
_user = self.side_bar.text_input("Username:")
_pass = self.side_bar.text_input("Password", type="password")
return _user, _pass
def advisor_setup(self):
option = self.side_bar.selectbox("Options:", ("Research", ))
if option == "Research":
self.st.header("Advisor Research Area")
self.st.markdown("___")
return option
def research_area(self):
execute = False
args = {"price": {"enabled": False}, "sector": {"enabled": False}, "news": {"enabled": False},
"company_info": {"enabled": False}, "volatility": {"enabled": False}, "return": {"enabled": False},
"raw_price": {"enabled": False}, "volume": {"enabled": False}}
self.st.markdown("___")
check_cols = self.st.beta_columns(4)
args["price"]["enabled"] = check_cols[0].checkbox("Price")
args["company_info"]["enabled"] = check_cols[1].checkbox("Company Information")
args["sector"]["enabled"] = check_cols[2].checkbox("Sector Distribution")
args["news"]["enabled"] = check_cols[3].checkbox("News")
if args["price"]["enabled"]:
self.st.markdown("___")
self.st.subheader("Price Insights")
price_cols = self.st.beta_columns(7)
args["price"]["_type"] = price_cols[0].selectbox("Price type:", ("close", "open", "high", "low"))
args["price"]["period"] = price_cols[1].selectbox("Period:", ("ytd", "1m", "6m", "1y", "2y", "5y", "max"))
args["raw_price"]["enabled"] = price_cols[3].checkbox("Raw Price")
args["volume"]["enabled"] = price_cols[4].checkbox("Volume")
args["return"]["enabled"] = price_cols[5].checkbox("Return")
args["volatility"]["enabled"] = price_cols[6].checkbox("Volatility")
return execute, args
def show_cryptos(self, cryptos):
for crypto in cryptos:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Symbol: ** {crypto.get('symbol', '-')}")
cols[1].markdown(f"**Name: ** {crypto.get('name', '-')}")
cols[2].markdown(f"**Price: ** {crypto.get('price', '-')}")
def crypto_form(self):
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
_input = self.st.text_input("Cryptocurrency")
return _input
def sector_distribution(self, sectors):
self.st.subheader("Sector Distribution")
r = sectors['sector'].value_counts()
fig = go.Figure(data=[go.Pie(labels=r.index, values=r)])
fig.update_layout(
width=400, height=400,
)
self.st.plotly_chart(fig)
def plot_price(self, prices, _type):
self.st.subheader(_type.capitalize())
fig = go.Figure()
for price in prices:
name = price["symbol"][0]
fig.add_trace(go.Scatter(x=price.index, y=price[_type],
mode='lines',
name=name))
fig.update_layout(
template="plotly_white",
width=1400, height=500,
hovermode="x unified",
plot_bgcolor='rgba(0,0,0,0)'
)
self.st.plotly_chart(fig)
def show_companies(self, companies):
self.st.markdown("___")
self.st.subheader("Company Information")
self.st.markdown("<br>", unsafe_allow_html=True)
for company in companies:
basic = self.st.beta_columns(4)
basic[0].markdown(f"## **{company.get('name', ' ')} ({company.get('symbol', ' ')})**")
if company.get("logo"):
basic[3].image(company.get("logo"), width=50)
basic[3].markdown("<br>", unsafe_allow_html=True)
desc = self.st.beta_columns(2)
if company.get('sector'):
desc[0].markdown(f"**Sector: ** {company.get('sector', '-')}")
if company.get('industry'):
desc[1].markdown(f"**Industry: ** {company.get('industry', '-')}")
if company.get('description'):
desc[0].markdown(f"**Description: ** {company.get('description', '-')}")
info = self.st.beta_columns(2)
if company.get('CEO'):
info[0].markdown(f"**CEO: ** {company.get('CEO', '-')}")
if company.get('employees'):
info[1].markdown(f"**Employees: ** {company.get('employees', '-')}")
if company.get('website'):
info[0].markdown(f"**Website: ** {company.get('website', '-')}")
if company.get('city') or company.get('state') or company.get('country'):
info[1].markdown(f"**Location: ** {company.get('city', ' ')} - {company.get('state', ' ')} - {company.get('country', ' ')}")
self.st.markdown("___")
def show_news(self, news, title="Company News"):
self.st.markdown("___")
self.st.subheader(title)
self.st.markdown("<br>", unsafe_allow_html=True)
for n in news:
if n.get('symbol') or n.get('title') or n.get('date'):
self.st.markdown(f"**{n.get('symbol', ' ')} - {n.get('title', ' ')} [{n.get('date', ' ')}]**")
if n.get('source'):
self.st.markdown(f"**Source: ** {n.get('source', '-')}")
if n.get("image"):
self.st.image(n.get("image"), width=300)
if n.get("description"):
self.st.markdown(f"**Description: ** {n.get('description', '-')}")
if n.get("url"):
self.st.markdown(f"**Access on: ** {n.get('url', '-')}")
self.st.markdown("<br>", unsafe_allow_html=True)
def list_advisors(self, advisors):
for advisor in advisors:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Name: ** {advisor[0]}")
cols[1].markdown(f"**CPF: ** {advisor[1]}")
cols[2].markdown(f"**CVM: ** {advisor[2]}")
def symbol_input(self, symbols):
selected_symbols = self.st.multiselect("Stocks list:", symbols)
return selected_symbols
def admin_setup(self):
option = self.side_bar.selectbox("Option:", ("Data Loader", "Advisors", "Ad-Hoc"))
execute = False
arg = None
self.st.title("Stocker Administration Area")
self.st.markdown("___")
if option == "Data Loader":
arg = dict()
self.st.header("Stocker Data Loader")
arg["symbols"] = self.st.selectbox("Stocks Option:", ("Sample", "S&P 100"))
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Company Loader")
self.show_message("st", "info", "Stock Loading: Load on our database information about the companies listed"
"on the Stocks Option selected")
if self.st.button("Load Stocks"):
execute = True
arg["loader"] = "company"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Price Loader")
self.show_message("st", "info", "Price Loading: Load on our database information about companies daily"
" prices, you can select a specific period")
arg["period"] = self.st.selectbox("Prices Period:", ("5y", "2y", "1y", "ytd", "6m", "3m", "1m", "5d"))
if self.st.button("Load Prices"):
execute = True
arg["loader"] = "price"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker News Loader")
self.show_message("st", "info", "News Loading: Load on our database information about the latest news of"
" companies which can impact the market")
if self.st.button("Load News"):
execute = True
arg["loader"] = "news"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Crypto Loader")
self.show_message("st", "info", "Crypto Loading: Load on our database information about all "
"cryptocurrencies available on the market")
if self.st.button("Load Crypto"):
execute = True
arg["loader"] = "crypto"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Full Loader")
self.show_message("st", "info", "Full Loading: Load on our database all information listed above: companies"
" prices, news and cryptocurrencies")
if self.st.button("Full Load"):
execute = True
arg["loader"] = "full"
elif option == "Ad-Hoc":
self.st.header("Ad-Hoc")
elif option == "Advisors":
sub_option = self.st.selectbox("Opções:", ("List Advisors", "Register Advisor", "Edit Advisor"))
self.st.markdown("___")
if sub_option == "List Advisors":
option = sub_option
execute = True
elif sub_option == "Register Advisor":
arg = self.advisor_form(None)
option = sub_option
if arg:
execute = True
elif sub_option == "Edit Advisor":
arg = self.st.text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12")
execute = True
option = sub_option
self.st.markdown("___")
return option, execute, arg
def advisor_form(self, advisor):
cols = self.st.beta_columns([0.5, 0.25, 0.25])
button = "Update Advisor" if advisor else "Register Advisor"
advisor = {
"name": cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo",
value=advisor["name"]) if advisor
else cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo"),
"username": cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login",
value=advisor["username"]) if advisor
else cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login"),
"password": cols[2].text_input("Senha", max_chars=15, type='password', help="Senha para login"),
"cpf": advisor["cpf"] if advisor
else cols[2].text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12"),
"cvm_license": cols[1].text_input("Lincença CVM", max_chars=10, type='default',
value=advisor["cvm_license"]) if advisor
else cols[1].text_input("Lincença CVM", max_chars=10, type='default'),
"email": cols[0].text_input("Email", max_chars=30, type='default', value=advisor["email"]) if advisor
else cols[0].text_input("Email", max_chars=30, type='default'),
"profile": "advisor"
}
register = self.st.button(button)
self.st.markdown("___")
filled = True
for b in advisor.values():
if not b:
filled = False
if register:
if not filled:
self.show_message("st", "warning", "Preencha todos os campos")
else:
return advisor
@staticmethod
def plot_bar(companies, x, y, title, color):
df = pd.DataFrame(companies)
fig = px.bar(df, x=x, y=y,
color=color, title=title,
color_discrete_sequence=px.colors.qualitative.Pastel,
height=400)
return fig
@staticmethod
def plot_bar2(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
fig = go.Figure(data=[go.Bar(x=df[y], y=r)])
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_pie(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
fig = go.Figure(data=[go.Pie(labels=df[y], values=r)])
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_highest_emp(highest_emp):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=highest_emp[0][1],
title={
"text": f"{highest_emp[0][0]}<br><span style='font-size:0.8em;color:gray'>Highest number</span><br>"
f"<span style='font-size:0.8em;color:gray'>of employees</span>"},
)])
return fig
@staticmethod
def plot_information_companies(cols, companies):
logos = [company[1] for company in companies]
names = [company[0] for company in companies]
for idx, logo in enumerate(logos):
col = 2 if idx % 2 == 0 else 3
cols[col].image(logo, width=50)
for idx, name in enumerate(names):
col = 0 if idx % 2 == 0 else 1
cols[col].markdown(f"**Name: ** {name}")
@staticmethod
def plot_notusa_companies(cols, companies):
for company in companies:
cols[0].markdown(f"**Name: ** {company[0]}")
cols[1].markdown(f"**Country: ** {company[2]}")
cols[2].image(company[1], width=50)
@staticmethod
def plot_insight_prices(k, v):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=v[0][1],
title={
"text": f"{v[0][0]}<br><span style='font-size:0.8em;color:gray'>{k.split('_')[0].capitalize()} {k.split('_')[1].capitalize()}</span><br>"
f"<span style='font-size:0.8em;color:gray'>{v[0][2]}</span>"},
)])
return fig
def plot_company_ad_hoc(self, results):
companies = results["company"]["specific"]
highest_emp = results["company"]["insights"]["highest_emp"]
information = results["company"]["insights"]["tech"]
not_usa = results["company"]["insights"]["not_us"]
fields = results["company"]["fields"]
if companies:
if not "symbol" in fields:
self.st.warning("Be sure to select the symbol option")
else:
self.show_companies(companies)
col = self.st.beta_columns(2)
if "employees" in fields:
fig1 = self.plot_bar(companies, "symbol", "employees", "Number of employees by company", "employees")
col[0].plotly_chart(fig1, use_container_width=True)
if "state" in fields:
fig2 = self.plot_bar2(companies, "state", "State distribution")
col[1].plotly_chart(fig2, use_container_width=True)
col2 = self.st.beta_columns(2)
if "sector" in fields:
fig3 = self.plot_pie(companies, "sector", "Companies by sector")
col2[0].plotly_chart(fig3, use_container_width=True)
if "industry" in fields:
fig4 = self.plot_pie(companies, "industry", "Companies by industry")
col2[1].plotly_chart(fig4, use_container_width=True)
if highest_emp:
fig5 = self.plot_highest_emp(highest_emp)
self.st.plotly_chart(fig5, use_container_width=True)
if information:
self.st.markdown("___")
title_col = self.st.beta_columns(1)
cols4 = self.st.beta_columns([1, 1, 0.2, 0.2])
title_col[0].subheader("Information sector companies")
self.plot_information_companies(cols4, information)
if not_usa:
self.st.markdown("___")
title_col2 = self.st.beta_columns(1)
title_col2[0].subheader("Nasdaq listed companies outside USA")
cols5 = self.st.beta_columns(4)
self.plot_notusa_companies(cols5, not_usa)
def plot_price_ad_hoc(self, results):
if not results["price"]["specific"].empty:
self.st.markdown("___")
dfs = list()
for company in results["price"]["company_list"]:
mask = (results["price"]["specific"]["symbol"] == company)
dfs.append(results["price"]["specific"][mask])
self.plot_price(dfs, results["price"]["type"][0])
self.st.markdown("___")
c = 0
cols = self.st.beta_columns(len(results["price"]["insights"].keys()))
for k, val in results["price"]["insights"].items():
if val:
cols[c].plotly_chart(self.plot_insight_prices(k, val), use_container_width=True)
c += 1
def plot_news_ad_hoc(self, results):
if results["news"]["filter"]:
self.show_news(results["news"]["filter"], "Filtered News")
if results["news"]["insights"]:
news_fields = ("id", "symbol", "date", "title", "source", "url", "description", "image")
latest = results["news"]["insights"][0]
latest_news = dict()
for idx, v in enumerate(latest):
latest_news[news_fields[idx]] = v
self.show_news([latest], f"Latest news - {latest['symbol']} - {latest['date']}")
def plot_crypto_ad_hoc(self, results):
if results["crypto"]:
self.st.markdown("___")
self.show_cryptos(results["crypto"])
def ad_hoc_plot(self, results):
self.plot_company_ad_hoc(results)
self.plot_price_ad_hoc(results)
self.plot_news_ad_hoc(results)
self.plot_crypto_ad_hoc(results)
def ad_hoc_form(self, symbols):
company_fields = ("symbol", "name", "exchange", "industry", "website", "description", "CEO", "sector",
"employees", "state", "city", "country", "logo")
news_fields = ("symbol", "date", "title", "source", "url", "description", "image")
ad_hoc = self.default_ad_hoc()
self.st.markdown("___")
self.st.markdown(f"**Company Options:**")
cols = self.st.beta_columns([2, 1, 1])
cols[0].markdown(f"**Specific company views:**")
ad_hoc["company"]["specific"]["company_list"] = cols[0].multiselect("Stocks list:", sum(symbols, []))
ad_hoc["company"]["specific"]["fields"] = cols[0].multiselect("Information:", company_fields)
filter_cols = self.st.beta_columns(6)
ad_hoc["company"]["specific"]["order_by"] = filter_cols[0].selectbox("Order By:", ad_hoc["company"]["specific"]["fields"]),
ad_hoc["company"]["specific"]["order_method"] = filter_cols[1].selectbox("Order Method:", ("Ascending", "Descending")),
ad_hoc["company"]["specific"]["limit"] = filter_cols[2].number_input("Number of results:", value=1, min_value=1, max_value=100),
ad_hoc["company"]["specific"]["rule_filter"] = {}
cols[1].markdown(f"**Insights views:**")
cols[2].markdown(f"**-**")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["highest_emp"] = cols[1].checkbox("Highest employees number")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["tech"] = cols[1].checkbox("Information Companies")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["not_us"] = cols[2].checkbox("Outside USA")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["specific"]["rule_filter"]["apply"] = cols[2].checkbox("Rule filter")
if ad_hoc["company"]["specific"]["rule_filter"]["apply"]:
ad_hoc["company"]["specific"]["rule_filter"]["field"] = filter_cols[0].selectbox(
"Filter Field:", ("symbol", "name", "employees"))
ad_hoc["company"]["specific"]["rule_filter"]["operation"] = filter_cols[1].selectbox(
"Operation", ("Greater than", "Less than", "Equals to") if
ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees" else ("Equals to", ))
ad_hoc["company"]["specific"]["rule_filter"]["value"] = filter_cols[2].number_input("Value: ") \
if ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees"\
else filter_cols[2].text_input("Value: ")
self.st.markdown("___")
self.st.markdown(f"**Prices Options:**")
price_cols = self.st.beta_columns([2, 1, 1])
price_cols[0].markdown(f"**Specific price views:**")
ad_hoc["price"]["specific"]["company_list"] = price_cols[0].multiselect("Price Stocks:", sum(symbols, []))
filter_price_cols = self.st.beta_columns(6)
ad_hoc["price"]["specific"]["start_date"] = filter_price_cols[0].date_input("Start Date:")
ad_hoc["price"]["specific"]["end_date"] = filter_price_cols[1].date_input("End Date:")
ad_hoc["price"]["specific"]["type"] = filter_price_cols[2].selectbox("Price Type:", ("close", "open", "high", "low")),
price_cols[1].markdown(f"**Insights views:**")
price_cols[2].markdown(f"**-**")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["highest_close"] = price_cols[1].checkbox("Highest close price")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_close"] = price_cols[2].checkbox("Lowest close price")
ad_hoc["price"]["insights"]["highest_volume"] = price_cols[1].checkbox("Highest volume")
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_volume"] = price_cols[2].checkbox("Lowest volume")
self.st.markdown("___")
self.st.markdown(f"**News Options:**")
news_cols = self.st.beta_columns([2, 1, 1, 1])
news_cols[0].markdown(f"**Specific news views:**")
news_cols[1].markdown("-<br>", unsafe_allow_html=True)
news_cols[2].markdown("-<br>", unsafe_allow_html=True)
news_cols[3].markdown("-<br>", unsafe_allow_html=True)
ad_hoc["news"]["company_list"] = news_cols[0].multiselect("News Stocks:", sum(symbols, []))
ad_hoc["news"]["fields"] = news_cols[0].multiselect("News Info:", news_fields)
ad_hoc["news"]["date"] = news_cols[1].date_input("Date:")
ad_hoc["news"]["filter_date"] = news_cols[2].selectbox("Filter Date as:", ("On", "Starting from", "Until"))
ad_hoc["news"]["order_by"] = news_cols[1].selectbox("Order by field:", ad_hoc["news"]["fields"])
ad_hoc["news"]["order_method"] = news_cols[2].selectbox("Order results:", ("Ascending", "Descending"))
ad_hoc["news"]["limit"] = news_cols[3].number_input("Limit of results:", value=1, min_value=1, max_value=100)
ad_hoc["news"]["latest"] = news_cols[3].checkbox("Latest News")
self.st.markdown("___")
self.st.markdown(f"**Crypto Options:**")
crypto_col = self.st.beta_columns([2, 0.5, 1])
ad_hoc["crypto"]["name"] = crypto_col[0].text_input("Cryptocurrency")
ad_hoc["crypto"]["limit"] = crypto_col[1].number_input("Limit of crypto:", value=1, min_value=1, max_value=100)
generate = self.st.button("Generate Report")
if generate:
return ad_hoc
@staticmethod
def default_ad_hoc():
return {
"company": {
"specific": {
"company_list": [],
"fields": [],
"order_by": None,
"order_method": None,
"limit": None,
"rule_filter": {
"apply": False,
"field": None,
"operation": None,
"value": None
}
},
"insights": {
"highest_emp": False,
"tech": False,
"not_us": False
}
},
"news": {
"company_list": [],
"date": None,
"filter_date": None,
},
"price": {
"specific": {
"company_list": [],
"type": None,
"start_date": None,
"end_date": None
},
"insights": {
"highest_close": False,
"lowest_close": False,
"highest_volume": False,
"lowest_volume": False,
}
},
"crypto": {
"name": None,
"limit": None
}
}
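A minimal wiring sketch for the form/plot pair above (illustrative only; it assumes the surrounding class is named Dashboard, takes the streamlit module in its constructor, and that a run_ad_hoc_queries() backend exists; all of these names are hypothetical and not taken from the code above):

import streamlit as st

dashboard = Dashboard(st)  # hypothetical constructor wrapping the methods shown above
choices = dashboard.ad_hoc_form(symbols=[["AAPL", "MSFT"], ["BTC-USD"]])
if choices:  # ad_hoc_form only returns the selections after "Generate Report" is pressed
    results = run_ad_hoc_queries(choices)  # hypothetical backend producing the `results` dict
    dashboard.ad_hoc_plot(results)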
| 46.352941
| 153
| 0.543916
| 2,996
| 26,004
| 4.556409
| 0.111148
| 0.043513
| 0.043074
| 0.026152
| 0.380558
| 0.288184
| 0.244744
| 0.197421
| 0.164677
| 0.152516
| 0
| 0.016633
| 0.290225
| 26,004
| 561
| 154
| 46.352941
| 0.722978
| 0
| 0
| 0.207171
| 0
| 0.011952
| 0.23757
| 0.019842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057769
| false
| 0.005976
| 0.005976
| 0.001992
| 0.093626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f35b16a60f939a7ee519533639ecb4ccd48d47
| 866
|
py
|
Python
|
TestFiles/volumioTest.py
|
GeorgeIoak/Oden
|
9bb6a5811e2ea40ceef67e46bc56eab1be9ce06c
|
[
"MIT"
] | null | null | null |
TestFiles/volumioTest.py
|
GeorgeIoak/Oden
|
9bb6a5811e2ea40ceef67e46bc56eab1be9ce06c
|
[
"MIT"
] | null | null | null |
TestFiles/volumioTest.py
|
GeorgeIoak/Oden
|
9bb6a5811e2ea40ceef67e46bc56eab1be9ce06c
|
[
"MIT"
] | null | null | null |
# Testing code to check update status on demand
from socketIO_client import SocketIO, LoggingNamespace
from threading import Thread
socketIO = SocketIO('localhost', 3000)
status = 'pause'
def on_push_state(*args):
print('state', args)
global status, position, duration, seek
status = args[0]['status'].encode('ascii', 'ignore')
seek = args[0]['seek']
duration = args[0]['duration']
if duration:
position = int(seek / 1000)
else:
position = 0
print("status", status, "position", position)
def _receive_thread():
socketIO.wait()
receive_thread = Thread(target=_receive_thread, daemon=True)
receive_thread.start()
socketIO.on('pushState', on_push_state)
# issue this and the socketIO.wait in the background will push the reply
socketIO.emit('getState', '', on_push_state)
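# A small, hypothetical extension of the test above: block the main thread until the first
# 'pushState' reply arrives instead of relying on timing. Uses only the standard library and
# the handler already defined in this script.
from threading import Event

state_received = Event()

def on_push_state_once(*args):
    on_push_state(*args)  # reuse the handler defined above
    state_received.set()

socketIO.emit('getState', '', on_push_state_once)
state_received.wait(timeout=5)  # give Volumio up to 5 seconds to answer
print('final status:', status)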
| 29.862069
| 72
| 0.674365
| 107
| 866
| 5.336449
| 0.485981
| 0.091068
| 0.057793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.210162
| 866
| 29
| 73
| 29.862069
| 0.817251
| 0.133949
| 0
| 0
| 0
| 0
| 0.105615
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.190476
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f43bae0fb833bc9d376660819fab38bbd38d60
| 11,830
|
py
|
Python
|
src/use-model.py
|
sofieditmer/self-assigned
|
3033b64d2848fcf73c44dd79ad4e7f07f8387c65
|
[
"MIT"
] | null | null | null |
src/use-model.py
|
sofieditmer/self-assigned
|
3033b64d2848fcf73c44dd79ad4e7f07f8387c65
|
[
"MIT"
] | null | null | null |
src/use-model.py
|
sofieditmer/self-assigned
|
3033b64d2848fcf73c44dd79ad4e7f07f8387c65
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Info: This script loads the model trained in the cnn-asl.py script and enables the user to use it for classifying unseen ASL letters. It also visualizes the feature map of the last convolutional layer of the network to enable the user to get an insight into exactly which parts of the original image that the model is paying attention to when classifying the image.
Parameters:
(optional) model_name: str <name-of-the-model-to-load>, default = "saved_model.json"
(optional) train_data: str <name-of-training-data>, default = "asl_alphabet_train_subset"
(optional) unseen_image: str <name-of-unseen-image>, default = "unseen_img_test1.png"
Usage:
$ python use-model.py
Output:
- unseen_image_superimposed_heatmap.png: superimposed heatmap on unseen image.
- unseen_image_prediction.txt: model prediction of unseen image.
"""
### DEPENDENCIES ###
# Core libraries
import os
import sys
sys.path.append(os.path.join(".."))
# Matplotlib, numpy, OpenCV
import matplotlib.pyplot as plt
import numpy as np
import cv2
# TensorFlow
import tensorflow as tf
from tensorflow.keras.preprocessing.image import (load_img, img_to_array)
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.models import model_from_json
from tensorflow.keras import backend as K
# argparse
import argparse
### MAIN FUNCTION ###
def main():
### ARGPARSE ###
# Initialize ArgumentParser class
ap = argparse.ArgumentParser()
# Argument 1: Model name
ap.add_argument("-m", "--model_name",
type = str,
required = False, # the argument is not required
help = "Name of the model",
default = "saved_model.json") # default name
# Argument 2: Training data
ap.add_argument("-t", "--train_data",
type = str,
required = False, # the argument is not required
help = "Name of training data folder",
default = "asl_alphabet_train_subset") # default is a subset of the training dataset
# Argument 3: Input image
ap.add_argument("-u", "--unseen_image",
type = str,
required = False, # the argument is not required
help = "Name of the image the model should classify",
default = "unseen_img_test1.png") # default unseen image provided in the unseen_images folder
# Parse arguments
args = vars(ap.parse_args())
# Save input parameters
model_name = args["model_name"]
train_data = os.path.join("..", "data", "subset_asl_sign_language", args["train_data"])
unseen_image = args["unseen_image"]
# Create output directory if it does not already exist
if not os.path.exists(os.path.join("..", "output")):
os.mkdir(os.path.join("..", "output"))
# Start message
print("\n[INFO] Initializing...")
# Instantiate the class
classifier = Loaded_model_classifier(train_data, unseen_image)
# Create list of label names from the directory names in the training data folder
labels = classifier.list_labels()
# Load the model
print(f"\n[INFO] Loading the CNN model, {model_name}, from 'output' directory...")
model = classifier.load_model(model_name)
# Classify input image
print(f"\n[INFO] Using the model to predict the class of {unseen_image}...")
label = classifier.classify_unseen_image(labels, model)
# Visualize feature map of network for input image
print(f"\n[INFO] Visualizing the feature map of the last convolutional layer of the network...")
classifier.visualize_feature_map(model)
# User message
print(f"\n[INFO] Done! The {unseen_image} has been classified as {label} and the feature map of the last convolutional layer of the network has been visualized and saved as {unseen_image}_superimposed_heatmap.png in 'output' directory\n")
# Creating classifier class
class Loaded_model_classifier:
def __init__(self, train_data, unseen_image):
# Receive inputs: train data and input image
self.train_data = train_data
self.unseen_image = unseen_image
def list_labels(self):
"""
This method defines the label names by listing the names of the folders within training directory without listing hidden files. It sorts the names alphabetically.
"""
# Create empty list
labels = []
# For every name in training directory
for name in os.listdir(self.train_data):
# If it does not start with . (which hidden files do)
if not name.startswith('.'):
labels.append(name)
# Sort labels alphabetically
labels = sorted(labels)
return labels
def load_model(self, model_name):
"""
This method loads the model and the model weights that are saved in the output directory.
"""
# Load JSON-file and create model
model_path = os.path.join("..", "output", model_name)
json_model = open(model_path, "r")
# Read file
loaded_file = json_model.read()
# Create model
loaded_model = model_from_json(loaded_file)
# Load weights into new model
loaded_model.load_weights(os.path.join("..", "output", "model_weights.h5"))
# Compile model
loaded_model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return loaded_model
def classify_unseen_image(self, labels, model):
"""
This method takes an unseen image, performs some preprocessing to prepare it for the model, and predicts the class of the image using the model.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load unseen image
image = load_img(img_path, target_size=(224, 224)) # using the same size as the images the model has been trained on
# Convert the image to a numpy array
image = img_to_array(image)
# Reshape the image, because the model expects a tensor of rank 4. The image goes from being 3-dimensional to 4-dimensional: (1, 224, 224, 3)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
# Prepare the image for the ResNet50 model
image = preprocess_input(image)
# Predict the class of the image
prediction = np.argmax(model.predict(image))
# Convert labels to be a dictionary which is needed to extract the label that corresponds to the prediction
labels = dict(zip(labels, range(len(labels))))
# Define function that finds the key (letter) that corresponds to the predicted value
def find_key(dictionary, value):
return next(k for k, v in dictionary.items() if v == value)
# Extract letter that corresponds to the predicted value from the label dictionary
label = find_key(labels, prediction)
# Print the predicted class to the terminal
print(f"\nThe model predicts {self.unseen_image} to be the letter {label}")
# Save prediction as txt-file to output directory
with open(os.path.join("..", "output", f"{self.unseen_image}_prediction.txt"), "w") as f:
f.write(f"The predicted class of the {self.unseen_image} made by the model is {label}")
return label
def visualize_feature_map(self, model):
"""
This method visualizes the feature map of the last convolutional layer of the network.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load image with dimensions corresponding to training images
img = load_img(img_path, target_size=(224, 224))
# Convert image to array
x = img_to_array(img)
# Convert to rank 4 tensor
x = np.expand_dims(x, axis=0)
# Preprocess to be in line with ResNet50 data
x = preprocess_input(x)
# Create activation heatmap for final layer. This is done by taking advantage of how the model learns through gradient descent. We use the gradients that have been learned through training, and we go the opposite way (rather than minimizing we are maximizing). Essentially, we make use of the gradients in the final layer to highlight which regions are particularly informative when predicting a given class.
with tf.GradientTape() as tape:
# Take the last convolutional layer in the network
last_conv_layer = model.get_layer('conv5_block3_out')
# Create a model that maps the input image to the activations of the last convolutional layer as well as the output predictions
iterate = tf.keras.models.Model([model.inputs],
[model.output, last_conv_layer.output])
# Compute the gradient of the top predicted class for the input image with respect to the activations of the last conv layer
# Take the gradients from the last layer
model_out, last_conv_layer = iterate(x)
# Find the class that has been predicted by the model
class_out = model_out[:, np.argmax(model_out[0])]
# Extract gradient of the output neuron of the last convolutional layer
grads = tape.gradient(class_out,
last_conv_layer)
# Vector of mean intensity of the gradient over a specific feature map channel
pooled_grads = K.mean(grads, axis=(0, 1, 2))
# Multiply each channel in the feature map array by "how important this channel is" with regard to the top predicted class. Then sum all the channels to obtain the heatmap class activation
heatmap = tf.reduce_mean(tf.multiply(pooled_grads, last_conv_layer), axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
heatmap = heatmap.reshape((7,7))
plt.matshow(heatmap)
# Load unseen image with OpenCV
img = cv2.imread(img_path)
# Make heatmap semi-transparent
intensity = 0.5
# Resize the heatmap to be the original dimensions of the input
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
# Apply colormap
heatmap = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
# Multiply heatmap by intensity and 'add' this on top of the original image
superimposed = (heatmap * intensity) + img
# Save the superimposed image to output directory
cv2.imwrite(os.path.join("..", "output", f"{self.unseen_image}_superimposed_heatmap.png"), superimposed)
# User message
print(f"\n[INFO] The feature map has now been visualized and superimposed on {self.unseen_image}. Find image as {self.unseen_image}_superimposed_heatmap.png in 'output' directory...")
# Define behaviour when called from command line
if __name__=="__main__":
main()
| 42.707581
| 421
| 0.623331
| 1,497
| 11,830
| 4.819639
| 0.229793
| 0.045738
| 0.01386
| 0.024255
| 0.176299
| 0.14456
| 0.114207
| 0.103119
| 0.072072
| 0.072072
| 0
| 0.007597
| 0.298986
| 11,830
| 277
| 422
| 42.707581
| 0.862414
| 0.398309
| 0
| 0.072727
| 0
| 0.018182
| 0.185228
| 0.030294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063636
| false
| 0
| 0.1
| 0.009091
| 0.209091
| 0.063636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f53ccd69bc56b9aef660e968f36d2013f14d05
| 7,899
|
py
|
Python
|
src/gluonts/nursery/autogluon_tabular/estimator.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 2,648
|
2019-06-03T17:18:27.000Z
|
2022-03-31T08:29:22.000Z
|
src/gluonts/nursery/autogluon_tabular/estimator.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 1,220
|
2019-06-04T09:00:14.000Z
|
2022-03-31T10:45:43.000Z
|
src/gluonts/nursery/autogluon_tabular/estimator.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 595
|
2019-06-04T01:04:31.000Z
|
2022-03-30T10:40:26.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Callable, Optional, List, Tuple
import pandas as pd
from autogluon.tabular import TabularPredictor as AutogluonTabularPredictor
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.dataset.util import to_pandas
from gluonts.model.estimator import Estimator
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from .predictor import (
TabularPredictor,
mean_abs_scaling,
get_features_dataframe,
)
logger = logging.getLogger(__name__)
class TabularEstimator(Estimator):
"""An estimator that trains an Autogluon Tabular model for time series
forecasting.
Additional keyword arguments to the constructor, other than the ones documented
below, will be passed on to Autogluon Tabular's ``fit`` method used for training
the model.
Parameters
----------
freq
Frequency of the data to handle
prediction_length
Prediction length
lag_indices
List of indices of the lagged observations to use as features. If
None, this will be set automatically based on the frequency.
time_features
List of time features to be used. If None, this will be set automatically
based on the frequency.
scaling
Function to be used to scale time series. This should take a pd.Series object
as input, and return a scaled pd.Series and the scale (float). By default,
this divides a series by the mean of its absolute value.
batch_size
Batch size of the resulting predictor; this is just used at prediction
time, and does not affect training in any way.
disable_auto_regression
Whether to forcefully disable auto-regression in the model. If ``True``,
this will remove any lag index which is smaller than ``prediction_length``.
This will make predictions more efficient, but may impact their accuracy.
quantiles_to_predict
Whether to forecast quantiles. If a list of quantile levels is given,
the model is trained as a quantile-prediction model. If None, the model
is trained as a regular point-forecast model.
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
lag_indices: Optional[List[int]] = None,
time_features: Optional[List[TimeFeature]] = None,
scaling: Callable[
[pd.Series], Tuple[pd.Series, float]
] = mean_abs_scaling,
batch_size: Optional[int] = 32,
disable_auto_regression: bool = False,
last_k_for_val: Optional[int] = None,
quantiles_to_predict: Optional[List[float]] = None,
eval_metric: str = "mean_absolute_error",
**kwargs,
) -> None:
super().__init__()
self.freq = freq
self.prediction_length = prediction_length
self.lag_indices = (
lag_indices
if lag_indices is not None
else get_lags_for_frequency(self.freq)
)
self.time_features = (
time_features
if time_features is not None
else time_features_from_frequency_str(self.freq)
)
self.batch_size = batch_size
self.disable_auto_regression = disable_auto_regression
self.scaling = scaling
self.last_k_for_val = last_k_for_val
self.eval_metric = eval_metric
self.quantiles_to_predict = quantiles_to_predict
if self.disable_auto_regression:
self.lag_indices = [
lag_idx
for lag_idx in self.lag_indices
if lag_idx >= self.prediction_length
]
default_kwargs = {
"time_limit": 60,
# "excluded_model_types": ["KNN", "XT", "RF"],
"presets": [
"high_quality_fast_inference_only_refit",
"optimize_for_deployment",
],
"auto_stack": True,
}
self.kwargs = {**default_kwargs, **kwargs}
def train(
self,
training_data: Dataset,
validation_data: Optional[Dataset] = None,
) -> TabularPredictor:
kwargs_override = {}
dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in training_data
]
if validation_data is not None or self.last_k_for_val is not None:
kwargs_override["auto_stack"] = False
logger.warning(
"Auto Stacking is turned off "
"as validation dataset is provided before input into Tabular Predictor."
)
if validation_data is not None:
logger.log(20, "Validation dataset is directly provided.")
validation_dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in validation_data
]
train_df = pd.concat(dfs)
val_df = pd.concat(validation_dfs)
elif self.last_k_for_val is not None:
logger.log(
20,
f"last_k_for_val is provided, choosing last {self.last_k_for_val} of each time series as validation set.",
)
train_dfs = [
tmp_df.iloc[: -self.last_k_for_val, :] for tmp_df in dfs
]
validation_dfs = [
tmp_df.iloc[-self.last_k_for_val :, :] for tmp_df in dfs
]
train_df = pd.concat(train_dfs)
val_df = pd.concat(validation_dfs)
else:
logger.log(
20,
"No validation dataset is provided, will let TabularPredictor do the splitting automatically,"
"Note that this might break the time order of time series data.",
)
train_df = pd.concat(dfs)
val_df = None
if self.quantiles_to_predict is not None:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="quantile",
quantile_levels=self.quantiles_to_predict,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
else:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="regression",
eval_metric=self.eval_metric,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
return TabularPredictor(
ag_model=ag_model,
freq=self.freq,
prediction_length=self.prediction_length,
time_features=self.time_features,
lag_indices=self.lag_indices,
scaling=self.scaling,
batch_size=self.batch_size,
quantiles_to_predict=self.quantiles_to_predict,
)
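A usage sketch for the estimator above (illustrative only; it assumes gluonts' built-in dataset repository and autogluon.tabular are installed, and that the returned predictor exposes the usual GluonTS predict() interface):

from gluonts.dataset.repository.datasets import get_dataset

dataset = get_dataset("m4_hourly")  # hourly data, so freq="H"
estimator = TabularEstimator(freq="H", prediction_length=48, time_limit=120)
predictor = estimator.train(training_data=dataset.train)
forecasts = list(predictor.predict(dataset.train))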
| 36.233945
| 122
| 0.613622
| 934
| 7,899
| 4.981799
| 0.278373
| 0.036106
| 0.015474
| 0.021277
| 0.213841
| 0.192564
| 0.17709
| 0.143133
| 0.121212
| 0.121212
| 0
| 0.003716
| 0.318648
| 7,899
| 217
| 123
| 36.400922
| 0.860832
| 0.261426
| 0
| 0.233766
| 0
| 0
| 0.095129
| 0.014419
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| false
| 0
| 0.064935
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f548488d990977359fc60d27c5b1e982176596
| 1,032
|
py
|
Python
|
src/dcar/errors.py
|
andreas19/dcar
|
31118ac5924b7cb01f8b7da5a84480824c046df2
|
[
"BSD-3-Clause"
] | 1
|
2020-11-25T15:04:39.000Z
|
2020-11-25T15:04:39.000Z
|
src/dcar/errors.py
|
andreas19/dcar
|
31118ac5924b7cb01f8b7da5a84480824c046df2
|
[
"BSD-3-Clause"
] | null | null | null |
src/dcar/errors.py
|
andreas19/dcar
|
31118ac5924b7cb01f8b7da5a84480824c046df2
|
[
"BSD-3-Clause"
] | null | null | null |
"""Errors module."""
__all__ = [
'Error',
'AddressError',
'AuthenticationError',
'TransportError',
'ValidationError',
'RegisterError',
'MessageError',
'DBusError',
'SignatureError',
'TooLongError',
]
class Error(Exception):
"""Base class."""
class AddressError(Error):
"""Raised for errors in server addresses."""
class AuthenticationError(Error):
"""Raised when authentication failed."""
class TransportError(Error):
"""Raised for transport related errors."""
class ValidationError(Error):
"""Raised when validation failed."""
class RegisterError(Error):
"""Raised when a signal or method could not be registered."""
class MessageError(Error):
"""Raised for errors in messages."""
class DBusError(MessageError):
"""Raised for errors from ERROR messages."""
class SignatureError(MessageError):
"""Raised for errors in signatures."""
class TooLongError(MessageError):
"""Raised when a message, an array, a name etc. is too long."""
| 18.763636
| 67
| 0.671512
| 102
| 1,032
| 6.754902
| 0.45098
| 0.095791
| 0.087083
| 0.07402
| 0.063861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189922
| 1,032
| 54
| 68
| 19.111111
| 0.824163
| 0.373062
| 0
| 0
| 0
| 0
| 0.211506
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f66223b5c0420ba407f0ba73a5510c6ae72923
| 31,006
|
py
|
Python
|
Packs/CortexXDR/Integrations/XDR_iocs/XDR_iocs_test.py
|
SergeBakharev/content
|
d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf
|
[
"MIT"
] | 1
|
2022-03-05T02:23:32.000Z
|
2022-03-05T02:23:32.000Z
|
Packs/CortexXDR/Integrations/XDR_iocs/XDR_iocs_test.py
|
SergeBakharev/content
|
d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf
|
[
"MIT"
] | 42
|
2022-03-11T10:52:26.000Z
|
2022-03-31T01:50:42.000Z
|
Packs/CortexXDR/Integrations/XDR_iocs/XDR_iocs_test.py
|
SergeBakharev/content
|
d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf
|
[
"MIT"
] | 2
|
2021-12-13T13:07:21.000Z
|
2022-03-05T02:23:34.000Z
|
from XDR_iocs import *
import pytest
from freezegun import freeze_time
Client.severity = 'INFO'
client = Client({'url': 'test'})
def d_sort(in_dict):
return sorted(in_dict.items())
class TestGetHeaders:
@freeze_time('2020-06-01T00:00:00Z')
def test_sanity(self, mocker):
"""
Given:
- API key
- API key ID
Then:
- Verify headers created correct.
"""
params = {
"apikey_id": "7",
"apikey": "t3PkfrEhaRAD9a3r6Lq5cVPyqdMqtLd8cOJlSWUtbslkbERUgb2BTkSNRtDr3C6CWAgYqxvyzwDFJ83BLBgu1V2cxQY7rsoo2ks2u3W2aBL2BlteF8C8u75lCVUrNbv1" # noqa: E501
}
headers = {
'Authorization': 'da94963b561e3c95899d843b1284cecf410606e9e809be528ec1cf03880c6e9e',
'x-iocs-source': 'xsoar',
'x-xdr-auth-id': '7',
'x-xdr-nonce': '1111111111111111111111111111111111111111111111111111111111111111',
'x-xdr-timestamp': '1590969600000'
}
mocker.patch('secrets.choice', return_value='1')
output = get_headers(params)
assert output == headers, f'get_headers({params})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(headers)}'
def test_empty_case(self):
"""
Given:
Empty params
Then:
get_headers will not raise error
"""
get_headers({})
class TestHttpRequest:
class Res:
content = 'error'.encode()
def __init__(self, code):
self.status_code = code
@staticmethod
def json():
return {}
XDR_SERVER_ERROR = 500
INVALID_CREDS = 401
LICENSE_ERROR = 402
PERMISSION_ERROR = 403
OK = 200
data_test_http_request_error_codes = [
(OK, {}),
(XDR_SERVER_ERROR, 'XDR internal server error.\t(error)'),
(INVALID_CREDS, 'Unauthorized access. An issue occurred during authentication. This can indicate an incorrect key, id, or other invalid authentication parameters.\t(error)'), # noqa: E501
(LICENSE_ERROR, 'Unauthorized access. User does not have the required license type to run this API.\t(error)'),
(PERMISSION_ERROR, 'Unauthorized access. The provided API key does not have the required RBAC permissions to run this API.\t(error)') # noqa: E501
]
@pytest.mark.parametrize('res, expected_output', data_test_http_request_error_codes)
def test_http_request_error_codes(self, res, expected_output, mocker):
"""
Given:
- Status code
When:
- http_request returns this status code.
Then:
- Verify error/success format.
"""
mocker.patch('requests.post', return_value=self.Res(res))
try:
output = client.http_request('', {})
except DemistoException as error:
output = str(error)
assert output == expected_output, f'status code {res}\n\treturns: {output}\n\tinstead: {expected_output}'
class TestGetRequestsKwargs:
def test_with_file(self, mocker):
"""
Given:
- file to upload
Then:
- Verify output format.
"""
def override_open(open_path, *_other):
return open_path
mocker.patch('builtins.open', side_effect=override_open)
path = '/Users/some_user/some_dir/some_file.file'
output = get_requests_kwargs(file_path=path)
expected_output = {'files': [('file', ('iocs.json', path, 'application/json'))]}
assert output == expected_output, f'get_requests_kwargs(file_path={path})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
def test_with_json(self):
"""
Given:
- simple json
Then:
- the json ready to send
"""
_json = {'test': 'test'}
output = get_requests_kwargs(_json=_json)
expected_output = {'data': '{"request_data": {"test": "test"}}'}
assert output == expected_output, f'get_requests_kwargs(_json={_json})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
class TestPrepareCommands:
def test_prepare_get_changes(self):
"""
Given:
- get changes command
Then:
- Verify url and json format.
"""
ts = int(datetime.now(timezone.utc).timestamp() * 1000)
url_suffix, _json = prepare_get_changes(ts)
assert url_suffix == 'get_changes', f'prepare_get_changes\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: get_changes' # noqa: E501
assert _json == {'last_update_ts': ts}
def test_prepare_enable_iocs(self):
"""
Given:
- enable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_enable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'enable_iocs', f'prepare_enable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: enable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
def test_prepare_disable_iocs(self):
"""
Given:
- disable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_disable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'disable_iocs', f'prepare_disable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: disable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
class TestCreateFile:
path = 'test_data/sync_file_test.json'
data_test_create_file_sync = [
('Domain_iocs', 'Domain_sync_file'),
('IP_iocs', 'IP_sync_file'),
('File_iocs', 'File_sync_file')
]
data_test_create_file_iocs_to_keep = [
('Domain_iocs', 'Domain_iocs_to_keep_file'),
('IP_iocs', 'IP_iocs_to_keep_file'),
('File_iocs', 'File_iocs_to_keep_file')
]
def setup(self):
# creates the file
with open(TestCreateFile.path, 'w') as _file:
_file.write('')
def teardown(self):
# removes the file when done
os.remove(TestCreateFile.path)
@staticmethod
def get_file(path):
with open(path, 'r') as _file:
return _file.read()
@staticmethod
def get_all_iocs(go_over, extension):
iocs = []
total = 0
data = []
for in_iocs, out_iocs in go_over:
ioc = json.loads(TestCreateFile.get_file(f'test_data/{in_iocs}.json'))
iocs.extend(ioc['iocs'])
total += ioc['total']
data.append(TestCreateFile.get_file(f'test_data/{out_iocs}.{extension}'))
all_iocs = {'iocs': iocs, 'total': total}
all_data = ''.join(data)
return all_iocs, all_data
def test_create_file_sync_without_iocs(self, mocker):
"""
Given:
- Sync command
When:
- there is no iocs
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_sync with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_sync)
def test_create_file_sync(self, in_iocs, out_iocs, mocker):
"""
Given:
- Sync command
When:
- iocs type is a specific type.
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(self.get_file(f'test_data/{in_iocs}.json'))) # noqa: E501
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_sync with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
def test_create_file_sync_all_types(self, mocker):
"""
Given:
- Sync command
When:
- iocs as all types
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
data_test_create_file_with_empty_indicators = [
{},
{'value': '11.11.11.11'},
{'indicator_type': 'IP'}
]
@pytest.mark.parametrize('defective_indicator', data_test_create_file_with_empty_indicators)
def test_create_file_sync_with_empty_indicators(self, defective_indicator, mocker):
"""
Given:
- Sync command
When:
- a part iocs dont have all required data
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
all_iocs['iocs'].append(defective_indicator)
all_iocs['total'] += 1
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
warnings = mocker.patch.object(demisto, 'debug')
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
error_msg = warnings.call_args.args[0]
assert error_msg.startswith("unexpected IOC format in key: '"), f"create_file_sync empty message\n\tstarts: {error_msg}\n\tinstead: unexpected IOC format in key: '" # noqa: E501
assert error_msg.endswith(f"', {str(defective_indicator)}"), f"create_file_sync empty message\n\tends: {error_msg}\n\tinstead: ', {str(defective_indicator)}" # noqa: E501
def test_create_file_iocs_to_keep_without_iocs(self, mocker):
"""
Given:
- iocs to keep command
When:
- there is no iocs
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_iocs_to_keep with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_iocs_to_keep)
def test_create_file_iocs_to_keep(self, in_iocs, out_iocs, mocker):
"""
Given:
- iocs to keep command
When:
- iocs type is a specific type.
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(
self.get_file(f'test_data/{in_iocs}.json')))
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_iocs_to_keep with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}' # noqa: E501
def test_create_file_iocs_to_keep_all_types(self, mocker):
"""
Given:
- iocs to keep command
When:
- iocs as all types
Then:
- Verify iocs to keep file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_iocs_to_keep, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_iocs_to_keep with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
class TestDemistoIOCToXDR:
data_test_demisto_expiration_to_xdr = [
(None, -1),
('', -1),
('0001-01-01T00:00:00Z', -1),
('2020-06-03T00:00:00Z', 1591142400000)
]
@pytest.mark.parametrize('demisto_expiration, xdr_expiration', data_test_demisto_expiration_to_xdr)
def test_demisto_expiration_to_xdr(self, demisto_expiration, xdr_expiration):
"""
Given:
- demisto indicator expiration
Then:
- Verify XDR expiration.
"""
output = demisto_expiration_to_xdr(demisto_expiration)
assert xdr_expiration == output, f'demisto_expiration_to_xdr({demisto_expiration})\n\treturns: {output}\n\tinstead: {xdr_expiration}' # noqa: E501
data_test_demisto_reliability_to_xdr = [
(None, 'F'),
('A - Completely reliable', 'A'),
('B - Usually reliable', 'B'),
('C - Fairly reliable', 'C'),
('D - Not usually reliable', 'D'),
('E - Unreliable', 'E'),
('F - Reliability cannot be judged', 'F')
]
@pytest.mark.parametrize('demisto_reliability, xdr_reliability', data_test_demisto_reliability_to_xdr)
def test_demisto_reliability_to_xdr(self, demisto_reliability, xdr_reliability):
"""
Given:
- demisto indicator reliability
Then:
- Verify XDR reliability.
"""
output = demisto_reliability_to_xdr(demisto_reliability)
assert output == xdr_reliability, f'demisto_reliability_to_xdr({demisto_reliability})\n\treturns: {output}\n\tinstead: {xdr_reliability}' # noqa: E501
data_test_demisto_types_to_xdr = [
('File', 'HASH'),
('IP', 'IP'),
('Domain', 'DOMAIN_NAME')
]
@pytest.mark.parametrize('demisto_type, xdr_type', data_test_demisto_types_to_xdr)
def test_demisto_types_to_xdr(self, demisto_type, xdr_type):
"""
Given:
- demisto indicator type
Then:
- Verify XDR type.
"""
output = demisto_types_to_xdr(demisto_type)
assert output == xdr_type, f'demisto_reliability_to_xdr({demisto_type})\n\treturns: {output}\n\tinstead: {xdr_type}'
data_test_demisto_vendors_to_xdr = [
(
{'moduleID': {'sourceBrand': 'test', 'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'moduleID', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'sourceBrand': 'test', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 0}},
{'vendor_name': 'moduleID', 'reputation': 'UNKNOWN', 'reliability': 'A'}
)
]
@pytest.mark.parametrize('demisto_vendor, xdr_vendor', data_test_demisto_vendors_to_xdr)
def test_demisto_vendors_to_xdr(self, demisto_vendor, xdr_vendor):
"""
Given:
- demisto indicator vendors reports.
Then:
- Verify XDR vendors format.
"""
output = demisto_vendors_to_xdr(demisto_vendor)[0]
assert output == xdr_vendor, f'demisto_vendors_to_xdr({demisto_vendor})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_vendor)}' # noqa: E501
data_test_demisto_ioc_to_xdr = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 100, 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO', 'type': '100'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'expiration': '2020-06-03T00:00:00Z'},
{'expiration_date': 1591142400000, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentTimeLine', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'test'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}, {'type': 'IndicatorCommentRegular', 'content': 'this is the comment'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'this is the comment'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'aggregatedReliability': 'A - Completely reliable'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'reliability': 'A'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'CustomFields': {'threattypes': {'threatcategory': 'Malware'}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'class': 'Malware'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'moduleToFeedMap': {'module': {'sourceBrand': 'test', 'score': 2}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'vendors': [{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}]} # noqa: E501
)
]
@pytest.mark.parametrize('demisto_ioc, xdr_ioc', data_test_demisto_ioc_to_xdr)
def test_demisto_ioc_to_xdr(self, demisto_ioc, xdr_ioc):
"""
Given:
- demisto indicator.
Then:
- Verify XDR indicator format.
"""
output = demisto_ioc_to_xdr(demisto_ioc)
assert output == xdr_ioc, f'demisto_ioc_to_xdr({demisto_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_ioc)}' # noqa: E501
def test_empty_demisto_ioc_to_xdr(self, mocker):
warnings = mocker.patch.object(demisto, 'debug')
output = demisto_ioc_to_xdr({})
assert output == {}, 'demisto_ioc_to_xdr({})\n\treturns: ' + str(d_sort(output)) + '\n\tinstead: {}'
assert warnings.call_args.args[0] == "unexpected IOC format in key: 'value', {}"
class TestXDRIOCToDemisto:
data_test_xdr_expiration_to_demisto = [
(-1, 'Never'),
(1591142400000, '2020-06-03T00:00:00Z'),
(1592142400000, '2020-06-14T13:46:40Z')
]
@pytest.mark.parametrize('xdr_expiration, demisto_expiration', data_test_xdr_expiration_to_demisto)
def test_xdr_expiration_to_demisto(self, xdr_expiration, demisto_expiration):
"""
Given:
- expiration in XDR format.
Then:
- expiration in demisto format.
"""
output = xdr_expiration_to_demisto(xdr_expiration)
assert output == demisto_expiration, f'xdr_expiration_to_demisto({xdr_expiration})\n\treturns: {output}\n\tinstead: {demisto_expiration}' # noqa: E501
data_test_xdr_ioc_to_demisto = [
(
{
'RULE_ID': 863, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801230, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'HASH',
'RULE_INDICATOR': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e',
'type': 'File',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 861, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.com', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'test.com',
'type': 'Domain',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 862, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'ENABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.co.il',
'REPUTATION': 'SUSPICIOUS', 'RELIABILITY': 'A',
'VENDORS': [{'vendor_name': 'Cortex XDR - IOC', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}],
'KLASS': None,
'IS_DEFAULT_TTL': False, 'RULE_TTL': -1, 'MARKED_DELETED': 0
},
{
'value': 'test.co.il',
'type': 'Domain',
'score': 2,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'enabled'
}
}
)
]
@pytest.mark.parametrize('xdr_ioc, demisto_ioc', data_test_xdr_ioc_to_demisto)
def test_xdr_ioc_to_demisto(self, xdr_ioc, demisto_ioc, mocker):
"""
Given:
- IOC in XDR format.
Then:
- IOC in demisto format.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
output = xdr_ioc_to_demisto(xdr_ioc)
del output['rawJSON']
assert output == demisto_ioc, f'xdr_ioc_to_demisto({xdr_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(demisto_ioc)}' # noqa: E501
class TestCommands:
# test commands full flow
class TestIOCSCommand:
def test_iocs_command_with_enable(self, mocker):
"""
Given:
- enable command
Then:
- Verify enable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-enable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
enable_ioc = mocker.patch('XDR_iocs.prepare_enable_iocs', side_effect=prepare_enable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 enabled.', f'enable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 enabled.' # noqa: E501
assert enable_ioc.call_count == 1, 'enable command not called'
def test_iocs_command_with_disable(self, mocker):
"""
Given:
- disable command
Then:
- Verify disable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-disable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
disable_ioc = mocker.patch('XDR_iocs.prepare_disable_iocs', side_effect=prepare_disable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 disabled.', f'disable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 disabled.' # noqa: E501
assert disable_ioc.call_count == 1, 'disable command not called'
def test_sync(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
sync(client)
assert http_request.call_args.args[0] == 'sync_tim_iocs', 'sync command url changed'
@freeze_time('2020-06-03T02:00:00Z')
def test_iocs_to_keep(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_iocs_to_keep, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
iocs_to_keep(client)
assert http_request.call_args.args[0] == 'iocs_to_keep', 'iocs_to_keep command url changed'
def test_tim_insert_jsons(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'time': '2020-06-03T00:00:00Z'})
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
tim_insert_jsons(client)
assert http_request.call_args.kwargs['url_suffix'] == 'tim_insert_jsons/', 'tim_insert_jsons command url changed'
def test_get_changes(self, mocker):
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'createIndicators')
mocker.patch.object(demisto, 'searchIndicators', return_value={})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
xdr_ioc_to_timeline(list(map(lambda x: str(x[0].get('RULE_INDICATOR')), TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))) # noqa: E501
class TestParams:
tags_test = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tlp_color': ''},
'Cortex XDR',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tag': 'tag1'},
'tag1',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'feedTags': 'tag2', 'tlp_color': 'AMBER'},
'tag2',
'AMBER'
)
]
@pytest.mark.parametrize('demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color', tags_test)
def test_feed_tags_and_tlp_color(self, demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color, mocker):
"""
Given:
- IOC in XDR format.
Then:
- IOC in demisto format.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
mocker.patch.object(demisto, 'params', return_value=param_value)
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'searchIndicators', return_value={})
outputs = mocker.patch.object(demisto, 'createIndicators')
Client.tag = demisto.params().get('feedTags', demisto.params().get('tag', Client.tag))
Client.tlp_color = demisto.params().get('tlp_color')
client = Client({'url': 'yana'})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
output = outputs.call_args.args[0]
assert output[0]['fields']['tags'] == expected_tags
assert output[0]['fields'].get('trafficlightprotocol') == expected_tlp_color
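# Hypothetical invocation for running this module in isolation (assumes pytest, freezegun and
# the demisto test harness are available, and that XDR_iocs.py sits next to this file):
#   $ python -m pytest XDR_iocs_test.py -q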
| 44.042614
| 224
| 0.591111
| 3,461
| 31,006
| 5.037561
| 0.10286
| 0.022713
| 0.022713
| 0.015142
| 0.644566
| 0.579983
| 0.498537
| 0.460166
| 0.427416
| 0.411242
| 0
| 0.044583
| 0.272979
| 31,006
| 703
| 225
| 44.105263
| 0.728862
| 0.08121
| 0
| 0.258206
| 0
| 0.030635
| 0.341319
| 0.0769
| 0
| 0
| 0
| 0
| 0.080963
| 1
| 0.083151
| false
| 0
| 0.006565
| 0.006565
| 0.164114
| 0.004376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f898eb9c872bebbb74a0dcd35cbd3eb8f475a0
| 4,444
|
py
|
Python
|
cloudify_terminal_sdk/netconf_connection.py
|
cloudify-incubator/cloudify-plugins-sdk
|
9805008e739d31e5f9fe3184411648f9be5e6214
|
[
"Apache-2.0"
] | 1
|
2019-04-23T03:06:52.000Z
|
2019-04-23T03:06:52.000Z
|
cloudify_terminal_sdk/netconf_connection.py
|
cloudify-incubator/cloudify-plugins-sdk
|
9805008e739d31e5f9fe3184411648f9be5e6214
|
[
"Apache-2.0"
] | 9
|
2018-12-17T14:08:29.000Z
|
2022-01-16T17:52:54.000Z
|
cloudify_terminal_sdk/netconf_connection.py
|
cloudify-incubator/cloudify-plugins-sdk
|
9805008e739d31e5f9fe3184411648f9be5e6214
|
[
"Apache-2.0"
] | 3
|
2021-12-13T20:53:37.000Z
|
2022-01-20T09:01:47.000Z
|
# Copyright (c) 2015-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify_common_sdk import exceptions
from cloudify_terminal_sdk import base_connection
# terminator that ends every NETCONF 1.0 message
NETCONF_1_0_END = "]]>]]>"
# base level of communication
NETCONF_1_0_CAPABILITY = 'urn:ietf:params:netconf:base:1.0'
# package based communication
NETCONF_1_1_CAPABILITY = 'urn:ietf:params:netconf:base:1.1'
class NetConfConnection(base_connection.SSHConnection):
# ssh connection
ssh = None
chan = None
# receive buffer; keeps partial messages between calls
buff = ""
current_level = NETCONF_1_0_CAPABILITY
def connect(
self, ip, user, hello_string, password=None, key_content=None,
port=830
):
"""open connection and send xml string by link"""
self._ssh_connect(ip, user, password, key_content, port)
self.conn = self.ssh.get_transport().open_session()
self.conn.invoke_subsystem('netconf')
self.buff = ""
capabilities = self.send(hello_string)
return capabilities
def send(self, xml):
"""send xml string by connection"""
if self.current_level == NETCONF_1_1_CAPABILITY:
self._send_1_1(xml)
return self._recv_1_1()
else:
self._send_1_0(xml)
return self._recv_1_0()
def _send_1_0(self, xml):
"""send xml string with NETCONF_1_0_END by connection"""
if xml:
message = xml + NETCONF_1_0_END
self._conn_send(message)
def _recv_1_0(self):
"""recv xml string with NETCONF_1_0_END by connection"""
while self.buff.find(NETCONF_1_0_END) == -1:
self.buff += self._conn_recv(8192)
if self.conn.closed:
break
package_end = self.buff.find(NETCONF_1_0_END)
# we have already closed connection
if package_end == -1:
package_end = len(self.buff)
response = self.buff[:package_end]
self.buff = self.buff[package_end + len(NETCONF_1_0_END):]
return response
def _send_1_1(self, xml):
"""send xml string as package by connection"""
if xml:
message = "\n#{0}\n".format(len(xml))
message += xml
message += "\n##\n"
self._conn_send(message)
def _recv_1_1(self):
"""send xml string as package by connection"""
get_everything = False
response = ""
while not get_everything:
if len(self.buff) < 2:
self.buff += self._conn_recv(2)
# skip new line
if self.buff[:2] != "\n#":
# the connection has already been closed;
# the caller should stop asking for new messages
if not self.buff and self.conn.closed:
return ""
raise exceptions.NonRecoverableError("no start")
self.buff = self.buff[2:]
# get package length
while self.buff.find("\n") == -1:
self.buff += self._conn_recv(20)
if self.buff[:2] == "#\n":
get_everything = True
self.buff = self.buff[2:]
break
length = int(self.buff[:self.buff.find("\n")])
self.buff = self.buff[self.buff.find("\n") + 1:]
# load current package
while length > len(self.buff):
self.buff += self._conn_recv(length - len(self.buff))
response += self.buff[:length]
self.buff = self.buff[length:]
return response
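    # A minimal sketch of the two framings handled above (RFC 6242), assuming
    # the payload "<rpc/>" (6 bytes); the literals are illustrative only:
    #
    #   NETCONF 1.0, end-of-message framing (_send_1_0 / _recv_1_0):
    #       <rpc/>]]>]]>
    #   NETCONF 1.1, chunked framing (_send_1_1 / _recv_1_1):
    #       \n#6\n<rpc/>\n##\n
    #
    # _recv_1_1 strips each "\n#<length>\n" header, accumulates <length> bytes
    # of chunk data, and stops when the end-of-chunks marker "\n##\n" arrives.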
    def close(self, goodbye_string=None):
        """send xml string by link and close connection"""
        response = None
        if goodbye_string:
            # we have something to say
            response = self.send(goodbye_string)
        self._ssh_close()
        return response
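# Illustrative usage sketch (not from the original module; the host,
# credentials and hello message below are placeholder assumptions, and
# construction details depend on base_connection.SSHConnection):
#
#     hello = (
#         '<hello xmlns="urn:ietf:params:netconf:base:1.0"><capabilities>'
#         '<capability>urn:ietf:params:netconf:base:1.0</capability>'
#         '</capabilities></hello>'
#     )
#     connection = NetConfConnection()
#     capabilities = connection.connect(
#         '192.0.2.10', 'admin', hello, password='secret')
#     if NETCONF_1_1_CAPABILITY in capabilities:
#         # switch to chunked framing if the server advertised base:1.1
#         connection.current_level = NETCONF_1_1_CAPABILITY
#     reply = connection.send(
#         '<rpc message-id="1" xmlns="urn:ietf:params:netconf:base:1.0">'
#         '<get-config><source><running/></source></get-config></rpc>')
#     connection.close(
#         '<rpc message-id="2" xmlns="urn:ietf:params:netconf:base:1.0">'
#         '<close-session/></rpc>')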
| 35.552
| 74
| 0.605761
| 578
| 4,444
| 4.486159
| 0.290657
| 0.092557
| 0.055534
| 0.049364
| 0.298496
| 0.174315
| 0.121095
| 0.028538
| 0.028538
| 0
| 0
| 0.023665
| 0.296355
| 4,444
| 124
| 75
| 35.83871
| 0.805564
| 0.278578
| 0
| 0.141026
| 0
| 0
| 0.035283
| 0.020343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089744
| false
| 0.025641
| 0.025641
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
22f9659775a0befbb80b23123b166ed4d7384748
| 15,411
|
py
|
Python
|
Seismic_Conv1D_dec.py
|
dyt1990/Seis_DCEC
|
6cc56a7db10dd87b0ef39ece73578fca8b23c55f
|
[
"MIT"
] | 1
|
2021-04-05T06:03:16.000Z
|
2021-04-05T06:03:16.000Z
|
Seismic_Conv1D_dec.py
|
dyt1990/Seis_DCEC
|
6cc56a7db10dd87b0ef39ece73578fca8b23c55f
|
[
"MIT"
] | null | null | null |
Seismic_Conv1D_dec.py
|
dyt1990/Seis_DCEC
|
6cc56a7db10dd87b0ef39ece73578fca8b23c55f
|
[
"MIT"
] | 2
|
2019-06-13T03:34:20.000Z
|
2019-12-16T05:57:30.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 19 17:48:13 2018
@author: Sediment
"""
# -*- coding: utf-8 -*-
'''
Keras implementation of deep embedder to improve clustering, inspired by:
"Unsupervised Deep Embedding for Clustering Analysis" (Xie et al, ICML 2016)
Definition can accept somewhat custom neural networks. Defaults are from paper.
'''
import sys
import numpy as np
import pandas as pd
import keras.backend as K
from keras.initializers import RandomNormal
from keras.engine.topology import Layer, InputSpec
from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Input, Conv1D, MaxPooling1D, BatchNormalization, Activation, Flatten, UpSampling1D, Reshape
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Nadam
from keras.regularizers import l2
from sklearn.preprocessing import normalize
from keras.callbacks import LearningRateScheduler
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
# sys.version[0] is a string, so comparing it with the integer 2 never matched;
# use sys.version_info to pick the Python 2 pickle implementation.
if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle
class ClusteringLayer(Layer):
    '''
    Clustering layer which converts latent space Z of input layer
    into a probability vector for each cluster defined by its centre in
    Z-space. Use Kullback-Leibler divergence as loss, with a probability
    target distribution.
    # Arguments
        output_dim: int > 0. Should be same as number of clusters.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and (output_dim,) for weights and biases respectively.
        alpha: parameter in Student's t-distribution. Default is 1.0.
    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Output shape
        2D tensor with shape: `(nb_samples, output_dim)`.
    '''

    def __init__(self, output_dim, input_dim=None, weights=None, alpha=1.0, **kwargs):
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.alpha = alpha
        # kmeans cluster centre locations
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(ClusteringLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 2
        input_dim = input_shape[1]
        self.input_spec = [InputSpec(dtype=K.floatx(),
                                     shape=(None, input_dim))]
        self.W = K.variable(self.initial_weights)
        self.trainable_weights = [self.W]

    def call(self, x, mask=None):
        q = 1.0/(1.0 + K.sqrt(K.sum(K.square(K.expand_dims(x, 1) - self.W), axis=2))**2 /self.alpha)
        q = q**((self.alpha+1.0)/2.0)
        q = K.transpose(K.transpose(q)/K.sum(q, axis=1))
        return q

    def get_output_shape_for(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return (input_shape[0], self.output_dim)

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return (input_shape[0], self.output_dim)

    def get_config(self):
        config = {'output_dim': self.output_dim,
                  'input_dim': self.input_dim}
        base_config = super(ClusteringLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
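# For reference (added note, not in the original script): ClusteringLayer.call()
# computes the Student's t soft assignment from Xie et al. (2016),
#
#     q_ij = (1 + ||z_i - mu_j||^2 / alpha)^(-(alpha + 1) / 2)
#            / sum_k (1 + ||z_i - mu_k||^2 / alpha)^(-(alpha + 1) / 2)
#
# where z_i is the latent vector of sample i and mu_j is cluster centre j
# (a row of self.W). With the default alpha = 1.0 this is the kernel of a
# t-distribution with one degree of freedom, so q_ij behaves like the
# probability of assigning sample i to cluster j.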
class DeepEmbeddingClustering(object):
    def __init__(self,
                 n_clusters,
                 input_dim,
                 learning_rate=0.1,
                 encoded=None,
                 decoded=None,
                 alpha=1.0,
                 pretrained_weights=None,
                 cluster_centres=None,
                 batch_size=256,
                 conv_filters=[8, 16, 32],
                 kernel_size=12,
                 Maxpooling_size=2,
                 LatentSpace_Z=25,
                 finetune_epochs=5,
                 **kwargs):

        super(DeepEmbeddingClustering, self).__init__()

        self.n_clusters = n_clusters
        self.input_dim = input_dim
        self.encoded = encoded
        self.decoded = decoded
        self.alpha = alpha
        self.pretrained_weights = pretrained_weights
        self.cluster_centres = cluster_centres
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.iters_lr_update = 6000
        self.lr_change_rate = 0.1
        self.finetune_epochs = finetune_epochs
        self.conv_filters = conv_filters
        self.kernel_size = kernel_size
        self.Maxpooling_size = Maxpooling_size
        self.LatentSpace_Z = LatentSpace_Z
        self.encoders = []
        self.decoders = []

        input_data = Input(shape=(self.input_dim, 1))
        x = Conv1D(self.conv_filters[0], (self.kernel_size), activation='relu', padding='same')(input_data)
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)
        x = MaxPooling1D((self.Maxpooling_size), padding='same')(x)
        x = Conv1D(self.conv_filters[1], (self.kernel_size), activation='relu', padding='same')(x)
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)
        x = MaxPooling1D((self.Maxpooling_size), padding='same')(x)
        x = Conv1D(self.conv_filters[2], (self.kernel_size), activation='relu', padding='same')(x)
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)
        x = MaxPooling1D((self.Maxpooling_size), padding='same')(x)
        # at this point the representation is (16 x conv_filters) i.e. 128-dimensional
        x = Flatten()(x)
        # at this point the representation is (6) i.e. 128-dimensional
        encoded = Dense(LatentSpace_Z, activation='relu')(x)

        # 256 = input_data / ((2^maxpool_num) * conv_fileters * 4)
        x = Dense(self.input_dim // (2**3) * self.conv_filters[2], kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),
                  bias_initializer='zeros', activation='relu')(encoded)
        x = Reshape((self.input_dim // (2**3), self.conv_filters[2]))(x)  # 16 * 2 * 2 * 2 = 128; one factor of 2 per max-pooling layer
        x = Conv1D(self.conv_filters[2], (self.kernel_size), activation='relu', padding='same')(x)
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)
        x = UpSampling1D((self.Maxpooling_size))(x)
        x = Conv1D(self.conv_filters[1], (self.kernel_size), activation='relu', padding='same')(x)
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)
        x = UpSampling1D((self.Maxpooling_size))(x)
        x = Conv1D(self.conv_filters[0], (1), activation='relu')(x)
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)
        x = UpSampling1D((self.Maxpooling_size))(x)
        decoded = Conv1D(1, (self.kernel_size), activation='relu', padding='same')(x)

        self.autoencoder = Model(input_data, decoded)
        self.autoencoder.summary()
        self.encoder = Model(input_data, encoded)

        # build the end-to-end autoencoder for finetuning
        # Note that at this point dropout is discarded
        self.encoder.compile(loss='mse', optimizer=SGD(lr=self.learning_rate, decay=0, momentum=0.9))
        self.autoencoder.compile(loss='mse', optimizer=SGD(lr=self.learning_rate, decay=0, momentum=0.9))

        if cluster_centres is not None:
            assert cluster_centres.shape[0] == self.n_clusters
            assert cluster_centres.shape[1] == self.encoder.layers[-1].output_dim

        if self.pretrained_weights is not None:
            self.autoencoder.load_weights(self.pretrained_weights)
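    # Shape bookkeeping for the decoder sizing above, sketched with assumed
    # values (input_dim = 200, conv_filters = [8, 16, 32], Maxpooling_size = 2):
    # three pooling layers shrink the time axis to 200 / 2**3 = 25 steps with
    # 32 filters, i.e. 25 * 32 = 800 units after Flatten(). The decoder's
    # Dense(self.input_dim // (2**3) * self.conv_filters[2]) layer therefore
    # produces exactly the 800 units that Reshape((25, 32)) expects, which is
    # why input_dim should be divisible by 2**3 = 8.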
    def p_mat(self, q):
        weight = q**2 / q.sum(0)
        return (weight.T / weight.sum(1)).T
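    # For reference: p_mat() builds the auxiliary target distribution of the
    # DEC paper, p_ij = (q_ij**2 / f_j) / sum_k (q_ik**2 / f_k) with
    # f_j = sum_i q_ij (soft cluster frequencies). Squaring q sharpens the
    # assignments while dividing by f_j counteracts collapsing everything into
    # one cluster; cluster() below minimises KL(P || Q) through the
    # 'kullback_leibler_divergence' loss compiled in initialize().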
    def initialize(self, X, save_autoencoder=False, finetune_iters=5000):
        if self.pretrained_weights is None:
            iters_per_epoch = int(len(X) / self.batch_size)
            print('layerwise pretrain')
            lr_epoch_update = max(1, self.iters_lr_update / float(iters_per_epoch))

            def step_decay(epoch):
                initial_rate = self.learning_rate
                factor = int(epoch / lr_epoch_update)
                lr = initial_rate / (10 ** factor)
                return lr

            lr_schedule = LearningRateScheduler(step_decay)
            # update encoder and decoder weights:
            self.autoencoder.fit(X, X, batch_size=self.batch_size, epochs=self.finetune_epochs, callbacks=[lr_schedule])

            if save_autoencoder:
                self.autoencoder.save_weights('autoencoder.h5')
        else:
            print('Loading pretrained weights for autoencoder.')
            self.autoencoder.load_weights(self.pretrained_weights)

        # update encoder, decoder
        # TODO: is this needed? Might be redundant...
        for i in range(len(self.encoder.layers)):
            self.encoder.layers[i].set_weights(self.autoencoder.layers[i].get_weights())

        # initialize cluster centres using k-means
        print('Initializing cluster centres with k-means.')
        if self.cluster_centres is None:
            np.random.seed(42)  # random seed, used to initialise the cluster centres
            kmeans = KMeans(n_clusters=self.n_clusters, max_iter=100, n_init=6, precompute_distances='auto', random_state=None, tol=1e-4)
            self.y_pred = kmeans.fit_predict(self.encoder.predict(X))
            self.cluster_centres = kmeans.cluster_centers_
            print ('cluster_centres:\n ', self.cluster_centres)

        # prepare DCEC model
        self.DCEC = Sequential([self.encoder,
                                ClusteringLayer(self.n_clusters,
                                                weights=self.cluster_centres,
                                                name='clustering')])
        self.DCEC.compile(loss='kullback_leibler_divergence', optimizer=SGD(lr=self.learning_rate, decay=0, momentum=0.9))
        # loss: 'mean_squared_error', 'categorical_crossentropy', 'hinge', 'squared_hinge'
        return
    def visualizeData(self, Z, labels, num_clusters, csv_filename, title):
        '''
        TSNE visualization of the points in latent space Z
        :param Z: Numpy array containing points in latent space in which clustering was performed
        :param labels: True labels - used for coloring points
        :param num_clusters: Total number of clusters
        :param title: filename where the plot should be saved
        :return: None - (side effect) saves clustering visualization plot in specified location
        '''
        print ('Start visualizing Data')
        labels = labels.astype(int)
        tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
        Z_tsne = tsne.fit_transform(Z)
        fig = plt.figure()
        plt.scatter(Z_tsne[:, 0], Z_tsne[:, 1], s=2, c=labels, cmap=plt.cm.get_cmap("jet", num_clusters))
        plt.colorbar(ticks=range(num_clusters))
        # fig.savefig(title, dpi=fig.dpi)
        fig.savefig(title, dpi=600)

        # save t_sne results
        print('Save t_sne results')
        dataframe = pd.DataFrame({'Z_tsne_x':Z_tsne[:, 0], 'Z_tsne_y':Z_tsne[:, 1], 'labels':labels})
        dataframe.to_csv(csv_filename, index=False, sep=',')
    def cluster(self, X, y=None,
                tol=0.001, update_interval=None,
                iter_max=799,
                save_interval=None,
                **kwargs):

        if update_interval is None:
            # 1 epochs
            update_interval = X.shape[0]/self.batch_size
        print('Update interval', update_interval)

        if save_interval is None:
            # 50 epochs
            save_interval = X.shape[0]/self.batch_size*50
        print('Save interval', save_interval)

        assert save_interval >= update_interval

        train = True
        iteration, index = 0, 0
        self.accuracy = []

        while train:
            sys.stdout.write('\r')
            # cutoff iteration
            if iter_max < iteration:
                print('Reached maximum iteration limit. Stopping training.')
                return self.y_pred

            # update (or initialize) probability distributions and propagate weight changes
            # from DCEC model to encoder.
            if iteration % update_interval == 0:
                self.q = self.DCEC.predict(X, verbose=0)
                self.p = self.p_mat(self.q)

                y_pred = self.q.argmax(1)
                delta_label = ((y_pred == self.y_pred).sum().astype(np.float32) / y_pred.shape[0])
                if y is None:
                    print(str(np.round(delta_label*100, 5))+'% change in label assignment')

                if iteration > 0 and delta_label < tol:
                    print('delta_label ', delta_label, '< tol ', tol)
                    print('Reached tolerance threshold. Stopping training.')
                    train = False
                    continue
                else:
                    self.y_pred = y_pred

                for i in range(len(self.encoder.layers)):
                    self.encoder.layers[i].set_weights(self.DCEC.layers[0].layers[i].get_weights())
                self.cluster_centres = self.DCEC.layers[-1].get_weights()[0]

            # train on batch
            sys.stdout.write('Iteration %d, ' % iteration)
            if (index+1)*self.batch_size >= X.shape[0]:
                loss = self.DCEC.train_on_batch(X[index*self.batch_size::], self.p[index*self.batch_size::])
                index = 0
                sys.stdout.write('Loss %f\n' % loss)
            else:
                loss = self.DCEC.train_on_batch(X[index*self.batch_size:(index+1) * self.batch_size],
                                                self.p[index*self.batch_size:(index+1) * self.batch_size])
                sys.stdout.write('Loss %f\n' % loss)
                index += 1

            # save intermediate
            if iteration % save_interval == 0:
                z = self.encoder.predict(X)
                pca = PCA(n_components=2).fit(z)
                z_2d = pca.transform(z)
                clust_2d = pca.transform(self.cluster_centres)
                # save states for visualization
                pickle.dump({'z_2d': z_2d, 'clust_2d': clust_2d, 'q': self.q, 'p': self.p},
                            open('c'+str(iteration)+'.pkl', 'wb'))
                # save DCEC model checkpoints
                self.DCEC.save('DCEC_model_'+str(iteration)+'.h5')

            iteration += 1
            sys.stdout.flush()
        return y_pred
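# Illustrative end-to-end usage sketch (file name, shapes and cluster count are
# assumptions, not from the original script); X is expected with shape
# (n_samples, input_dim, 1) to match Input(shape=(input_dim, 1)):
#
#     X = np.load('seismic_traces.npy').reshape(-1, 200, 1)   # hypothetical data file
#     dcec = DeepEmbeddingClustering(n_clusters=5, input_dim=200,
#                                    batch_size=256, LatentSpace_Z=25)
#     dcec.initialize(X, save_autoencoder=True)   # pretrain autoencoder, k-means centres
#     y_pred = dcec.cluster(X, iter_max=799)      # refine with the clustering loss
#     Z = dcec.encoder.predict(X)
#     dcec.visualizeData(Z, y_pred, 5, 'tsne_embedding.csv', 'tsne_embedding.png')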
| 43.78125
| 138
| 0.594251
| 1,869
| 15,411
| 4.745318
| 0.228464
| 0.004961
| 0.017589
| 0.012628
| 0.23148
| 0.211523
| 0.203405
| 0.162814
| 0.162814
| 0.149171
| 0
| 0.020189
| 0.299332
| 15,411
| 351
| 139
| 43.905983
| 0.801167
| 0.175394
| 0
| 0.133333
| 0
| 0
| 0.050293
| 0.002263
| 0
| 0
| 0
| 0.002849
| 0.026667
| 1
| 0.053333
| false
| 0
| 0.088889
| 0
| 0.191111
| 0.053333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|