hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c4cd342a89bf95f5ab1d0a8be55dcb63cf8a4fa | 5,243 | py | Python | code/util/postprocessing.py | goldleaf3i/declutter-reconstruct | 954b755a1eced34af50d31ee2e3938a0b751cc4d | ["MIT"] | 2 | 2022-03-14T07:37:45.000Z | 2022-03-25T09:01:48.000Z | code/util/postprocessing.py | goldleaf3i/declutter-reconstruct | 954b755a1eced34af50d31ee2e3938a0b751cc4d | ["MIT"] | 1 | 2022-03-24T07:34:16.000Z | 2022-03-27T11:20:27.000Z | code/util/postprocessing.py | goldleaf3i/declutter-reconstruct | 954b755a1eced34af50d31ee2e3938a0b751cc4d | ["MIT"] | 1 | 2022-03-08T05:37:22.000Z | 2022-03-08T05:37:22.000Z |
from PIL import Image
def get_colors(pix_data, map_image):
colors = []
dictionary = {}
# type dictionary
for y in range(map_image.size[1]):
for x in range(map_image.size[0]):
if pix_data[x, y] != (0, 0, 0, 255):
color = pix_data[x, y]
if color not in colors:
colors.append(color)
dictionary[color] = 1
else:
points = dictionary[color]
points += 1
dictionary[color] = points
return dictionary, colors
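# get_colors scans the image once and returns (dictionary, colors): `dictionary`
# maps every non-black RGBA color to its pixel count, and `colors` lists the
# distinct colors in the order they were first seen.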
def remove_small_color(dictionary, colors, th):
keys = dictionary.keys()
for key in list(keys):
if dictionary[key] <= th:
colors.remove(key)
def get_color(pos, pix_data, colors):
color = pix_data[pos]
if color in colors:
return True, color
return False, None
def check_position1(pos, size):
if pos[0] >= size[0] - 1:
return False
return True
def check_position2(pos):
if pos[0] <= 0:
return False
return True
def check_position3(pos, size):
if pos[1] >= size[1] - 1:
return False
return True
def check_position4(pos):
if pos[1] <= 0:
return False
return True
def check_position5(pos, size):
if pos[1] >= size[1] - 1 or pos[0] >= size[0] - 1:
return False
return True
def check_position6(pos, size):
if pos[1] >= size[1] - 1 or pos[0] <= 0:
return False
return True
def check_position7(pos, size):
if pos[1] <= 0 or pos[0] >= size[0] - 1:
return False
return True
def check_position8(pos):
if pos[1] <= 0 or pos[0] <= 0:
return False
return True
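# The eight check_position helpers above guard the eight search directions used by
# compute_distance below: 1 = +x, 2 = -x, 3 = +y, 4 = -y, and 5-8 the four diagonals.
# Each returns False once the next probe in that direction would leave the image.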
def compute_distance(pix_data, position, size, colors):
p1, p2, p3, p4, p5, p6, p7, p8 = True, True, True, True, True, True, True, True
pos_x = position[0]
pos_y = position[1]
if pos_x == size[0] - 1:
p1, p5, p7 = False, False, False
if pos_x == 0:
p2, p6, p8 = False, False, False
if pos_y == size[1] - 1:
p3, p5, p6 = False, False, False
if pos_y == 0:
p4, p7, p8 = False, False, False
ind = 0
color = None
while True:
ind += 1
if p1:
pos = pos_x + ind, pos_y
p1 = check_position1(pos, size)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if p2:
pos = pos_x - ind, pos_y
p2 = check_position2(pos)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if p3:
pos = pos_x, pos_y + ind
p3 = check_position3(pos, size)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if p4:
pos = pos_x, pos_y - ind
p4 = check_position4(pos)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if p5:
pos = pos_x + ind, pos_y + ind
p5 = check_position5(pos, size)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if p6:
pos = pos_x - ind, pos_y + ind
p6 = check_position6(pos, size)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if p7:
pos = pos_x + ind, pos_y - ind
p7 = check_position7(pos, size)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if p8:
pos = pos_x - ind, pos_y - ind
p8 = check_position8(pos)
flag, color = get_color(pos, pix_data, colors)
if flag:
break
if not p1 and not p2 and not p3 and not p4 and not p5 and not p6 and not p7 and not p8:
break
if color is None:
black = 0, 0, 0, 255
return black
return color
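# compute_distance expands a ring search outwards from `position`, one pixel per
# iteration in each still-valid direction, and returns the first pixel color that
# belongs to `colors`; if every direction runs off the image it falls back to
# opaque black (0, 0, 0, 255).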
def oversegmentation(segmentation_map_path, th_post, filepath='.'):
initial_map = Image.open(segmentation_map_path)
final_map = initial_map.copy()
pix_data_initial = initial_map.load()
pix_data_final = final_map.load()
dictionary, colors = get_colors(pix_data_initial, initial_map)
new_colors = colors[:]
if (255, 255, 255, 255) in new_colors:
new_colors.remove((255, 255, 255, 255))
del dictionary[(255, 255, 255, 255)]
remove_small_color(dictionary, new_colors, th_post)
colors_eliminated = [(255, 255, 255, 255)]
for color in colors:
if color not in new_colors:
colors_eliminated.append(color)
for y in range(initial_map.size[1]):
for x in range(initial_map.size[0]):
position = [x, y]
pixel_color = pix_data_initial[x, y]
if pixel_color in colors_eliminated:
new_pixel_color = compute_distance(pix_data_initial, position, initial_map.size, new_colors)
pix_data_final[x, y] = new_pixel_color
title = filepath + '8b_rooms_th1_on_map_post.png'
title_pdf = filepath + '8b_rooms_th1_on_map_post.pdf'
print('8b_rooms_th1_on_map_post')
final_map.save(title)
pdf_image = final_map.convert('RGB')
pdf_image.save(title_pdf)
return title, new_colors
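# Usage sketch (hypothetical paths and threshold, for illustration only):
# post_path, kept_colors = oversegmentation('8b_rooms_th1_on_map.png', th_post=40, filepath='output/')
# The call writes <filepath>8b_rooms_th1_on_map_post.png and .pdf and returns the
# PNG path together with the colors that survived the size threshold.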
def clear_rooms(room_image, param_obj, rooms):
initial_map = Image.open(room_image)
final_map = initial_map.copy()
pix_data_initial = initial_map.load()
pix_data_final = final_map.load()
dictionary, colors = get_colors(pix_data_initial, initial_map)
new_colors = colors[:]
remove_small_color(dictionary, new_colors, param_obj.th_post)
colors_eliminated = []
for color in colors:
if color not in new_colors:
colors_eliminated.append(color)
for y in range(initial_map.size[1]):
for x in range(initial_map.size[0]):
position = [x, y]
pixel_color = pix_data_initial[x, y]
if pixel_color in colors_eliminated:
new_pixel_color = compute_distance(pix_data_initial, position, initial_map.size, new_colors)
pix_data_final[x, y] = new_pixel_color
final_map.save(room_image)
| 23.831818 | 96 | 0.682434 | 875 | 5,243 | 3.881143 | 0.108571 | 0.053592 | 0.029152 | 0.037102 | 0.603946 | 0.584511 | 0.517668 | 0.450825 | 0.44523 | 0.417256 | 0 | 0.041697 | 0.204082 | 5,243 | 219 | 97 | 23.940639 | 0.772106 | 0.002861 | 0 | 0.40113 | 0 | 0 | 0.016073 | 0.015308 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079096 | false | 0 | 0.00565 | 0 | 0.20904 | 0.00565 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c4d8c7b8f1c7dfd0975fedacde59945660a005d | 3,333 | py | Python | tests/mutations/test_login.py | openlobby/openlobby-server | b7a1a2b73e903c4da57970926844b0639dce5aae | ["MIT"] | 7 | 2017-11-23T15:24:50.000Z | 2018-11-29T21:47:55.000Z | tests/mutations/test_login.py | openlobby/openlobby-server | b7a1a2b73e903c4da57970926844b0639dce5aae | ["MIT"] | 20 | 2018-02-21T22:25:42.000Z | 2020-06-05T17:22:36.000Z | tests/mutations/test_login.py | openlobby/openlobby-server | b7a1a2b73e903c4da57970926844b0639dce5aae | ["MIT"] | 3 | 2018-03-08T10:05:01.000Z | 2018-08-16T14:36:28.000Z |
import json
import pytest
import re
from urllib.parse import urlparse, urlunparse, parse_qs
from unittest.mock import patch
from openlobby.core.models import OpenIdClient, LoginAttempt
from openlobby.core.openid import register_client
pytestmark = pytest.mark.django_db
def check_authorization_url(authorization_url, oid_client, state, snapshot):
url = urlparse(authorization_url)
url_without_query = urlunparse((url.scheme, url.netloc, url.path, "", "", ""))
assert url_without_query == "{}/protocol/openid-connect/auth".format(
oid_client.issuer
)
qs = parse_qs(url.query)
assert qs["client_id"][0] == oid_client.client_id
assert qs["response_type"][0] == "code"
assert qs["scope"][0] == "openid"
assert qs["redirect_uri"][0] == "http://localhost:8010/login-redirect"
assert qs["state"][0] == state
snapshot.assert_match(json.loads(qs["claims"][0]))
def test_login__known_openid_client(issuer, call_api, snapshot):
oc = register_client(issuer)
oid_client = OpenIdClient.objects.create(
name="Test",
issuer=issuer,
client_id=oc.client_id,
client_secret=oc.client_secret,
)
app_redirect_uri = "http://i.am.pirate"
openid_uid = "wolf@openid.provider"
query = """
mutation {{
login (input: {{ openidUid: "{uid}", redirectUri: "{uri}" }}) {{
authorizationUrl
}}
}}
""".format(
uid=openid_uid, uri=app_redirect_uri
)
# Keycloak server used for tests does not support issuer discovery by UID, so we mock it
with patch(
"openlobby.core.api.mutations.discover_issuer", return_value=issuer
) as mock:
response = call_api(query)
mock.assert_called_once_with(openid_uid)
assert "errors" not in response
authorization_url = response["data"]["login"]["authorizationUrl"]
la = LoginAttempt.objects.get(openid_client__id=oid_client.id)
assert la.app_redirect_uri == app_redirect_uri
assert la.openid_uid == openid_uid
check_authorization_url(authorization_url, oid_client, la.state, snapshot)
def test_login__new_openid_client(issuer, call_api, snapshot):
app_redirect_uri = "http://i.am.pirate"
openid_uid = "wolf@openid.provider"
query = """
mutation {{
login (input: {{ openidUid: "{uid}", redirectUri: "{uri}" }}) {{
authorizationUrl
}}
}}
""".format(
uid=openid_uid, uri=app_redirect_uri
)
# Keycloak server used for tests does not support issuer discovery by UID, so we mock it
with patch(
"openlobby.core.api.mutations.discover_issuer", return_value=issuer
) as mock:
response = call_api(query)
mock.assert_called_once_with(openid_uid)
assert "errors" not in response
authorization_url = response["data"]["login"]["authorizationUrl"]
oid_client = OpenIdClient.objects.get()
assert oid_client.name == issuer
assert oid_client.issuer == issuer
assert re.match(r"\w+", oid_client.client_id)
assert re.match(r"\w+", oid_client.client_secret)
la = LoginAttempt.objects.get(openid_client__id=oid_client.id)
assert la.app_redirect_uri == app_redirect_uri
assert la.openid_uid == openid_uid
check_authorization_url(authorization_url, oid_client, la.state, snapshot)
| 33.33 | 92 | 0.687969 | 426 | 3,333 | 5.14554 | 0.253521 | 0.053376 | 0.051095 | 0.031022 | 0.631843 | 0.620438 | 0.590328 | 0.569343 | 0.541971 | 0.541971 | 0 | 0.003724 | 0.194419 | 3,333 | 99 | 93 | 33.666667 | 0.812663 | 0.051905 | 0 | 0.531646 | 0 | 0 | 0.204308 | 0.037694 | 0 | 0 | 0 | 0 | 0.240506 | 1 | 0.037975 | false | 0 | 0.088608 | 0 | 0.126582 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c4e5066f0c12b9a851914044ff7e83327de8634 | 26,892 | py | Python | scripts/walking_simulation.py | mcx/quadruped_ctrl | 6e1093c1d67ff835c02d66d0ad611d2c67d912ad | ["MIT"] | 161 | 2020-10-04T13:43:11.000Z | 2022-03-22T07:28:32.000Z | scripts/walking_simulation.py | DrKaung-Khant-Ko-Ko-Han/quadruped_ctrl | 41f10a780df72e5cddbc0036a65cf0304c6d70b0 | ["MIT"] | 13 | 2020-09-22T01:38:06.000Z | 2022-01-27T08:57:48.000Z | scripts/walking_simulation.py | DrKaung-Khant-Ko-Ko-Han/quadruped_ctrl | 41f10a780df72e5cddbc0036a65cf0304c6d70b0 | ["MIT"] | 60 | 2020-07-03T07:15:26.000Z | 2022-03-22T07:28:38.000Z |
#!/usr/bin/env python
import os
import numpy
import pyquaternion
import pcl
import tf2_ros
import rospy
import rospkg
import threading
import random
import ctypes
from PIL import Image as pil
import pybullet as p
import pybullet_data
from pybullet_utils import gazebo_world_parser
from sensor_msgs.msg import Image, Imu, JointState, PointCloud2, PointField
from nav_msgs.msg import Odometry
from geometry_msgs.msg import TransformStamped, Twist
from quadruped_ctrl.srv import QuadrupedCmd, QuadrupedCmdResponse
from whole_body_state_msgs.msg import WholeBodyState
from whole_body_state_msgs.msg import JointState as WBJointState
from whole_body_state_msgs.msg import ContactState as WBContactState
class StructPointer(ctypes.Structure):
_fields_ = [("eff", ctypes.c_double * 12)]
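# StructPointer mirrors the struct returned by the C++ gait controller: `eff` holds
# the twelve joint torques that are later applied through p.setJointMotorControlArray.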
class WalkingSimulation(object):
def __init__(self):
self.get_last_vel = [0] * 3
self.robot_height = 0.30
self.motor_id_list = [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14]
self.init_new_pos = [0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self.__init_ros()
self.__load_controller()
self.__init_simulator()
add_thread = threading.Thread(target=self.__thread_job)
add_thread.start()
if self.camera:
add_thread_1 = threading.Thread(target=self.__camera_update)
add_thread_1.start()
def __init_ros(self):
self.terrain = rospy.get_param('/simulation/terrain')
self.camera = rospy.get_param('/simulation/camera')
self.lateralFriction = rospy.get_param('/simulation/lateralFriction')
self.spinningFriction = rospy.get_param('/simulation/spinningFriction')
self.freq = rospy.get_param('/simulation/freq')
self.stand_kp = rospy.get_param('/simulation/stand_kp')
self.stand_kd = rospy.get_param('/simulation/stand_kd')
self.joint_kp = rospy.get_param('/simulation/joint_kp')
self.joint_kd = rospy.get_param('/simulation/joint_kd')
rospy.loginfo("lateralFriction = " + str(self.lateralFriction) +
" spinningFriction = " + str(self.spinningFriction))
rospy.loginfo(" freq = " + str(self.freq) + " PID = " +
str([self.stand_kp, self.stand_kd, self.joint_kp, self.joint_kd]))
self.s0 = rospy.Service('gait_type', QuadrupedCmd, self.__callback_gait)
self.s1 = rospy.Service('robot_mode', QuadrupedCmd, self.__callback_mode)
self.s2 = rospy.Subscriber("cmd_vel", Twist, self.__callback_body_vel, buff_size=30)
self.robot_tf = tf2_ros.TransformBroadcaster()
def __load_controller(self):
self.path = rospkg.RosPack().get_path('quadruped_ctrl')
so_file = self.path.replace('src/quadruped_ctrl', 'devel/lib/libquadruped_ctrl.so')
if(not os.path.exists(so_file)):
so_file = self.path.replace('src/quadruped_ctrl', 'build/lib/libquadruped_ctrl.so')
if(not os.path.exists(so_file)):
rospy.logerr("cannot find cpp.so file")
self.cpp_gait_ctrller = ctypes.cdll.LoadLibrary(so_file)
self.cpp_gait_ctrller.torque_calculator.restype = ctypes.POINTER(StructPointer)
rospy.loginfo("find so file = " + so_file)
def __init_simulator(self):
robot_start_pos = [0, 0, 0.42]
p.connect(p.GUI) # or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) # optionally
p.resetSimulation()
p.setTimeStep(1.0/self.freq)
p.setGravity(0, 0, -9.81)
self.reset = p.addUserDebugParameter("reset", 1, 0, 0)
self.low_energy_mode = p.addUserDebugParameter("low_energy_mode", 1, 0, 0)
self.high_performance_mode = p.addUserDebugParameter("high_performance_mode", 1, 0, 0)
p.resetDebugVisualizerCamera(0.2, 45, -30, [1, -1, 1])
heightPerturbationRange = 0.06
numHeightfieldRows = 256
numHeightfieldColumns = 256
if self.terrain == "plane":
planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)
ground_id = p.createMultiBody(0, planeShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "random1":
heightfieldData = [0]*numHeightfieldRows*numHeightfieldColumns
for j in range(int(numHeightfieldColumns/2)):
for i in range(int(numHeightfieldRows/2)):
height = random.uniform(0, heightPerturbationRange)
heightfieldData[2*i+2*j*numHeightfieldRows] = height
heightfieldData[2*i+1+2*j*numHeightfieldRows] = height
heightfieldData[2*i+(2*j+1)*numHeightfieldRows] = height
heightfieldData[2*i+1+(2*j+1)*numHeightfieldRows] = height
terrainShape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
meshScale=[.05, .05, 1],
heightfieldTextureScaling=(numHeightfieldRows-1)/2,
heightfieldData=heightfieldData,
numHeightfieldRows=numHeightfieldRows,
numHeightfieldColumns=numHeightfieldColumns)
ground_id = p.createMultiBody(0, terrainShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "random2":
terrain_shape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
meshScale=[.5, .5, .5],
fileName="heightmaps/ground0.txt",
heightfieldTextureScaling=128)
ground_id = p.createMultiBody(0, terrain_shape)
textureId = p.loadTexture(self.path + "/models/grass.png")
p.changeVisualShape(ground_id, -1, textureUniqueId=textureId)
p.resetBasePositionAndOrientation(ground_id, [1, 0, 0.2], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "stairs":
planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)
ground_id = p.createMultiBody(0, planeShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
# many boxes
colSphereId = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.01])
colSphereId1 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.02])
colSphereId2 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.03])
colSphereId3 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.04])
p.createMultiBody(100, colSphereId, basePosition=[1.0, 1.0, 0.0])
p.changeDynamics(colSphereId, -1, lateralFriction=self.lateralFriction)
p.createMultiBody(100, colSphereId1, basePosition=[1.2, 1.0, 0.0])
p.changeDynamics(colSphereId1, -1, lateralFriction=self.lateralFriction)
p.createMultiBody(100, colSphereId2, basePosition=[1.4, 1.0, 0.0])
p.changeDynamics(colSphereId2, -1, lateralFriction=self.lateralFriction)
p.createMultiBody(100, colSphereId3, basePosition=[1.6, 1.0, 0.0])
p.changeDynamics(colSphereId3, -1, lateralFriction=self.lateralFriction)
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "racetrack":
os.chdir(self.path)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
gazebo_world_parser.parseWorld(p, filepath="worlds/racetrack_day.world")
p.configureDebugVisualizer(shadowMapResolution=8192)
p.configureDebugVisualizer(shadowMapWorldSize=25)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
# TODO: Get the URDF from robot_description parameter (or URDF file in the repo)
self.boxId = p.loadURDF("mini_cheetah/mini_cheetah.urdf", robot_start_pos, useFixedBase=False)
p.changeDynamics(self.boxId, 3, spinningFriction=self.spinningFriction)
p.changeDynamics(self.boxId, 7, spinningFriction=self.spinningFriction)
p.changeDynamics(self.boxId, 11, spinningFriction=self.spinningFriction)
p.changeDynamics(self.boxId, 15, spinningFriction=self.spinningFriction)
self.__reset_robot()
def __reset_robot(self):
if self.terrain == "racetrack":
robot_z = 0.4
else:
robot_z = self.robot_height
p.resetBasePositionAndOrientation(
self.boxId, [0, 0, robot_z], [0, 0, 0, 1])
p.resetBaseVelocity(self.boxId, [0, 0, 0], [0, 0, 0])
for j in range(12):
p.resetJointState(
self.boxId, self.motor_id_list[j], self.init_new_pos[j], self.init_new_pos[j+12])
self.cpp_gait_ctrller.init_controller(
self.__convert_type(self.freq),
self.__convert_type([self.stand_kp, self.stand_kd, self.joint_kp, self.joint_kd]))
for _ in range(10):
p.stepSimulation()
imu_data, leg_data, _, _ = self.__get_data_from_sim()
self.cpp_gait_ctrller.pre_work(self.__convert_type(
imu_data), self.__convert_type(leg_data["state"]))
for j in range(16):
p.setJointMotorControl2(self.boxId, j, p.VELOCITY_CONTROL, force=0)
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(1))
for _ in range(200):
self.run() # TODO: THIS IS BLOCKING!!
p.stepSimulation()
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(0))
def run(self):
rate = rospy.Rate(self.freq) # Hz
reset_flag = p.readUserDebugParameter(self.reset)
low_energy_flag = p.readUserDebugParameter(self.low_energy_mode)
high_performance_flag = p.readUserDebugParameter(self.high_performance_mode)
while not rospy.is_shutdown():
# check reset button state
if(reset_flag < p.readUserDebugParameter(self.reset)):
reset_flag = p.readUserDebugParameter(self.reset)
rospy.logwarn("reset the robot")
self.__reset_robot()
if(low_energy_flag < p.readUserDebugParameter(self.low_energy_mode)):
low_energy_flag = p.readUserDebugParameter(self.low_energy_mode)
rospy.logwarn("set robot to low energy mode")
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(1))
if(high_performance_flag < p.readUserDebugParameter(self.high_performance_mode)):
high_performance_flag = p.readUserDebugParameter(self.high_performance_mode)
rospy.logwarn("set robot to high performance mode")
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(0))
self.__simulation_step()
rate.sleep()
def __simulation_step(self):
# get data from simulator
imu_data, leg_data, base_pos, contact_points = self.__get_data_from_sim()
# pub msg
self.__pub_nav_msg(base_pos, imu_data)
self.__pub_imu_msg(imu_data)
self.__pub_joint_states(leg_data)
self.__pub_whole_body_state(imu_data, leg_data, base_pos, contact_points)
# call cpp function to calculate mpc tau
tau = self.cpp_gait_ctrller.torque_calculator(self.__convert_type(
imu_data), self.__convert_type(leg_data["state"]))
# set tau to simulator
p.setJointMotorControlArray(bodyUniqueId=self.boxId,
jointIndices=self.motor_id_list,
controlMode=p.TORQUE_CONTROL,
forces=tau.contents.eff)
p.stepSimulation()
def __camera_update(self):
rate_1 = rospy.Rate(20)
near = 0.1
far = 1000
step_index = 4
pixelWidth = int(320 / step_index)
pixelHeight = int(240 / step_index)
cameraEyePosition = [0.3, 0, 0.26436384367425125]
cameraTargetPosition = [1.0, 0, 0]
cameraUpVector = [45, 45, 0]
self.pointcloud_publisher = rospy.Publisher("/generated_pc", PointCloud2, queue_size=10)
self.image_publisher = rospy.Publisher("/cam0/image_raw", Image, queue_size=10)
while not rospy.is_shutdown():
cubePos, cubeOrn = p.getBasePositionAndOrientation(self.boxId)
get_matrix = p.getMatrixFromQuaternion(cubeOrn)
T1 = numpy.mat([[0, -1.0/2.0, numpy.sqrt(3.0)/2.0, 0.25], [-1, 0, 0, 0],
[0, -numpy.sqrt(3.0)/2.0, -1.0/2.0, 0], [0, 0, 0, 1]])
T2 = numpy.mat([[get_matrix[0], get_matrix[1], get_matrix[2], cubePos[0]],
[get_matrix[3], get_matrix[4], get_matrix[5], cubePos[1]],
[get_matrix[6], get_matrix[7], get_matrix[8], cubePos[2]],
[0, 0, 0, 1]])
T3 = numpy.array(T2*T1)
cameraEyePosition[0] = T3[0][3]
cameraEyePosition[1] = T3[1][3]
cameraEyePosition[2] = T3[2][3]
cameraTargetPosition = (numpy.mat(T3)*numpy.array([[0],[0],[1],[1]]))[0:3]
q = pyquaternion.Quaternion(matrix=T3)
cameraQuat = [q[1], q[2], q[3], q[0]]
self.robot_tf.sendTransform(self.__fill_tf_message("world", "robot", cubePos, cubeOrn))
self.robot_tf.sendTransform(
self.__fill_tf_message("world", "cam", cameraEyePosition, cameraQuat))
self.robot_tf.sendTransform(
self.__fill_tf_message("world", "tar", cameraTargetPosition, cubeOrn))
cameraUpVector = [0, 0, 1]
viewMatrix = p.computeViewMatrix(
cameraEyePosition, cameraTargetPosition, cameraUpVector)
aspect = float(pixelWidth) / float(pixelHeight)
projectionMatrix = p.computeProjectionMatrixFOV(60, aspect, near, far)
width, height, rgbImg, depthImg, _ = p.getCameraImage(
pixelWidth,
pixelHeight,
viewMatrix=viewMatrix,
projectionMatrix=projectionMatrix,
shadow=1,
lightDirection=[1, 1, 1],
renderer=p.ER_BULLET_HARDWARE_OPENGL)
# point cloud method
pc_list = []
pcl_data = pcl.PointCloud()
fx = (pixelWidth*projectionMatrix[0]) / 2.0
fy = (pixelHeight*projectionMatrix[5]) / 2.0
cx = (1-projectionMatrix[2]) * pixelWidth / 2.0
cy = (1+projectionMatrix[6]) * pixelHeight / 2.0
cloud_point = [0] * pixelWidth * pixelHeight * 3
depthBuffer = numpy.reshape(depthImg, [pixelHeight, pixelWidth])
depth = depthBuffer
for h in range(0, pixelHeight):
for w in range(0, pixelWidth):
depth[h][w] = float(depthBuffer[h, w])
depth[h][w] = far * near / (far - (far - near) * depthBuffer[h][w])
Z = float(depth[h][w])
if (Z > 4 or Z < 0.01):
continue
X = (w - cx) * Z / fx
Y = (h - cy) * Z / fy
XYZ_ = numpy.mat([[X], [Y], [Z], [1]])
XYZ = numpy.array(T3*XYZ_)
X = float(XYZ[0])
Y = float(XYZ[1])
Z = float(XYZ[2])
cloud_point[h * pixelWidth * 3 + w * 3 + 0] = float(X)
cloud_point[h * pixelWidth * 3 + w * 3 + 1] = float(Y)
cloud_point[h * pixelWidth * 3 + w * 3 + 2] = float(Z)
pc_list.append([X, Y, Z])
pcl_data.from_list(pc_list)
pub_pointcloud = PointCloud2()
pub_pointcloud.header.stamp = rospy.Time().now()
pub_pointcloud.header.frame_id = "body"
pub_pointcloud.height = 1
pub_pointcloud.width = len(pc_list)
pub_pointcloud.point_step = 12
pub_pointcloud.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1)]
pub_pointcloud.data = numpy.asarray(pc_list, numpy.float32).tostring()
self.pointcloud_publisher.publish(pub_pointcloud)
# grey image
pub_image = Image()
pub_image.header.stamp = rospy.Time().now()
pub_image.header.frame_id = "cam"
pub_image.width = width
pub_image.height = height
pub_image.encoding = "mono8"
pub_image.step = width
grey = pil.fromarray(rgbImg)
pub_image.data = numpy.asarray(grey.convert('L')).reshape([1,-1]).tolist()[0]
self.image_publisher.publish(pub_image)
rate_1.sleep()
def __convert_type(self, input):
ctypes_map = {
int: ctypes.c_int,
float: ctypes.c_double,
str: ctypes.c_char_p,
}
input_type = type(input)
if input_type is list:
length = len(input)
if length == 0:
rospy.logerr("convert type failed...input is " + input)
return 0
else:
arr = (ctypes_map[type(input[0])] * length)()
for i in range(length):
arr[i] = bytes(
input[i], encoding="utf-8") if (type(input[0]) is str) else input[i]
return arr
else:
if input_type in ctypes_map:
return ctypes_map[input_type](bytes(input, encoding="utf-8") if type(input) is str else input)
else:
rospy.logerr("convert type failed...input is "+input)
return 0
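# Illustrative conversions performed by __convert_type (examples only):
# self.__convert_type(3) -> ctypes.c_int(3)
# self.__convert_type([0.1, 0.2]) -> (ctypes.c_double * 2) array
# self.__convert_type("cmd") -> ctypes.c_char_p(b"cmd")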
def __thread_job(self):
rospy.spin()
def __callback_gait(self, req):
self.cpp_gait_ctrller.set_gait_type(self.__convert_type(req.cmd))
return QuadrupedCmdResponse(0, "get the gait")
def __callback_mode(self, req):
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(req.cmd))
return QuadrupedCmdResponse(0, "get the mode")
def __callback_body_vel(self, msg):
vel = [msg.linear.x, msg.linear.y, msg.angular.x]
self.cpp_gait_ctrller.set_robot_vel(self.__convert_type(vel))
def __fill_tf_message(self, parent_frame, child_frame, translation, rotation):
t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = parent_frame
t.child_frame_id = child_frame
t.transform.translation.x = translation[0]
t.transform.translation.y = translation[1]
t.transform.translation.z = translation[2]
t.transform.rotation.x = rotation[0]
t.transform.rotation.y = rotation[1]
t.transform.rotation.z = rotation[2]
t.transform.rotation.w = rotation[3]
return t
def __pub_nav_msg(self, base_pos, imu_data):
pub_odom = rospy.Publisher("/robot_odom", Odometry, queue_size=30)
odom = Odometry()
odom.header.stamp = rospy.Time.now()
odom.header.frame_id = "world"
odom.child_frame_id = "body"
odom.pose.pose.position.x = base_pos[0]
odom.pose.pose.position.y = base_pos[1]
odom.pose.pose.position.z = base_pos[2]
odom.pose.pose.orientation.x = imu_data[3]
odom.pose.pose.orientation.y = imu_data[4]
odom.pose.pose.orientation.z = imu_data[5]
odom.pose.pose.orientation.w = imu_data[6]
pub_odom.publish(odom)
t = self.__fill_tf_message(
odom.header.frame_id, odom.child_frame_id, base_pos[0:3], imu_data[3:7])
self.robot_tf.sendTransform(t)
def __pub_imu_msg(self, imu_data):
pub_imu = rospy.Publisher("/imu0", Imu, queue_size=30)
imu_msg = Imu()
imu_msg.linear_acceleration.x = imu_data[0]
imu_msg.linear_acceleration.y = imu_data[1]
imu_msg.linear_acceleration.z = imu_data[2]
imu_msg.angular_velocity.x = imu_data[7]
imu_msg.angular_velocity.y = imu_data[8]
imu_msg.angular_velocity.z = imu_data[9]
imu_msg.orientation.x = imu_data[3]
imu_msg.orientation.y = imu_data[4]
imu_msg.orientation.z = imu_data[5]
imu_msg.orientation.w = imu_data[6]
imu_msg.header.stamp = rospy.Time.now()
imu_msg.header.frame_id = "body"
pub_imu.publish(imu_msg)
def __pub_joint_states(self, joint_states):
pub_js = rospy.Publisher("joint_states", JointState, queue_size=30)
js_msg = JointState()
js_msg.name = []
js_msg.position = []
js_msg.velocity = []
# TODO: Use joints length
i = 0
for _ in joint_states["name"]:
js_msg.name.append(joint_states["name"][i].decode('utf-8'))
js_msg.position.append(joint_states["state"][i])
js_msg.velocity.append(joint_states["state"][12+i])
i += 1
js_msg.header.stamp = rospy.Time.now()
js_msg.header.frame_id = "body"
pub_js.publish(js_msg)
def __pub_whole_body_state(self, imu_data, leg_data, base_pos, contact_points):
wbs_pub = rospy.Publisher("wb_state", WholeBodyState, queue_size=10)
wbs = WholeBodyState()
wbs.header.stamp = rospy.Time.now()
wbs.header.frame_id = "world"
wbs.time = wbs.header.stamp.secs
# This represents the base state (CoM motion, angular motion and centroidal momenta)
wbs.centroidal.com_position.x = base_pos[0]
wbs.centroidal.com_position.y = base_pos[1]
wbs.centroidal.com_position.z = base_pos[2]
wbs.centroidal.base_orientation.x = imu_data[3]
wbs.centroidal.base_orientation.y = imu_data[4]
wbs.centroidal.base_orientation.z = imu_data[5]
wbs.centroidal.base_orientation.w = imu_data[6]
wbs.centroidal.base_angular_velocity.x = imu_data[7]
wbs.centroidal.base_angular_velocity.y = imu_data[8]
wbs.centroidal.base_angular_velocity.z = imu_data[9]
# This represents the joint state (position, velocity, acceleration and effort)
wbs.joints = []
i = 0
for _ in leg_data["name"]:
js_msg = WBJointState()
js_msg.name = leg_data["name"][i].decode('utf-8')
js_msg.position = leg_data["state"][i]
js_msg.velocity = leg_data["state"][12+i]
wbs.joints.append(js_msg)
i += 1
# This represents the end-effector state (cartesian position and contact forces)
wbs.contacts = []
for contact_point in contact_points:
contact_msg = WBContactState()
contact_msg.name = "body"
contact_msg.type = WBContactState.UNKNOWN
contact_msg.pose.position.x = contact_point[5][0]
contact_msg.pose.position.y = contact_point[5][1]
contact_msg.pose.position.z = contact_point[5][2]
contact_msg.wrench.force.z = contact_point[9]
contact_msg.surface_normal.x = contact_point[7][0]
contact_msg.surface_normal.y = contact_point[7][1]
contact_msg.surface_normal.z = contact_point[7][2]
contact_msg.friction_coefficient = 1.0
wbs.contacts.append(contact_msg)
wbs_pub.publish(wbs)
def __get_motor_joint_states(self, robot):
joint_number_range = range(p.getNumJoints(robot))
joint_states = p.getJointStates(robot, joint_number_range)
joint_infos = [p.getJointInfo(robot, i) for i in joint_number_range]
joint_states, joint_name = \
zip(*[(j, i[1]) for j, i in zip(joint_states, joint_infos) if i[2] != p.JOINT_FIXED])
joint_positions = [state[0] for state in joint_states]
joint_velocities = [state[1] for state in joint_states]
joint_torques = [state[3] for state in joint_states]
return joint_positions, joint_velocities, joint_torques, joint_name
def __get_data_from_sim(self):
get_matrix = []
get_velocity = []
get_invert = []
imu_data = [0] * 10
leg_data = {}
leg_data["state"] = [0] * 24
leg_data["name"] = [""] * 12
base_pose = p.getBasePositionAndOrientation(self.boxId)
get_velocity = p.getBaseVelocity(self.boxId)
get_invert = p.invertTransform(base_pose[0], base_pose[1])
get_matrix = p.getMatrixFromQuaternion(get_invert[1])
# IMU data
imu_data[3] = base_pose[1][0]
imu_data[4] = base_pose[1][1]
imu_data[5] = base_pose[1][2]
imu_data[6] = base_pose[1][3]
imu_data[7] = get_matrix[0] * get_velocity[1][0] + get_matrix[1] * \
get_velocity[1][1] + get_matrix[2] * get_velocity[1][2]
imu_data[8] = get_matrix[3] * get_velocity[1][0] + get_matrix[4] * \
get_velocity[1][1] + get_matrix[5] * get_velocity[1][2]
imu_data[9] = get_matrix[6] * get_velocity[1][0] + get_matrix[7] * \
get_velocity[1][1] + get_matrix[8] * get_velocity[1][2]
# calculate the acceleration of the robot
linear_X = (get_velocity[0][0] - self.get_last_vel[0]) * self.freq
linear_Y = (get_velocity[0][1] - self.get_last_vel[1]) * self.freq
linear_Z = 9.8 + (get_velocity[0][2] - self.get_last_vel[2]) * self.freq
imu_data[0] = get_matrix[0] * linear_X + \
get_matrix[1] * linear_Y + get_matrix[2] * linear_Z
imu_data[1] = get_matrix[3] * linear_X + \
get_matrix[4] * linear_Y + get_matrix[5] * linear_Z
imu_data[2] = get_matrix[6] * linear_X + \
get_matrix[7] * linear_Y + get_matrix[8] * linear_Z
# joint data
joint_positions, joint_velocities, _, joint_names = \
self.__get_motor_joint_states(self.boxId)
leg_data["state"][0:12] = joint_positions
leg_data["state"][12:24] = joint_velocities
leg_data["name"] = joint_names
# CoM velocity
self.get_last_vel = [get_velocity[0][0], get_velocity[0][1], get_velocity[0][2]]
# Contacts
contact_points = p.getContactPoints(self.boxId)
return imu_data, leg_data, base_pose[0], contact_points
if __name__ == '__main__':
rospy.init_node('quadruped_simulator', anonymous=True)
walking_simulation = WalkingSimulation()
walking_simulation.run()
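# Assumed launch flow (the launch files themselves are not part of this script):
# the /simulation/* parameters read in __init_ros must already be on the ROS
# parameter server, after which the node can be started, e.g.:
# rosrun quadruped_ctrl walking_simulation.py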
| 46.365517 | 110 | 0.613491 | 3,353 | 26,892 | 4.687742 | 0.139875 | 0.011197 | 0.010688 | 0.009161 | 0.339611 | 0.262756 | 0.19519 | 0.149192 | 0.128833 | 0.106311 | 0 | 0.034859 | 0.271419 | 26,892 | 579 | 111 | 46.445596 | 0.767366 | 0.025286 | 0 | 0.095142 | 0 | 0 | 0.038489 | 0.008171 | 0 | 0 | 0 | 0.001727 | 0 | 1 | 0.040486 | false | 0 | 0.04251 | 0 | 0.107287 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c51f818ad504a52eec44d704982065f79829f94 | 307 | py | Python | sparql.py | reinvantveer/semcontext | 681024135a41e69ec9efff99a460f80779e49dad | ["MIT"] | null | null | null | sparql.py | reinvantveer/semcontext | 681024135a41e69ec9efff99a460f80779e49dad | ["MIT"] | null | null | null | sparql.py | reinvantveer/semcontext | 681024135a41e69ec9efff99a460f80779e49dad | ["MIT"] | null | null | null |
"""A simple webapp2 server."""
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('SPARQL')
application = webapp2.WSGIApplication([
('/sparql', MainPage),
], debug=True)
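# `application` is a plain WSGI callable, so any WSGI server can host it,
# for example (hypothetical invocation): gunicorn sparql:application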
| 19.1875 | 61 | 0.635179 | 31 | 307 | 6.290323 | 0.741935 | 0.123077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016598 | 0.214984 | 307 | 15 | 62 | 20.466667 | 0.792531 | 0.078176 | 0 | 0 | 0 | 0 | 0.133588 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c5223103f81db8a9c28f13d7a2fffa2408bf2e3 | 12,775 | py | Python | mturk_hit.py | moyazzz/Crowdsourcing | 107827c8b7689ec1a847e38aff0b7f6747091c97 | [
"Apache-2.0"
] | 1 | 2020-10-03T14:04:15.000Z | 2020-10-03T14:04:15.000Z | mturk_hit.py | moyazzz/Crowdsourcing | 107827c8b7689ec1a847e38aff0b7f6747091c97 | [
"Apache-2.0"
] | null | null | null | mturk_hit.py | moyazzz/Crowdsourcing | 107827c8b7689ec1a847e38aff0b7f6747091c97 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# import the django settings module so this script can also run on its own
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "csgame.settings")
from pprint import pprint
from django.conf import settings
from csgame.storage_backends import mturk
# number of rounds, hard-coded here for use by other tests
roundsnum = settings.NUMROUNDS
# getting arguments and phases
import sys
import argparse
from datetime import datetime
hitDescriptions = {
'phase01a': "Generating questions and answers and verifying question given a shown image",
'phase01b': "Given 4 images of same single object and list of questions, answer all the questions that you think are meaningful",
'phase03': "Vote YES or NO for question provided based on common sense",
}
'''
create HIT assignments for phase01a and phase01b for the available number of rounds
create HIT assignments for phase03 only, with MaxAssignments defined by us (e.g. 60)
input: phase, round number
output: HITId and HITGroupId for the preview link
'''
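# Assumed command-line usage, inferred from the argparse setup at the bottom of this
# file (the phase and count values are illustrative only):
# python mturk_hit.py create phase01a 5 # publish 5 phase01a HITs
# python mturk_hit.py print # list assignments for every HIT
# python mturk_hit.py delete phase03 # expire and delete phase03 HITs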
def create_hit(phase, number):
# phase 01a
for i in range(number):
if(phase == 'phase01a'):
try:
question = open(file='hitExternal/hitp1.xml', mode='r').read()
except:
print()
print("----------------------")
print('Error: no file found!')
exit(1)
# create new hit
new_hit = mturk.create_hit(
Title="Image Labeling With Text",
Description=hitDescriptions['phase01a'],
Keywords='image, tagging, machine learning, text generation',
Reward = '0.50',
MaxAssignments=1,
LifetimeInSeconds=60*60*24*10,
AssignmentDurationInSeconds=35*60,
AutoApprovalDelayInSeconds=60*60*24*3,
Question=question,
QualificationRequirements=[
{
# this id is used on sandbox only
'QualificationTypeId': '39GW9SGGAFJE7KP1M1X8MFKH3ZLRO3',
'Comparator': 'GreaterThanOrEqualTo',
'IntegerValues':[60],
'ActionsGuarded': 'Accept',
}
]
)
# phase 01b
elif(phase == 'phase01b'):
try:
question = open(file='hitExternal/hitp1b.xml', mode='r').read()
except:
print()
print("----------------------")
print('Error: no file found!')
exit(1)
# create new hit
new_hit = mturk.create_hit(
Title="Knowledge Answer With Image",
Description=hitDescriptions['phase01b'],
Keywords='image, tagging',
Reward = '0.50',
MaxAssignments=1,
LifetimeInSeconds=60*60*24*10,
AssignmentDurationInSeconds=35*60,
AutoApprovalDelayInSeconds=60*60*24*3,
Question=question,
QualificationRequirements=[
{
'QualificationTypeId': '39GW9SGGAFJE7KP1M1X8MFKH3ZLRO3',
'Comparator': 'GreaterThanOrEqualTo',
'IntegerValues':[60],
'ActionsGuarded': 'Accept',
}
]
)
else:
# phase 03
try:
question = open(file='hitExternal/hitp3.xml', mode='r').read()
except:
print()
print("----------------------")
print('Error: no file found!')
exit(1)
# create new hit
new_hit = mturk.create_hit(
Title="Binary Selection Question",
Description=hitDescriptions['phase03'],
Keywords='binary tagging, text verification, computer vision, machine learning',
Reward = '0.25',
MaxAssignments=1,
LifetimeInSeconds=60*60*24*10,
AssignmentDurationInSeconds=600,
AutoApprovalDelayInSeconds=60*60*24*3,
Question=question,
QualificationRequirements=[
{
'QualificationTypeId': '39GW9SGGAFJE7KP1M1X8MFKH3ZLRO3',
'Comparator': 'GreaterThanOrEqualTo',
'IntegerValues':[60],
'ActionsGuarded': 'Accept',
}
]
)
# some print function for reference
print(f"https://worker.mturk.com/mturk/preview?groupId={new_hit['HIT']['HITGroupId']}")
print(f"HITID = {new_hit['HIT']['HITId']} (Use to Get Results)")
'''
check available HITs
input argument: N/A
output print: the HITs with their titles
'''
def print_hit():
pprint(mturk.list_hits()['HITs'])
'''
delete HITs for a given phase
input argument: phase number
output print: deleted HIT ID, status and a delete message: success or fail
Note: this should only be done in the sandbox (development) or in the gap between phases
'''
def delete_hit(phase):
# Delete all HITs for now
for item in mturk.list_hits()['HITs']:
hit_id=item['HITId']
print('HITId:', hit_id)
# GET the HIT status
status = mturk.get_hit(HITId=hit_id)['HIT']['HITStatus']
print('HITStatus: ', status)
description = mturk.get_hit(HITId=hit_id)['HIT']['Description']
# delete phase01a
if phase == 'phase01a' and description == hitDescriptions['phase01a']:
# If HIT is active then set it to expire immediately
if status=='Assignable':
response = mturk.update_expiration_for_hit(
HITId=hit_id,
ExpireAt=datetime(2015, 1, 1)
)
if status == 'Unassignable':
try:
response = mturk.update_expiration_for_hit(
HITId=hit_id,
ExpireAt = datetime(2015, 1, 1)
)
except Exception as e:
print(e)
# Delete the HIT
try:
mturk.delete_hit(HITId=hit_id)
except Exception as e:
# print(e)
print('Not deleted')
else:
print('Deleted')
elif phase == 'phase01b' and description == hitDescriptions['phase01b']:
# If HIT is active then set it to expire immediately
if status=='Assignable':
response = mturk.update_expiration_for_hit(
HITId=hit_id,
ExpireAt=datetime(2015, 1, 1)
)
print("found an assignable HIT for phase01b, expiring it")
# Delete the HIT
try:
mturk.delete_hit(HITId=hit_id)
except:
print('Not deleted')
else:
print('Deleted')
elif phase == 'phase03' and description == hitDescriptions['phase03']:
# If HIT is active then set it to expire immediately
if status=='Assignable':
response = mturk.update_expiration_for_hit(
HITId=hit_id,
ExpireAt=datetime(2015, 1, 1)
)
print("found an assignable HIT for phase03, expiring it")
# Delete the HIT
try:
mturk.delete_hit(HITId=hit_id)
except:
print('Not deleted')
else:
print('Deleted')
'''
check completed assignments
input argument: HIT id (or 'all' for every HIT)
output print: the assignments for the given HIT(s)
'''
def print_assignment(hit_id):
if hit_id == 'all':
a = []
for hit in mturk.list_hits()['HITs']:
try:
a.extend(mturk.list_assignments_for_hit(
HITId=hit['HITId']
).get('Assignments', []))
except Exception as e:
print(e)
pprint(a)
else:
pprint(mturk.list_assignments_for_hit(
HITId=hit_id
).get('Assignments', []))
def approve_assignment(assignment_id):
mturk.approve_assignment(
AssignmentId=assignment_id,
OverrideRejection=True
)
def reject_assignment(assignment_id, reason):
mturk.reject_assignment(
AssignmentId=assignment_id,
RequesterFeedback=reason
)
def create_qualification(phase):
if phase == 'phase01a' or phase == 'phase01b':
try:
questions = open(file='qualifyT/testP3.xml', mode='r').read()
answers = open(file='qualifyT/ansP3.xml', mode='r').read()
except:
print()
print("----------------------")
print('Error: no file found!')
exit(1)
qual_resp = mturk.create_qualification_type(
Name = 'English comprehension writing test',
Keywords = 'test, qualifcation, English writing skills',
Description = "This is a test consists of 10 questions to decide your level of your english comprehension in writing, you need to get at least 6 correct to be qualified",
QualificationTypeStatus = 'Active',
Test=questions,
AnswerKey=answers,
TestDurationInSeconds=300
)
else:
try:
questions = open(file='qualifyT/testP3.xml', mode='r').read()
answers = open(file='qualifyT/ansP3.xml', mode='r').read()
except:
print()
print("----------------------")
print('Error: no file found!')
exit(1)
qual_resp = mturk.create_qualification_type(
Name = 'English comprehension reading test',
Keywords = 'test, qualifcation, English reading skills',
Description = "This is a test consists of 10 questions to decide your level of your english comprehension in writing, you need to get at least 6 correct to be qualified",
QualificationTypeStatus = 'Active',
Test=questions,
AnswerKey=answers,
TestDurationInSeconds=300
)
print(qual_resp['QualificationType']['QualificationTypeId'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(metavar='subcommands', dest='command')
phasesArg = dict(type=str, choices=['phase01a', 'phase01b', 'phase03'], metavar='phase',
help='Choose phase01a, phase01b, or phase03.')
cparser = subparsers.add_parser('create', help='create hits for specfic phase with', aliases=['c'])
cparser.add_argument('phase', **phasesArg)
cparser.add_argument('number', type=int, default=1, help="The number of the HITS to generate each round")
dparser = subparsers.add_parser('delete', help='delete hits for specfic phase with', aliases=['d'])
dparser.add_argument('phase', **phasesArg)
pparser = subparsers.add_parser('print', help='print hit or assignment status', aliases=['p'])
pparser.add_argument('-a', '--assignment', type=str, metavar='assignment', default='all', nargs='?', help='HIT id to show assignments for.')
aparser = subparsers.add_parser('approve', help='approve the assignment', aliases=['a'])
aparser.add_argument('assignment', type=str, metavar='assignment')
rparser = subparsers.add_parser('reject', help='reject the assignment', aliases=['r'])
rparser.add_argument('assignment', type=str, metavar='assignment')
rparser.add_argument('reason', type=str, metavar='reason')
# a parser that is only needed once, to create a qualification type for each of the 3 phases
qparser = subparsers.add_parser('qualify', help='create a qualification type for different 3 phases', aliases=['q'])
qparser.add_argument('phase', **phasesArg)
options = parser.parse_args()
# Hello world for mturk boto api
print("I have $" + mturk.get_account_balance()['AvailableBalance'] + " in my account")
if options.command in ('create', 'c'):
create_hit(options.phase, options.number)
elif options.command in ('delete', 'd'):
delete_hit(options.phase)
elif options.command in ('print', 'p'):
hitId = options.assignment
if hitId:
print_assignment(hitId)
else:
print_hit()
elif options.command in ('approve', 'a'):
approve_assignment(options.assignment)
elif options.command in ('reject', 'r'):
reject_assignment(options.assignment, options.reason)
elif options.command in ('qualify', 'q'):
create_qualification(options.phase)
else:
sys.exit(2)
| 38.478916 | 182 | 0.56454 | 1,286 | 12,775 | 5.526439 | 0.239502 | 0.011257 | 0.015478 | 0.018292 | 0.459688 | 0.426762 | 0.409033 | 0.392711 | 0.336147 | 0.336147 | 0 | 0.025067 | 0.325479 | 12,775 | 331 | 183 | 38.595166 | 0.799698 | 0.05456 | 0 | 0.507813 | 0 | 0.011719 | 0.242233 | 0.027216 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027344 | false | 0 | 0.027344 | 0 | 0.054688 | 0.160156 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c533e8fe9dc26dde9f40be79b87b8ea74ccff76 | 1,044 | py | Python | documentation/olds/test.py | kuefmz/software_classification | 0dee3a046e59052ab272e4029195fb21f3d58c04 | ["Apache-2.0"] | null | null | null | documentation/olds/test.py | kuefmz/software_classification | 0dee3a046e59052ab272e4029195fb21f3d58c04 | ["Apache-2.0"] | null | null | null | documentation/olds/test.py | kuefmz/software_classification | 0dee3a046e59052ab272e4029195fb21f3d58c04 | ["Apache-2.0"] | null | null | null |
import pandas as pd
import pickle
import sys
from sklearn import metrics
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
class DataframeContainer:
def __init__(self, name, filenameCsv): # filenameCsv is currently unused; the text to classify comes from sys.argv[1]
self.name = name
self.df_X = pd.DataFrame([sys.argv[1]], columns=['Text'])
self.df_X.reset_index(drop=True, inplace=True)
def predict(self):
self.y_pred = self.clf.predict(self.df_X)
return self.y_pred
def load_pickle(self):
filename = 'pickles/3/' + self.name + '.sav'
self.clf = pickle.load(open(filename, 'rb'))
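# Module-level flow below: build one DataframeContainer per category, load its
# pickled classifier from pickles/3/<name>.sav, predict on the single text passed
# as sys.argv[1], and print "<category> <prediction>" for each container.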
names_list = ["Audio", "Computer Vision", "Graphs", "General", "Natural Language Processing", "Reinforcement Learning", "Sequential"]
output = []
dataframecontainers_list = [DataframeContainer(name, 'dataset/somef_data.csv') for name in names_list]
for container in dataframecontainers_list:
container.load_pickle()
output.append((container.predict()[0], container.name))
for o in output:
print(o[1], o[0])
| 31.636364 | 133 | 0.703065 | 140 | 1,044 | 5.114286 | 0.521429 | 0.03352 | 0.02933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0058 | 0.17433 | 1,044 | 33 | 134 | 31.636364 | 0.824826 | 0 | 0 | 0 | 0 | 0 | 0.12823 | 0.021053 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.269231 | 0 | 0.461538 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
0c53e7d728bbc5664855d66ce4b5855669e26b4d | 11,423 | py | Python | meiduo_mall/meiduo_mall/apps/carts/views.py | m17630030204/Django_project | 9f207b38abfcb1b84be5850f1ec90949b571d9bb | ["MIT"] | null | null | null | meiduo_mall/meiduo_mall/apps/carts/views.py | m17630030204/Django_project | 9f207b38abfcb1b84be5850f1ec90949b571d9bb | ["MIT"] | null | null | null | meiduo_mall/meiduo_mall/apps/carts/views.py | m17630030204/Django_project | 9f207b38abfcb1b84be5850f1ec90949b571d9bb | ["MIT"] | null | null | null |
from django.shortcuts import render
from django_redis import get_redis_connection
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
import pickle
import base64
from . import constants
from .serializers import CartSerializer, CartSKUSerializer, CartDeleteSerializer, CartSelectAllSerializer
from goods.models import SKU
# Create your views here.
class CartView(GenericAPIView):
"""Shopping cart view."""
serializer_class = CartSerializer
def perform_authentication(self, request):
"""Skip authentication before the handler runs; each handler authenticates the user itself."""
pass
def post(self, request):
"""Add an item to the shopping cart."""
# sku_id count selected
# Validate the input
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
sku_id = serializer.validated_data['sku_id']
count = serializer.validated_data['count']
selected = serializer.validated_data['selected']
# Determine whether the user is logged in
try:
user = request.user # may be an anonymous user (AnonymousUser)
except Exception:
user = None
# Save
if user and user.is_authenticated:
# Logged-in user: save to redis
redis_conn = get_redis_connection('cart')
pl = redis_conn.pipeline()
# Cart quantities are kept in a redis hash
pl.hincrby('cart_%s' % user.id, sku_id, count)
# Selected sku_ids are kept in a redis set
if selected:
pl.sadd('cart_selected_%s' % user.id, sku_id)
pl.execute()
return Response(serializer.data)
else:
# Anonymous user: save to a cookie via response.set_cookie
# Read the existing cart data from the cookie
cart_str = request.COOKIES.get('cart')
if cart_str:
# Decode it
cart_str = cart_str.encode() # str -> bytes
cart_bytes = base64.b64decode(cart_str) # b64decode takes bytes
cart_dict = pickle.loads(cart_bytes)
else:
cart_dict = {}
# cart_dict = {
# sku_id_1: {
# 'count': 10
# 'selected': True
# },
# sku_id_2: {
# 'count': 10
# 'selected': False
# },
# sku_id_3: {
# 'count': 10
# 'selected': True
# }
# }
if sku_id in cart_dict:
# Item already in the cart: accumulate the count
cart_dict[sku_id]['count'] += count
cart_dict[sku_id]['selected'] = selected
else:
# Item not in the cart yet: set it
cart_dict[sku_id] = {
'count': count,
'selected': selected
}
cart_cookie = base64.b64encode(pickle.dumps(cart_dict)).decode()
# Set the cookie
response = Response(serializer.data)
response.set_cookie('cart', cart_cookie, max_age=constants.CART_COOKIE_EXPIRES)
return response
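# Cookie layout used throughout this module: the 'cart' cookie stores
# base64(pickle(cart_dict)), where cart_dict maps sku_id -> {'count': int,
# 'selected': bool}; authenticated users use the redis hash/set pair instead.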
def get(self, request):
"""List the contents of the cart."""
# Determine whether the user is logged in
try:
user = request.user
except Exception:
user = None
# Query
if user and user.is_authenticated:
# Logged-in user: read sku_id, count and selected from redis
redis_conn = get_redis_connection('cart')
redis_cart = redis_conn.hgetall('cart_%s' % user.id)
# redis_cart = {
# sku_id (bytes): count (bytes)
# sku_id (bytes): count (bytes)
# ...
# }
redis_cart_selected = redis_conn.smembers('cart_selected_%s' % user.id)
# redis_cart_selected = set(sku_ids of the selected items, as bytes, ...)
# Walk redis_cart and build cart_dict
cart_dict = {}
for sku_id, count in redis_cart.items():
cart_dict[int(sku_id)] = {
'count': int(count),
'selected': sku_id in redis_cart_selected
}
else:
# Anonymous user: read the cart from the cookie
cookie_cart = request.COOKIES.get('cart')
if cookie_cart:
# The cookie contains cart data
# Decode it
cart_dict = pickle.loads(base64.b64decode(cookie_cart.encode()))
else:
# No cart data in the cookie
cart_dict = {}
# cart_dict = {
# sku_id_1: {
# 'count': 10
# 'selected': True
# },
# sku_id_2: {
# 'count': 10
# 'selected': False
# },
# }
# Query the database
sku_id_list = cart_dict.keys()
sku_obj_list = SKU.objects.filter(id__in=sku_id_list)
# Attach count and selected attributes to every sku object
for sku in sku_obj_list:
sku.count = cart_dict[sku.id]['count']
sku.selected = cart_dict[sku.id]['selected']
# Serialize and return
serializer = CartSKUSerializer(sku_obj_list, many=True)
return Response(serializer.data)
def put(self, request):
"""Update a cart item."""
# sku_id, count, selected
# Validate the input
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
sku_id = serializer.validated_data['sku_id']
count = serializer.validated_data['count']
selected = serializer.validated_data['selected']
# Determine whether the user is logged in
try:
user = request.user
except Exception:
user = None
# Save
if user and user.is_authenticated:
# Logged-in user: update redis
redis_conn = get_redis_connection('cart')
pl = redis_conn.pipeline()
# Update the quantity in the hash
pl.hset('cart_%s' % user.id, sku_id, count)
# Update the selection state in the set
if selected:
# Selected
pl.sadd('cart_selected_%s' % user.id, sku_id)
else:
# Deselected: remove it
pl.srem('cart_selected_%s' % user.id, sku_id)
pl.execute()
return Response(serializer.data)
else:
# Not logged in: update the cookie
cookie_cart = request.COOKIES.get('cart')
if cookie_cart:
# The cookie contains cart data
# Decode it
cart_dict = pickle.loads(base64.b64decode(cookie_cart.encode()))
else:
# No cart data in the cookie
cart_dict = {}
# cart_dict = {
# sku_id_1: {
# 'count': 10
# 'selected': True
# },
# sku_id_2: {
# 'count': 10
# 'selected': False
# },
# }
response = Response(serializer.data)
if sku_id in cart_dict:
cart_dict[sku_id] = {
'count': count,
'selected': selected
}
cart_cookie = base64.b64encode(pickle.dumps(cart_dict)).decode()
# Set the cookie
response.set_cookie('cart', cart_cookie, max_age=constants.CART_COOKIE_EXPIRES)
return response
def delete(self, request):
"""Delete an item from the cart."""
# sku_id
# Validate
serializer = CartDeleteSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
sku_id = serializer.validated_data['sku_id']
# Check whether the user is logged in
try:
user = request.user
except Exception:
user = None
# Delete
if user and user.is_authenticated:
# Logged in: delete from redis
redis_conn = get_redis_connection('cart')
pl = redis_conn.pipeline()
# Delete from the hash
pl.hdel('cart_%s' % user.id, sku_id)
# Delete from the set
pl.srem('cart_selected_%s' % user.id, sku_id)
pl.execute()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
# Not logged in: delete from the cookie
cookie_cart = request.COOKIES.get('cart')
if cookie_cart:
# The cookie contains cart data
# Parse it
cart_dict = pickle.loads(base64.b64decode(cookie_cart.encode()))
else:
# The cookie contains no cart data
cart_dict = {}
# cart_dict = {
# sku_id_1: {
# 'count': 10
# 'selected': True
# },
# sku_id_2: {
# 'count': 10
# 'selected': False
# },
# }
response = Response(status=status.HTTP_204_NO_CONTENT)
if sku_id in cart_dict:
del cart_dict[sku_id]
cart_cookie = base64.b64encode(pickle.dumps(cart_dict)).decode()
# Set the cookie
response.set_cookie('cart', cart_cookie, max_age=constants.CART_COOKIE_EXPIRES)
return response
class CartSelectAllView(GenericAPIView):
"""
Select or deselect all items in the cart
"""
serializer_class = CartSelectAllSerializer
def perform_authentication(self, request):
pass
def put(self, request):
# selected
# Validate
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
selected = serializer.validated_data['selected']
# Check whether the user is logged in
try:
user = request.user
except Exception:
user = None
if user and user.is_authenticated:
# Logged in: use redis
redis_conn = get_redis_connection('cart')
redis_cart = redis_conn.hgetall('cart_%s' % user.id)
# redis_cart = {
# sku_id of the item (bytes): count (bytes)
# sku_id of the item (bytes): count (bytes)
# ...
# }
sku_id_list = redis_cart.keys()
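# Assumes the cart hash is non-empty: calling sadd/srem with an empty *sku_id_list would raise a redis error.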
if selected:
# Select all: add every sku_id to the redis set
redis_conn.sadd('cart_selected_%s' % user.id, *sku_id_list)
else:
# Deselect all: remove every sku_id from the redis set
redis_conn.srem('cart_selected_%s' % user.id, *sku_id_list)
return Response({'message': 'OK'})
else:
# Not logged in: use the cookie
cookie_cart = request.COOKIES.get('cart')
if cookie_cart:
# The cookie contains cart data
# Parse it
cart_dict = pickle.loads(base64.b64decode(cookie_cart.encode()))
else:
# The cookie contains no cart data
cart_dict = {}
# cart_dict = {
# sku_id_1: {
# 'count': 10
# 'selected': True
# },
# sku_id_2: {
# 'count': 10
# 'selected': False
# },
# }
response = Response({'message': 'OK'})
if cart_dict:
for count_selected_dict in cart_dict.values():
count_selected_dict['selected'] = selected
cart_cookie = base64.b64encode(pickle.dumps(cart_dict)).decode()
# Set the cookie
response.set_cookie('cart', cart_cookie, max_age=constants.CART_COOKIE_EXPIRES)
return response
| 30.219577 | 105 | 0.50162 | 1,067 | 11,423 | 5.132146 | 0.164948 | 0.042001 | 0.02374 | 0.028488 | 0.668371 | 0.628196 | 0.61176 | 0.57615 | 0.556976 | 0.541271 | 0 | 0.011681 | 0.407949 | 11,423 | 377 | 106 | 30.299735 | 0.798019 | 0.177799 | 0 | 0.698864 | 0 | 0 | 0.037556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039773 | false | 0.011364 | 0.056818 | 0 | 0.170455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c544a27135926f10afa0d57dff1287a16b7220b | 3,218 | py | Python | benchmarks/BM_resnet50/scripts/prepare-input-data.py | laochonlam/dali_backend | 461fe528d42a6ba48baa95c4b817cc757c351f55 | [
"MIT"
] | 55 | 2020-09-24T18:05:09.000Z | 2022-03-26T03:18:16.000Z | benchmarks/BM_resnet50/scripts/prepare-input-data.py | laochonlam/dali_backend | 461fe528d42a6ba48baa95c4b817cc757c351f55 | [
"MIT"
] | 85 | 2020-10-14T17:24:26.000Z | 2022-03-31T21:30:57.000Z | benchmarks/BM_resnet50/scripts/prepare-input-data.py | laochonlam/dali_backend | 461fe528d42a6ba48baa95c4b817cc757c351f55 | [
"MIT"
] | 19 | 2020-09-23T22:20:59.000Z | 2022-03-28T00:10:30.000Z | # The MIT License (MIT)
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import argparse
from pathlib import Path
from shutil import copyfile
import shutil
import base64 as b64
import json
dali_extra_path = os.getenv('DALI_EXTRA_PATH', None)
assert dali_extra_path is not None, "Please set DALI_EXTRA_PATH env variable."
images_dir = Path(dali_extra_path) / 'db' / 'single' / 'jpeg'
images_paths = list(images_dir.glob('**/*.jpg'))
sized_images = sorted([(os.stat(p).st_size, p) for p in images_paths])
# choose 16 smallest samples
chosen_set = [p for (_, p) in sized_images[:16]]
# choose medium sized image
chosen_sample = sized_images[8][1]
def save_sample_input(sample, dir_name, input_name):
Path(dir_name).mkdir(exist_ok=True)
shutil.copy(sample, Path(dir_name) / Path(input_name))
def get_content(fpath):
with fpath.open("rb") as f:
content = f.read()
return {
'content' : {
'b64': b64.b64encode(content).decode('utf-8')
},
'shape': [len(content)]
}
def save_json_dataset(files, dataset_filename, input_name):
contents = [get_content(fpath) for fpath in files]
inputs = [{input_name: content} for content in contents]
result_dict = {'data': inputs}
with open(dataset_filename, 'w') as dataset_file:
json.dump(result_dict, dataset_file)
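# The resulting JSON, {"data": [{<input_name>: {"content": {"b64": ...}, "shape": [...]}}, ...]},
# follows the --input-data layout that Triton's perf_analyzer expects for BYTES inputs.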
def get_args():
parser = argparse.ArgumentParser(description='Prepare perf_analyzer input data.')
parser.add_argument('-d', '--directory-name', required=False, action='store', default='inputs-data',
help='Directory name to store a single sample data.')
parser.add_argument('-i', '--input-name', required=False, action='store', default='input',
help='Input name.')
parser.add_argument('-f', '--dataset-filename', required=False, action='store', default='dataset.json',
help='Name of the created JSON dataset.')
return parser.parse_args()
def main(args):
save_sample_input(chosen_sample, args.directory_name, args.input_name)
save_json_dataset(chosen_set, args.dataset_filename, args.input_name)
if __name__ == '__main__':
args = get_args()
main(args)
| 38.771084 | 105 | 0.728403 | 468 | 3,218 | 4.867521 | 0.41453 | 0.03863 | 0.028534 | 0.031607 | 0.044337 | 0.030729 | 0 | 0 | 0 | 0 | 0 | 0.007836 | 0.167185 | 3,218 | 82 | 106 | 39.243902 | 0.842164 | 0.356122 | 0 | 0 | 0 | 0 | 0.159668 | 0 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0.106383 | false | 0 | 0.148936 | 0 | 0.297872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c55ce49b3c5848016d4be6e406ec45fa43e2618 | 1,038 | py | Python | Others/s8pc/s8pc-5/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/s8pc/s8pc-5/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/s8pc/s8pc-5/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def calc_dist(x1, y1, x2, y2):
return (x2 - x1) ** 2 + (y2 - y1) ** 2
def main():
from itertools import combinations
from math import sqrt
n, m = map(int, input().split())
a = list()
b = list()
ans = float('inf')
if n > 0:
for i in range(n):
xi, yi, ri = map(int, input().split())
ans = min(ans, ri)
a.append((xi, yi, ri))
for j in range(m):
xi, yi = map(int, input().split())
b.append((xi, yi))
if m > 1:
dist = float('inf')
for (x1, y1), (x2, y2) in list(combinations(b, 2)):
dist = min(dist, calc_dist(x1, y1, x2, y2))
ans = min(ans, sqrt(dist) / 2)
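# Half of the closest pair distance: presumably two equal circles centred on the closest
# pair of points touch when each radius reaches half of that distance.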
if n > 0 and m > 0:
dist = float('inf')
for xa, ya, ra in a:
for xb, yb in b:
dist = min(dist, sqrt(calc_dist(xa, ya, xb, yb)) - ra)
ans = min(ans, dist)
print(ans)
if __name__ == '__main__':
main()
| 21.625 | 71 | 0.442197 | 151 | 1,038 | 2.966887 | 0.344371 | 0.035714 | 0.040179 | 0.053571 | 0.071429 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0.039746 | 0.394027 | 1,038 | 47 | 72 | 22.085106 | 0.672496 | 0.020231 | 0 | 0.064516 | 0 | 0 | 0.017562 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.064516 | 0.032258 | 0.16129 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c581c8cb82b1aed0c0ff6cb1cdd1a77d784bfa8 | 5,644 | py | Python | ngs_utils/sambamba.py | pdiakumis/NGS_Utils | 9eae9f8d5f0e408118d429fde90e297dbac9ae15 | [
"MIT"
] | 3 | 2018-06-06T01:41:51.000Z | 2020-08-20T11:36:06.000Z | ngs_utils/sambamba.py | pdiakumis/NGS_Utils | 9eae9f8d5f0e408118d429fde90e297dbac9ae15 | [
"MIT"
] | 4 | 2019-11-28T03:34:54.000Z | 2021-06-24T23:04:55.000Z | ngs_utils/sambamba.py | pdiakumis/NGS_Utils | 9eae9f8d5f0e408118d429fde90e297dbac9ae15 | [
"MIT"
] | 5 | 2018-03-15T12:43:38.000Z | 2021-06-24T23:12:48.000Z | import os
import subprocess
import traceback
from os.path import join, dirname, abspath, basename, isfile, getmtime
import sys
from pybedtools import BedTool
from ngs_utils.call_process import run
from ngs_utils.file_utils import verify_file, splitext_plus, which, can_reuse
from ngs_utils.logger import debug, warn, err, critical
def get_executable():
sys_path = which('sambamba')
if not sys_path:
critical('Error: sambamba executable is not found')
return sys_path
def index_bam(bam_fpath, sambamba=None, samtools=None):
sambamba = sambamba or get_executable()
indexed_bam = bam_fpath + '.bai'
if not can_reuse(indexed_bam, cmp_f=bam_fpath, silent=True):
cmdline = '{sambamba} index {bam_fpath}'.format(**locals())
res = run(cmdline, output_fpath=indexed_bam, stdout_to_outputfile=False, stdout_tx=False)
def call_sambamba(cmdl, bam_fpath, output_fpath=None, command_name='', no_index=False):
if not no_index:
index_bam(bam_fpath)
sambamba = get_executable()
run(sambamba + ' ' + cmdl, output_fpath=output_fpath)
return output_fpath
def sambamba_depth(work_dir, bed, bam, depth_thresholds=None,
output_fpath=None, sample_name=None, threads=1):
if not bam:
return None
sample_name = sample_name or splitext_plus(basename(bam))[0]
depth_thresholds = depth_thresholds or []
if isinstance(bed, BedTool):
bed = bed.saveas().fn
if not output_fpath:
output_fpath = join(work_dir,
splitext_plus(basename(bed))[0] + '_' + sample_name + '_sambamba_depth.txt')
if can_reuse(output_fpath, [bam, bed]):
return output_fpath
thresholds_str = ''.join([' -T' + str(int(d)) for d in depth_thresholds if d is not None])
cmdline = ('depth region -F "not duplicate and not failed_quality_control" '
'-t {threads} -L {bed} {thresholds_str} {bam}').format(**locals())
call_sambamba(cmdline, bam_fpath=bam, output_fpath=output_fpath)
return output_fpath
def remove_dups(bam, output_fpath):
cmdline = 'view --format=bam -F "not duplicate" {bam}'.format(**locals()) # -F (=not) 1024 (=duplicate)
return call_sambamba(cmdline, bam_fpath=bam, output_fpath=output_fpath, command_name='not_duplicate')
def count_in_bam(work_dir, bam, query, dedup=False, bed=None, use_grid=False, sample_name=None, target_name=None):
if dedup:
query += ' and not duplicate'
name = 'num_' + (query.replace(' ', '_') or 'reads')
if bed is not None and isinstance(bed, BedTool):
bed = bed.saveas().fn
if bed is not None:
target_name = target_name or ('target_' + basename(bed))
name += '_on_' + target_name
sample_name = sample_name or basename(bam)
output_fpath = join(work_dir, sample_name + '_' + name)
if can_reuse(output_fpath, cmp_f=bam):
pass
else:
cmdline = 'view -c -F "{query}" {bam}'.format(**locals())
if bed is not None:
cmdline += ' -L ' + bed
call_sambamba(cmdline, bam_fpath=bam, output_fpath=output_fpath, command_name=name)
with open(output_fpath) as f:
return int(f.read().strip())
def number_of_reads(work_dir, bam, dedup=False, use_grid=False, sample_name=None, reuse=False):
return count_in_bam(work_dir, bam, '', dedup, use_grid=use_grid, sample_name=sample_name)
def number_of_mapped_reads(work_dir, bam, dedup=False, use_grid=False, sample_name=None, reuse=False):
return count_in_bam(work_dir, bam, 'not unmapped', dedup, use_grid=use_grid, sample_name=sample_name)
def number_of_properly_paired_reads(work_dir, bam, dedup=False, use_grid=False, sample_name=None, reuse=False):
return count_in_bam(work_dir, bam, 'proper_pair', dedup, use_grid=use_grid, sample_name=sample_name)
def number_of_dup_reads(work_dir, bam, use_grid=False, sample_name=None, reuse=False):
return count_in_bam(work_dir, bam, 'duplicate', use_grid=use_grid, sample_name=sample_name)
def number_mapped_reads_on_target(work_dir, bed, bam, dedup=False, use_grid=False, sample_name=None, target_name=None):
return count_in_bam(work_dir, bam, 'not unmapped', dedup, bed=bed, use_grid=use_grid, sample_name=sample_name, target_name=target_name)
# def flag_stat(cnf, bam):
# output_fpath = join(cnf.work_dir, basename(bam) + '_flag_stats')
# cmdline = 'flagstat {bam}'.format(**locals())
# call_sambamba(cmdline, output_fpath=output_fpath, bam_fpath=bam, command_name='flagstat')
# stats = dict()
# with open(output_fpath) as f:
# lines = f.readlines()
# for stat, fun in [('total', number_of_reads),
# ('duplicates', number_of_dup_reads), # '-f 1024'
# ('mapped', number_of_mapped_reads), # '-F 4'
# ('properly paired', number_of_properly_paired_reads)]: # '-f 2'
# try:
# val = next(l.split()[0] for l in lines if stat in l)
# except StopIteration:
# warn('Cannot extract ' + stat + ' from flagstat output ' + output_fpath + '. Trying samtools view -c...')
# val = None
# else:
# try:
# val = int(val)
# except ValueError:
# warn('Cannot parse value ' + str(val) + ' from ' + stat + ' from flagstat output ' + output_fpath + '. Trying samtools view -c...')
# val = None
# if val is not None:
# stats[stat] = val
# else:
# stats[stat] = fun(cnf, bam)
# return stats
| 40.604317 | 153 | 0.655032 | 778 | 5,644 | 4.492288 | 0.192802 | 0.084979 | 0.028612 | 0.040057 | 0.428612 | 0.354506 | 0.322175 | 0.322175 | 0.274106 | 0.247496 | 0 | 0.003209 | 0.226967 | 5,644 | 138 | 154 | 40.898551 | 0.797846 | 0.238306 | 0 | 0.090909 | 0 | 0 | 0.089056 | 0.00539 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0.012987 | 0.116883 | 0.064935 | 0.415584 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c5821878688b913b81b85d8ca99f1a8a3acf5f6 | 483 | py | Python | Python/minimum-path-sum.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/minimum-path-sum.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/minimum-path-sum.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O(m * n)
# Space: O(m + n)
class Solution(object):
# @param grid, a list of lists of integers
# @return an integer
def minPathSum(self, grid):
sum = list(grid[0])
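# Rolling 1-D DP: sum[j] holds the minimum path cost to reach column j of the row processed so far.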
for j in range(1, len(grid[0])):
sum[j] = sum[j - 1] + grid[0][j]
for i in range(1, len(grid)):
sum[0] += grid[i][0]
for j in range(1, len(grid[0])):
sum[j] = min(sum[j - 1], sum[j]) + grid[i][j]
return sum[-1]
| 25.421053 | 61 | 0.47412 | 80 | 483 | 2.8625 | 0.3875 | 0.087336 | 0.104803 | 0.144105 | 0.283843 | 0.218341 | 0.218341 | 0.218341 | 0.218341 | 0.218341 | 0 | 0.037975 | 0.345756 | 483 | 18 | 62 | 26.833333 | 0.686709 | 0.188406 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c58d1fd6c4dbccad8c2500770123a1c44ae4431 | 465 | py | Python | serverless/populate_s3.py | keithrozario/cps3 | f688b5c9312eb1297091c646ed06d7df7e5849e8 | [
"Apache-2.0"
] | 10 | 2019-04-25T16:31:03.000Z | 2020-12-19T15:08:21.000Z | serverless/populate_s3.py | keithrozario/cps3 | f688b5c9312eb1297091c646ed06d7df7e5849e8 | [
"Apache-2.0"
] | null | null | null | serverless/populate_s3.py | keithrozario/cps3 | f688b5c9312eb1297091c646ed06d7df7e5849e8 | [
"Apache-2.0"
] | 3 | 2019-11-05T16:47:45.000Z | 2020-12-14T19:41:00.000Z | import boto3
import uuid
import io
import json
s3_client = boto3.client('s3')
def main(event, context):
dummy_content = {"foo": "bar"}
dest_bucket = 'test-source-keithrozario'
for x in range(5000):
file_obj = io.BytesIO(json.dumps(dummy_content).encode('utf-8'))
file_name = uuid.uuid4().__str__()
key = f"{file_name[:1]}/{file_name}"
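# Prefixing each key with the UUID's first hex character spreads the 5000 objects across up to 16 key prefixes.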
s3_client.upload_fileobj(file_obj, dest_bucket, key)
return {"status": 200}
| 22.142857 | 72 | 0.653763 | 66 | 465 | 4.363636 | 0.651515 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040431 | 0.202151 | 465 | 20 | 73 | 23.25 | 0.735849 | 0 | 0 | 0 | 0 | 0 | 0.150538 | 0.109677 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c5900ca46ebd4d20b23f7cb0b946be3d8dbeb4b | 3,990 | py | Python | preparation/iemocap.py | ttslr/Expressive-FastSpeech2 | 7f1c463d0f10053596de62e5c112ee952f58d924 | [
"MIT"
] | 79 | 2021-05-17T10:19:40.000Z | 2022-03-27T09:01:58.000Z | preparation/iemocap.py | KunZhou9646/Expressive-FastSpeech2 | 7f1c463d0f10053596de62e5c112ee952f58d924 | [
"MIT"
] | 13 | 2021-05-16T23:07:29.000Z | 2022-03-20T23:45:04.000Z | preparation/iemocap.py | KunZhou9646/Expressive-FastSpeech2 | 7f1c463d0f10053596de62e5c112ee952f58d924 | [
"MIT"
] | 22 | 2021-05-16T09:35:50.000Z | 2022-03-04T09:52:58.000Z | import re
import argparse
import yaml
import os
import shutil
import json
import librosa
import soundfile
from glob import glob
from tqdm import tqdm
from moviepy.editor import VideoFileClip
from text import _clean_text
from text.korean import normalize_nonchar
from g2p_en import G2p
def extract_nonen(preprocess_config):
in_dir = preprocess_config["path"]["raw_path"]
filelist = open(f'{in_dir}/nonen.txt', 'w', encoding='utf-8')
count = 0
nonen = set()
print("Extract non english charactors...")
with open(f'{in_dir}/filelist.txt', 'r', encoding='utf-8') as f:
lines = f.readlines()
total_count = len(lines)
for line in tqdm(lines):
wav = line.split('|')[0]
text = line.split('|')[1]
reg = re.compile("""[^ a-zA-Z~!.,?:`"'"“‘’”’]+""")
impurities = reg.findall(text)
if len(impurities) == 0:
count+=1
continue
norm = _clean_text(text, preprocess_config["preprocessing"]["text"]["text_cleaners"])
impurities_str = ','.join(impurities)
filelist.write(f'{norm}|{text}|{impurities_str}|{wav}\n')
for imp in impurities:
nonen.add(imp)
filelist.close()
print('Total {} non english charactors from {} lines'.format(len(nonen), total_count-count))
print(sorted(list(nonen)))
def extract_lexicon(preprocess_config):
"""
Extract lexicon and build grapheme-phoneme dictionary for MFA training
"""
in_dir = preprocess_config["path"]["raw_path"]
lexicon_path = preprocess_config["path"]["lexicon_path"]
filelist = open(lexicon_path, 'a+', encoding='utf-8')
# Load Lexicon Dictionary
done = set()
if os.path.isfile(lexicon_path):
filelist.seek(0)
for line in filelist.readlines():
grapheme = line.split("\t")[0]
done.add(grapheme)
print("Extract lexicon...")
g2p = G2p()
for lab in tqdm(glob(f'{in_dir}/**/*.lab', recursive=True)):
with open(lab, 'r', encoding='utf-8') as f:
text = f.readline().strip("\n")
text = normalize_nonchar(text)
for grapheme in text.split(" "):
if not grapheme in done:
phoneme = " ".join(g2p(grapheme))
filelist.write("{}\t{}\n".format(grapheme, phoneme))
done.add(grapheme)
filelist.close()
def apply_fixed_text(preprocess_config):
in_dir = preprocess_config["path"]["corpus_path"]
sub_dir = preprocess_config["path"]["sub_dir_name"]
out_dir = preprocess_config["path"]["raw_path"]
fixed_text_path = preprocess_config["path"]["fixed_text_path"]
cleaners = preprocess_config["preprocessing"]["text"]["text_cleaners"]
fixed_text_dict = dict()
print("Fixing transcripts...")
with open(fixed_text_path, 'r', encoding='utf-8') as f:
for line in tqdm(f.readlines()):
wav, fixed_text = line.split('|')[0], line.split('|')[1]
session = '_'.join(wav.split('_')[1:])
fixed_text_dict[wav] = fixed_text.replace('\n', '')
text = _clean_text(fixed_text, cleaners)
with open(
os.path.join(out_dir, sub_dir, session, "{}.lab".format(wav)),
"w",
) as f1:
f1.write(text)
filelist_fixed = open(f'{out_dir}/filelist_fixed.txt', 'w', encoding='utf-8')
with open(f'{out_dir}/filelist.txt', 'r', encoding='utf-8') as filelist:
for line in tqdm(filelist.readlines()):
wav = line.split('|')[0]
if wav in fixed_text_dict:
filelist_fixed.write("|".join([line.split("|")[0]] + [fixed_text_dict[wav]] + line.split("|")[2:]))
else:
filelist_fixed.write(line)
filelist_fixed.close()
os.remove(f'{out_dir}/filelist.txt')
os.rename(f'{out_dir}/filelist_fixed.txt', f'{out_dir}/filelist.txt')
extract_lexicon(preprocess_config) | 35.625 | 115 | 0.6 | 503 | 3,990 | 4.594433 | 0.228628 | 0.090004 | 0.06058 | 0.049762 | 0.196884 | 0.163566 | 0.077456 | 0.025097 | 0 | 0 | 0 | 0.008949 | 0.24386 | 3,990 | 112 | 116 | 35.625 | 0.757043 | 0.02381 | 0 | 0.086957 | 0 | 0 | 0.154124 | 0.052577 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0 | 0.152174 | 0 | 0.184783 | 0.054348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c593574d2724aade273fb4cc825a1c1c30bfd59 | 668 | py | Python | SP/Modul_04/repl_conv_error.py | edu-sense-com/OSE-Python-Course | cbf93e18b0cdbcaf54483f6fac5faafd372de068 | [
"MIT"
] | null | null | null | SP/Modul_04/repl_conv_error.py | edu-sense-com/OSE-Python-Course | cbf93e18b0cdbcaf54483f6fac5faafd372de068 | [
"MIT"
] | null | null | null | SP/Modul_04/repl_conv_error.py | edu-sense-com/OSE-Python-Course | cbf93e18b0cdbcaf54483f6fac5faafd372de068 | [
"MIT"
] | null | null | null | # przykładowy skrypt do wykonywania w trybie REPL
# każda linia pojedynczo
# wykazujemy błąd przy braku konwersji
value_float = 3.1415927
input_data = input("Please, give me some number:")
input_data
type(input_data)
new_value = input_data * value_float
# !! an error will occur here
# Traceback (most recent call last):
# File "/usr/lib/python3.8/idlelib/run.py", line 559, in runcode
# exec(code, self.locals)
# File "<pyshell#4>", line 1, in <module>
# TypeError: can't multiply sequence by non-int of type 'float'
# correct execution
input_data = float(input_data)
input_data
type(input_data)
new_value = input_data * value_float
new_value
type(new_value)
| 24.740741 | 66 | 0.75 | 103 | 668 | 4.708738 | 0.640777 | 0.16701 | 0.057732 | 0.074227 | 0.202062 | 0.202062 | 0.202062 | 0.202062 | 0.202062 | 0.202062 | 0 | 0.026455 | 0.151198 | 668 | 26 | 67 | 25.692308 | 0.828924 | 0.567365 | 0 | 0.545455 | 0 | 0 | 0.101083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c59d0a32ca5f76f527beb43bb5b0f973913cae4 | 2,753 | py | Python | custom_components/weenect/binary_sensor.py | eifinger/homeassistant-conf | 170e22a7d3ea1339318ba9823fd9e2eb1be47869 | [
"MIT"
] | 60 | 2018-07-21T04:17:25.000Z | 2021-12-11T18:48:28.000Z | custom_components/weenect/binary_sensor.py | eifinger/homeassistant-conf | 170e22a7d3ea1339318ba9823fd9e2eb1be47869 | [
"MIT"
] | 1 | 2018-08-16T06:44:46.000Z | 2018-11-02T11:32:54.000Z | custom_components/weenect/binary_sensor.py | eifinger/homeassistant-conf | 170e22a7d3ea1339318ba9823fd9e2eb1be47869 | [
"MIT"
] | 3 | 2019-12-06T04:15:55.000Z | 2022-03-13T21:16:45.000Z | """Binary_sensor platform for weenect."""
import logging
from typing import Any, Dict, List
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import BINARY_SENSOR_TYPES, DOMAIN, TRACKER_ADDED
from .entity import WeenectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the weenect binary_sensors."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
@callback
def async_add_binary_sensors(
added: List[int],
) -> None:
"""Add binary_sensors callback."""
sensors: list = []
for tracker_id in added:
for sensor_type in BINARY_SENSOR_TYPES:
sensors.append(
WeenectBinarySensor(coordinator, tracker_id, sensor_type)
)
async_add_entities(sensors, True)
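# The dispatcher connection below re-runs this callback whenever the TRACKER_ADDED signal reports new trackers.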
unsub_dispatcher = async_dispatcher_connect(
hass,
f"{config_entry.entry_id}_{TRACKER_ADDED}",
async_add_binary_sensors,
)
coordinator.unsub_dispatchers.append(unsub_dispatcher)
if len(coordinator.data) > 0:
async_add_binary_sensors(coordinator.data.keys())
class WeenectBinarySensor(WeenectEntity, BinarySensorEntity):
"""weenect binary_sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
tracker_id: str,
sensor_type: Dict[str, Any],
):
super().__init__(coordinator, tracker_id)
self._device_class = sensor_type["device_class"]
self._value_name = sensor_type["value_name"]
self._enabled = sensor_type["enabled"]
self._name = sensor_type["name"]
@property
def name(self):
"""Return the name of this tracker."""
if self.id in self.coordinator.data:
return f"{self.coordinator.data[self.id]['name']} {self._name}"
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return f"{self.id}_{self._value_name}"
@property
def is_on(self):
"""Return True if the binary sensor is on."""
if self.id in self.coordinator.data:
return self.coordinator.data[self.id]["position"][0][self._value_name]
@property
def device_class(self):
"""Device class of this entity."""
return self._device_class
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled
| 31.643678 | 93 | 0.678169 | 325 | 2,753 | 5.476923 | 0.267692 | 0.039326 | 0.035955 | 0.035393 | 0.130337 | 0.039326 | 0.039326 | 0.039326 | 0 | 0 | 0 | 0.000939 | 0.226299 | 2,753 | 86 | 94 | 32.011628 | 0.834742 | 0.113331 | 0 | 0.118644 | 0 | 0 | 0.06822 | 0.045339 | 0 | 0 | 0 | 0 | 0 | 1 | 0.118644 | false | 0 | 0.135593 | 0 | 0.355932 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c5bde26885a60d71d8e27f13e75905e3607b148 | 408 | py | Python | pp/samples/30_metadata.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | 8 | 2020-08-25T11:25:18.000Z | 2022-03-27T11:32:11.000Z | pp/samples/30_metadata.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | null | null | null | pp/samples/30_metadata.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | 1 | 2022-03-04T07:03:29.000Z | 2022-03-04T07:03:29.000Z | """ # Metadata
Together with the GDS files that we send to the foundries we also store some .JSON dictionaries for each cell containing all the settings that we used to build the GDS.
By default the metadata will consists of all the parameters that were passed to the component function.
"""
if __name__ == "__main__":
import pp
c = pp.c.waveguide()
print(c.settings)
print(c.get_json())
| 29.142857 | 168 | 0.727941 | 65 | 408 | 4.430769 | 0.661538 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.203431 | 408 | 13 | 169 | 31.384615 | 0.886154 | 0.693627 | 0 | 0 | 0 | 0 | 0.068376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c5ff9aa783f8d07e9a2e835af14bd73daef2f3d | 567 | py | Python | project_axf/axf/urls.py | mychristopher/test | 9977d36bab3fcc47f0e1dd42bbf5a99b39112a2f | [
"Apache-2.0"
] | null | null | null | project_axf/axf/urls.py | mychristopher/test | 9977d36bab3fcc47f0e1dd42bbf5a99b39112a2f | [
"Apache-2.0"
] | null | null | null | project_axf/axf/urls.py | mychristopher/test | 9977d36bab3fcc47f0e1dd42bbf5a99b39112a2f | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from .views import *
urlpatterns = [
url(r"^home$", home, name="home"),
url(r"^market/(\d+)/(\d+)/(\d+)", market, name="market"),
url(r"^cart$", cart, name="cart"),
url(r"^mine$", mine, name="mine"),
url(r"^register$", register, name="register"),
url(r"^login$", login_api, name='login'),
url(r"^logout$", logout_api, name='logout'),
url(r"^cart_api$", cart_api),
url(r"^cartitem_change$", cart_item_change),
url(r"^cart_item_select$", select_cart_item),
url(r"^select_all$", select_all)
] | 35.4375 | 61 | 0.610229 | 84 | 567 | 3.964286 | 0.285714 | 0.132132 | 0.072072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149912 | 567 | 16 | 62 | 35.4375 | 0.690871 | 0 | 0 | 0 | 0 | 0 | 0.285211 | 0.044014 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c63e9be8b953fdcdf4a042328c184f7a32b5075 | 1,691 | py | Python | python/dense.py | tczhangzhi/pytorch-parallel | 8d8baf80dd48234386051d0bab616de5b55f8f5c | [
"MIT"
] | 117 | 2018-12-25T08:58:24.000Z | 2022-03-21T05:51:03.000Z | python/dense.py | tczhangzhi/pytorch-dense | 8d8baf80dd48234386051d0bab616de5b55f8f5c | [
"MIT"
] | 4 | 2019-12-24T07:35:59.000Z | 2022-02-09T12:48:12.000Z | python/dense.py | tczhangzhi/pytorch-dense | 8d8baf80dd48234386051d0bab616de5b55f8f5c | [
"MIT"
] | 25 | 2018-12-26T05:40:11.000Z | 2022-02-02T17:20:45.000Z | import torch
from torch.nn import Module, Parameter
from torch.autograd import Function
class DenseFunction(Function):
@staticmethod
def forward(ctx, input, weight, bias=None):
output = input.mm(weight.t())
if bias is not None:
output += bias.unsqueeze(0).expand_as(output)
output = torch.sigmoid(output)
ctx.save_for_backward(input, weight, bias, output)
return output
@staticmethod
def backward(ctx, grad_output):
input, weight, bias, output = ctx.saved_tensors
grad_sigmoid = (1.0 - output) * output
grad_output = grad_sigmoid * grad_output
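# Chain rule through the fused sigmoid: d(sigmoid(z))/dz = sigmoid(z) * (1 - sigmoid(z)),
# so the incoming gradient is scaled by output * (1 - output) before the linear backward.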
grad_input = grad_weight = grad_bias = None
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(weight)
if ctx.needs_input_grad[1]:
grad_weight = grad_output.t().mm(input)
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0).squeeze(0)
return grad_input, grad_weight, grad_bias
class Dense(Module):
def __init__(self, input_features, output_features, bias=True):
super(Dense, self).__init__()
self.input_features = input_features
self.output_features = output_features
self.weight = Parameter(torch.Tensor(output_features, input_features))
if bias:
self.bias = Parameter(torch.Tensor(output_features))
else:
self.register_parameter('bias', None)
self.weight.data.uniform_(-0.1, 0.1)
if bias:  # bias is a bool flag; testing "is not None" would wrongly touch self.bias when bias=False
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input):
return DenseFunction.apply(input, self.weight, self.bias) | 36.76087 | 78 | 0.648729 | 221 | 1,691 | 4.751131 | 0.244344 | 0.057143 | 0.042857 | 0.031429 | 0.22381 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0.012668 | 0.253105 | 1,691 | 46 | 79 | 36.76087 | 0.818686 | 0 | 0 | 0.1 | 0 | 0 | 0.002364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.075 | 0.025 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c644404bd1cbc3bab74f5563ba0c009855835db | 2,608 | py | Python | easy_maps/templatetags/easy_maps_tags.py | bokoio/exemplodjango | b6b40d271aaabd58358b38c6717f34667f7d2607 | [
"MIT"
] | null | null | null | easy_maps/templatetags/easy_maps_tags.py | bokoio/exemplodjango | b6b40d271aaabd58358b38c6717f34667f7d2607 | [
"MIT"
] | null | null | null | easy_maps/templatetags/easy_maps_tags.py | bokoio/exemplodjango | b6b40d271aaabd58358b38c6717f34667f7d2607 | [
"MIT"
] | null | null | null | #coding: utf-8
from django import template
from django.template.loader import render_to_string
from easy_maps.models import Address
from django.conf import settings
register = template.Library()
@register.tag
def easy_map(parser, token):
"""
The syntax:
{% easy_map <address> [<width> <height>] [<zoom>] [using <template_name>] %}
The "address" parameter can be an Address instance or a string describing it.
If an address is not found a new entry is created in the database.
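Examples (hypothetical address and dimensions):
{% easy_map "Some Street 1, City" 300 200 10 %}
{% easy_map object.address using "easy_maps/map.html" %}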
"""
width, height, zoom, template_name = None, None, None, None
params = token.split_contents()
# pop the template name
if params[-2] == 'using':
template_name = params[-1]
params = params[:-2]
if len(params) < 2:
raise template.TemplateSyntaxError('easy_map tag requires address argument')
address = params[1]
if len(params) == 4:
width, height = params[2], params[3]
elif len(params) == 5:
width, height, zoom = params[2], params[3], params[4]
elif len(params) == 3 or len(params) > 5:
raise template.TemplateSyntaxError('easy_map tag has the following syntax: '
'{% easy_map <address> <width> <height> [zoom] [using <template_name>] %}')
return EasyMapNode(address, width, height, zoom, template_name)
class EasyMapNode(template.Node):
def __init__(self, address, width, height, zoom, template_name):
self.address = template.Variable(address)
self.width = width or ''
self.height = height or ''
self.zoom = zoom or 16
self.template_name = template.Variable(template_name or '"easy_maps/map.html"')
def get_map(self, address):
if isinstance(address, Address):
return address
if not address:
map_ = Address(latitude=settings.EASY_MAPS_CENTER[0],
longitude=settings.EASY_MAPS_CENTER[1])
else:
map_, _ = Address.objects.get_or_create(address=address)
return map_
def render(self, context):
try:
address = self.address.resolve(context)
template_name = self.template_name.resolve(context)
map_ = self.get_map(address)
context.update({
'map': map_,
'width': self.width,
'height': self.height,
'zoom': self.zoom,
'template_name': template_name
})
return render_to_string(template_name, context_instance=context)
except template.VariableDoesNotExist:
return ''
| 33.435897 | 94 | 0.618482 | 310 | 2,608 | 5.06129 | 0.293548 | 0.107075 | 0.057361 | 0.056087 | 0.18037 | 0.163161 | 0.066284 | 0.066284 | 0.066284 | 0.066284 | 0 | 0.010058 | 0.27569 | 2,608 | 77 | 95 | 33.87013 | 0.82054 | 0.105061 | 0 | 0 | 0 | 0 | 0.088937 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c655100937e4260b19bb1f7c484cbd6ec34ff3f | 676 | py | Python | simple_strategy/3_pts_str.py | unball/strategy | 51a9bd0b7c55222712dd655ceaa85fa1099dcf60 | [
"MIT"
] | 1 | 2017-11-27T12:49:03.000Z | 2017-11-27T12:49:03.000Z | simple_strategy/3_pts_str.py | unball/strategy | 51a9bd0b7c55222712dd655ceaa85fa1099dcf60 | [
"MIT"
] | null | null | null | simple_strategy/3_pts_str.py | unball/strategy | 51a9bd0b7c55222712dd655ceaa85fa1099dcf60 | [
"MIT"
] | null | null | null | import rospy
import math
from measurement_system.msg import measurement_msg
from communication.msg import target_positions_msg
k = 1
def set_3_pnts(msg):
msg.y[0] = 0.5 * k
msg.x[0] = 0.5
msg.y[1] = 0.5 * k
msg.x[1] = 0
msg.y[2] = 0.5 * k
msg.x[2] = -0.5
def callback(data):
global k
msg = target_positions_msg()
set_3_pnts(msg)
pub.publish(msg)
def start():
global pub
pub = rospy.Publisher('target_positions_topic', target_positions_msg, queue_size=10)
rospy.Subscriber('measurement_system_topic', measurement_msg, callback)
rospy.spin()
if __name__ == '__main__':
rospy.init_node('strategy')
start()
| 19.314286 | 88 | 0.66568 | 107 | 676 | 3.953271 | 0.373832 | 0.023641 | 0.12766 | 0.042553 | 0.049645 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041199 | 0.210059 | 676 | 34 | 89 | 19.882353 | 0.750936 | 0 | 0 | 0 | 0 | 0 | 0.091716 | 0.068047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.16 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a73c45bceccf55a6085a11bd62a1f8c20158ebc7 | 1,129 | py | Python | rltorch/q_function/continuous.py | cindycia/Atari-SAC-Discrete | 5d92339f3efbac34488a14db024499b8951fc3b3 | [
"MIT"
] | 16 | 2019-11-15T13:37:20.000Z | 2022-01-24T10:29:38.000Z | rltorch/q_function/continuous.py | cindycia/Atari-SAC-Discrete | 5d92339f3efbac34488a14db024499b8951fc3b3 | [
"MIT"
] | 1 | 2020-05-09T18:24:21.000Z | 2020-05-10T12:44:39.000Z | rltorch/q_function/continuous.py | ku2482/rltorch | 7819af49d95bfa268e00413a7606564b0e7286a7 | [
"MIT"
] | 3 | 2020-12-21T08:21:15.000Z | 2022-01-24T10:29:43.000Z | import torch
from rltorch.network import BaseNetwork, create_linear_network
class ContinuousLinearQNetwork(BaseNetwork):
def __init__(self, input_dim, output_dim, hidden_units=[],
initializer='xavier'):
super(ContinuousLinearQNetwork, self).__init__()
self.Q = create_linear_network(
input_dim + output_dim, 1, hidden_units=hidden_units,
initializer=initializer)
def forward(self, states, actions):
x = torch.cat([states, actions], dim=1)
Q = self.Q(x)
return Q
class TwinnedContinuousLinearQNetwork(BaseNetwork):
def __init__(self, input_dim, output_dim, hidden_units=[],
initializer='xavier'):
super(TwinnedContinuousLinearQNetwork, self).__init__()
self.Q1 = ContinuousLinearQNetwork(
input_dim, output_dim, hidden_units, initializer)
self.Q2 = ContinuousLinearQNetwork(
input_dim, output_dim, hidden_units, initializer)
def forward(self, states, actions):
Q1 = self.Q1(states, actions)
Q2 = self.Q2(states, actions)
return Q1, Q2
| 32.257143 | 65 | 0.66519 | 118 | 1,129 | 6.059322 | 0.271186 | 0.092308 | 0.097902 | 0.118881 | 0.468531 | 0.468531 | 0.377622 | 0.377622 | 0.201399 | 0.201399 | 0 | 0.011696 | 0.242693 | 1,129 | 34 | 66 | 33.205882 | 0.824561 | 0 | 0 | 0.32 | 0 | 0 | 0.010629 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.08 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a73c9cdaac37c233a8038c6917861556d6f1ca25 | 2,964 | py | Python | models/naiveresnet.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | [
"MIT"
] | null | null | null | models/naiveresnet.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | [
"MIT"
] | null | null | null | models/naiveresnet.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride, t=2):
super().__init__()
# compute Z[L+2]
mid_channels = in_channels * t
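# Inverted-bottleneck expansion: a 1x1 conv widens to mid_channels, a 3x3 depthwise conv
# (groups=mid_channels) filters spatially, and a final 1x1 conv projects to out_channels.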
self.conv = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=mid_channels, out_channels=mid_channels, kernel_size=3, stride=stride, padding=1, groups=mid_channels),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1),
nn.BatchNorm2d(out_channels)
)
# self.relu = nn.ReLU(inplace=True)
# downsample a[L] in case there is stride in conv1
if stride != 1:
assert stride == 2
self.downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
nn.BatchNorm2d(out_channels))
else:
self.downsample = None
def forward(self, x):
identity = x.clone()
if self.downsample:
identity = self.downsample(identity)
return self.conv(x) + identity
class GlobalAveragePooling(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return nn.functional.avg_pool2d(x, x.size()[2:])
class NaiveResNet(nn.Module):
def __init__(self, num_classes):
super().__init__()
self.groups = nn.ModuleList([
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1, stride=2),
self._build_group(in_channels=32, out_channels=64, stride=2, num_blocks=2),
self._build_group(in_channels=64, out_channels=128, stride=2, num_blocks=2),
self._build_group(in_channels=128, out_channels=256, stride=2, num_blocks=2),
self._build_group(in_channels=256, out_channels=512, stride=2, num_blocks=2)
])
self.globalavgpool = GlobalAveragePooling()
self.conv = nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=num_classes, kernel_size=1)  # use the num_classes argument instead of a hard-coded 200
)
def forward(self, x):
for group in self.groups:
x = group(x)
# global average pooling
x = self.globalavgpool(x)
x = self.conv(x)
x = x.view(x.size(0), -1)
return x
def _build_group(self, in_channels, out_channels, stride, num_blocks):
layers = []
layers.append(ResidualBlock(in_channels=in_channels, out_channels=out_channels, stride=stride))
for _ in range(num_blocks - 1):
layers.append(ResidualBlock(in_channels=out_channels, out_channels=out_channels, stride=1))
return nn.Sequential(*layers)
| 37.05 | 137 | 0.630229 | 383 | 2,964 | 4.624021 | 0.201044 | 0.130435 | 0.139469 | 0.071146 | 0.488425 | 0.409938 | 0.309994 | 0.286279 | 0.160926 | 0.160926 | 0 | 0.032757 | 0.258435 | 2,964 | 79 | 138 | 37.518987 | 0.772975 | 0.040486 | 0 | 0.196721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 1 | 0.114754 | false | 0 | 0.032787 | 0.016393 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a73d61ec89908c6c6ef9983367ab58159a17b9de | 6,291 | py | Python | shinobiaccess.py | Kaoline/ShinobiTool | c417b254808356978613ef7227771f6da1b6ebd4 | [
"MIT"
] | null | null | null | shinobiaccess.py | Kaoline/ShinobiTool | c417b254808356978613ef7227771f6da1b6ebd4 | [
"MIT"
] | 5 | 2018-03-15T22:49:18.000Z | 2018-05-15T03:25:58.000Z | shinobiaccess.py | Kaoline/ShinobiTool | c417b254808356978613ef7227771f6da1b6ebd4 | [
"MIT"
] | null | null | null | # file --shinobiaccess.py--
import re  # used by get_encoding()
import requests
from multiprocessing import Queue # Resolves Import errors
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from tkinter import *
from bs4 import BeautifulSoup
import time
# -----------------------------------------
# Model
# -----------------------------------------
class ShinobiAccess:
"""Interface with Shinobi.fr, to connect, send messages and do some ranking searches"""
# Connection block
def __init__(self):
self.session = requests.Session()
self.encoding = None
def get_encoding(self):
r = requests.get('http://www.shinobi.fr/')
soup = BeautifulSoup(r.text, "html.parser")
self.encoding = re.search('charset=(.*)', soup.head.meta["content"]).group(1)
def connect(self, login, password):
self.session.get('http://www.shinobi.fr/index.php?page=deconnexion')
login_params = {'login': login, 'pass': password}
r = self.session.post('http://www.shinobi.fr/index.php?page=connexion', login_params)
connected = r.text.find("<a href='index.php?page=jeu'> Jouer </a>") != -1
if connected:
self.login = login
return connected
def deconnect(self):
self.session.get('http://www.shinobi.fr/index.php?page=deconnexion')
self.login = None
# PMer
def send_message(self, receiver, title, message_content):
"""Needs connection"""
# print("Starting at " + time.strftime("%H:%M:%S"))
try:
title = title.replace("%pseudo%", receiver)
message_content = message_content.replace("%pseudo%", receiver)
if self.encoding is None:
self.get_encoding()
self.session.get('http://www.shinobi.fr/index.php?page=menu-messagerie-nouveau')
payload = {'destinataire': receiver.encode(self.encoding, "xmlcharrefreplace"),
'sujet': title.encode(self.encoding, "xmlcharrefreplace"),
'message': message_content.encode(self.encoding, "xmlcharrefreplace"), 'envoi': 1}
self.session.post('http://www.shinobi.fr/index.php?page=menu-messagerie', payload)
except Exception as error:
print("Problème à l'envoi au destinataire " + receiver + ".\nErreur : " + str(error))
# print("Finished at " + time.strftime("%H:%M:%S"))
print(time.strftime("%H:%M:%S") + " > " + receiver + " ok")
# Ranking search
def get_shinobis(self, ranking, min_page, max_page, min_lvl, max_lvl, village, classe, team, min_evo, max_evo, min_points):
print("Starting at " + time.strftime("%H:%M:%S"))
link = "http://www.shinobi.fr/index.php?page=classement&type=classement_joueurs"
if ranking == "weekly":
link += "_hebdomadaire"
if village is not None:
link += '&village=' + village.lower()
link += "&p="
time1 = time.time()
partial_search = partial(self.search_ranking_page, ranking_link=link, min_lvl=min_lvl, max_lvl=max_lvl,
village=village, classe=classe, team=team, min_evo=min_evo, max_evo=max_evo, min_points=min_points)
pool = ThreadPool()
shinoobs = pool.map(partial_search, range(min_page, max_page + 1))
pool.close()
pool.join()
shinoobs = [item for sublist in shinoobs for item in sublist]
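# Flatten the per-page result lists returned by the thread pool into a single list of names.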
time2 = time.time()
print("Temps de recherche (secondes) : " + str(time2 - time1))
print("Finished at " + time.strftime("%H:%M:%S"))
return shinoobs
def search_ranking_page(self, page_number, ranking_link, min_lvl, max_lvl, village, classe, team, min_evo, max_evo, min_points):
shinoobs = []
page = self.session.get(ranking_link + str(page_number))
soup = BeautifulSoup(page.text, "html.parser")
table = soup.find(id="classement_general")
for tr in table.find_all("tr")[1:]:
try:
name = tr.find(class_="nom").a.text
has_team = (tr.find(class_="equipe").a.text) != ""
lvl = int(tr.find(class_="equipe").next_sibling.text)
clazz_img = tr.find(class_="village").previous_sibling.img
clazz = None if clazz_img is None else clazz_img["alt"]
sVillage = tr.find(class_="village").a.span.text
evo = int(tr.find(class_="evolution").text[1:].replace(",", ""))
points = float(tr.find(class_="points").text.replace(",", ""))
if min_lvl <= lvl <= max_lvl and (village is None or sVillage == village.lower()) and (clazz in classe) and (team is None or (team == has_team)) and min_evo <= evo <= max_evo and points >= min_points:
shinoobs.append(name)
except Exception as ec:
# print("Problem at page " + str(page_number))
print(ec)
print("Page " + str(page_number) + " ok")
return shinoobs
# Delete PMs
def wipe_pms(self, nbToDelete):
nbPages = nbToDelete // 20
nbMessagesLastPage = nbToDelete % 20
for page in range(nbPages):
self.delete_message(20)
print("Page " + str(page+1) + "/" + str(nbPages) + " deleted")
self.delete_message(nbMessagesLastPage)
print(str(nbMessagesLastPage) + " messages from last page deleted. " + str(nbToDelete) + " total pages deleted.")
def delete_message(self, nbToDelete):
page = self.session.get("http://www.shinobi.fr/index.php?page=menu-messagerie")
soup = BeautifulSoup(page.text, "html.parser")
table = soup.find(id="messagerie")
for tr in table.find_all("tr")[1:nbToDelete + 1]:
suppr = tr.find_all(class_="icon")[1].a["href"]
# print(suppr)
self.session.get("http://www.shinobi.fr/" + suppr)
# Shop
def is_in_shop(self):
page = self.session.get("http://www.shinobi.fr/index.php?page=moteur_boutique&categorie=normaux")
soup = BeautifulSoup(page.text, "html.parser")
state = soup.find(id="etatmsg").text
return not ("Vous n'êtes pas au bon endroit pour effectuer cette action." in state or "Vous n'êtes pas aux Commerces !" in state)
| 47.300752 | 216 | 0.602925 | 773 | 6,291 | 4.795602 | 0.266494 | 0.026706 | 0.037766 | 0.043162 | 0.236579 | 0.222552 | 0.21311 | 0.197464 | 0.153224 | 0.149447 | 0 | 0.004427 | 0.245907 | 6,291 | 132 | 217 | 47.659091 | 0.776981 | 0.071372 | 0 | 0.089109 | 0 | 0 | 0.193537 | 0.004469 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09901 | false | 0.019802 | 0.069307 | 0 | 0.217822 | 0.089109 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a73fdd4bd845e815cf10a51fba7d3d3a64f2f5ec | 3,902 | py | Python | tests/test_client_db.py | machine23/hanita | 5a7d51dc7a08e0633925ee21ca30ccee5a7547eb | [
"MIT"
] | null | null | null | tests/test_client_db.py | machine23/hanita | 5a7d51dc7a08e0633925ee21ca30ccee5a7547eb | [
"MIT"
] | 5 | 2021-03-18T19:55:31.000Z | 2022-03-11T23:11:37.000Z | tests/test_client_db.py | machine23/hanita | 5a7d51dc7a08e0633925ee21ca30ccee5a7547eb | [
"MIT"
] | null | null | null | import pytest
import sqlite3
import time
from hanita_JIM import JIMClientMessage, JIMMessage
from hanita import ClientDB, ClientDBError
@pytest.fixture
def db():
client_db = ClientDB()
cmd_user = "INSERT INTO users(user_id, user_name) VALUES (?, ?)"
client_db.cursor.execute(cmd_user, (1, "user1"))
client_db.conn.commit()
cmd_chat = "INSERT INTO chats(chat_id, chat_name) VALUES (?, ?)"
client_db.cursor.execute(cmd_chat, (1, "chat1"))
client_db.conn.commit()
# cmd_chat_user = "INSERT INTO chat_users(user_id, chat_id) VALUES (?, ?)"
# client_db.cursor.execute(cmd_chat_user, (1, 1))
# client_db.cursor.execute(cmd_chat_user, (2, 1))
# client_db.cursor.execute(cmd_chat_user, (1, 2))
# client_db.conn.commit()
msg1 = JIMClientMessage.msg(1, "Hello", 3.3)
# msg2 = JIMClientMessage.msg(2, 1, "Hi")
# msg3 = JIMClientMessage.msg(1, 2, "Good!")
data1 = (1, 1, msg1.chat_id, msg1.timestamp, msg1.message)
# data2 = (msg2.from_user, msg2.to_user, msg2.time, msg2.message)
# data3 = (msg3.from_user, msg3.to_user, msg3.time, msg3.message)
cmd = """INSERT INTO messages(msg_id, user_id, chat_id, time, message)
VALUES (?, ?, ?, ?, ?)"""
# for data in [data1, data2, data3]:
client_db.cursor.execute(cmd, data1)
client_db.conn.commit()
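# Seeded fixture state: one user (1, "user1"), one chat (1, "chat1") and a single message with msg_id 1.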
yield client_db
client_db.close()
def test_user_exists(db):
assert db.user_exists("1")
assert not db.user_exists("5")
def test_add_user(db):
db.add_user(4, "user4")
assert db.user_exists(4)
with pytest.raises(ClientDBError):
db.add_user(4, "user5")
def test_get_user(db):
assert db.get_user(1) == {"user_id": 1, "user_name": "user1"}
assert db.get_user(5) == {}
def test_update_user(db):
db.update_user(1, "updated_name")
assert db.get_user(1)["user_name"] == "updated_name"
db.update_user(1, "another_name")
assert db.get_user(1)["user_name"] == "another_name"
db.update_user(5, "user5")
assert db.get_user(5) == {"user_id": 5, "user_name": "user5"}
def test_chat_exists(db):
assert db.chat_exists(1)
assert not db.chat_exists(5)
def test_get_chats(db):
result = db.get_chats()
expect = ["chat" + str(i) for i in range(3)]
assert result == expect
def test_add_chat(db):
db.add_chat(2, "new_chat")
assert db.chat_exists(2)
with pytest.raises(ClientDBError):
db.add_chat(2, "another_chat")
def test_get_chat(db):
expect = {
"chat_id": 1,
"chat_name": "chat1",
"read_time": None
}
assert db.get_chat(1) == expect
assert db.get_chat(5) == {}
def test_set_chat_readed(db):
db.set_chat_readed(1)
assert time.time() - db.get_chat(1)["read_time"] < 0.1
def test_del_chat(db):
db.del_chat(1)
assert db.chat_exists(1) is False
db.del_chat(1)
def test_get_chats(db):
assert db.get_chats() == [1]
db.add_chat(2, "chat2")
assert db.get_chats() == [1, 2]
for i in db.get_chats():
db.del_chat(i)
assert db.get_chats() == []
def test_msg_exists(db):
assert db.msg_exists(1) is True
assert db.msg_exists(2) is False
def test_add_msg(db):
msg2 = {"msg_id":2, "user_id":1, "chat_id":1, "timestamp":5.5,
"message":"message text 2"}
# msg2.msg_id = 2
db.add_msg(**msg2)
assert db.msg_exists(2)
def test_get_msg(db):
expect = {
"msg_id": 1,
"user_id": 1,
"chat_id": 1,
"timestamp": 3.3,
"message": "Hello",
"readed": 0
}
assert db.get_msg(1) == expect
assert db.get_msg(2) == {}
def test_get_msgs(db):
msgs = db.get_msgs(1)
assert msgs == [1]
db.add_msg(5, 1, 1, 5.5, "message")
assert db.get_msgs(1) == [1, 5]
db.del_chat(1)
assert db.get_msgs(1) == []
###############################################################################
| 25.84106 | 79 | 0.613275 | 600 | 3,902 | 3.756667 | 0.143333 | 0.078083 | 0.068323 | 0.055901 | 0.3252 | 0.20142 | 0.124224 | 0.06921 | 0 | 0 | 0 | 0.037675 | 0.210917 | 3,902 | 150 | 80 | 26.013333 | 0.694381 | 0.128652 | 0 | 0.14 | 0 | 0 | 0.154427 | 0 | 0 | 0 | 0 | 0 | 0.27 | 1 | 0.16 | false | 0 | 0.05 | 0 | 0.21 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74004045f2bd02697767b8892dd72a7a078150f | 4,280 | py | Python | eval.py | haoala/CSGStumpNet | f680952e2c0445275efc51a6defcfef54ef80450 | [
"MIT"
] | 20 | 2021-08-25T02:23:21.000Z | 2022-02-17T04:01:32.000Z | eval.py | haoala/CSGStumpNet | f680952e2c0445275efc51a6defcfef54ef80450 | [
"MIT"
] | 5 | 2021-09-06T23:04:18.000Z | 2022-03-25T10:11:18.000Z | eval.py | haoala/CSGStumpNet | f680952e2c0445275efc51a6defcfef54ef80450 | [
"MIT"
] | 5 | 2021-08-31T06:38:24.000Z | 2022-03-24T15:29:20.000Z | import os
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataset import ShapeNet
from loss import Loss
from config import Config
from model import CSGStumpNet
from utils import generate_mesh
import argparse
def eval(config):
test_dataset = ShapeNet(partition='test', category=config.category, shapenet_root=config.dataset_root, balance=config.balance,num_surface_points=config.num_surface_points, num_sample_points=config.num_sample_points)
test_loader = DataLoader(test_dataset, pin_memory=True, num_workers=20, batch_size=config.test_batch_size_per_gpu*config.num_gpu, shuffle=False, drop_last=True)
device = torch.device("cuda")
model = CSGStumpNet(config).to(device)
pre_train_model_path = './checkpoints/%s/models/model.th' % config.experiment_name
assert os.path.exists(pre_train_model_path), "Cannot find pre-train model for experiment: {}\nNo such a file: {}".format(config.experiment_name, pre_train_model_path)
model.load_state_dict(torch.load('./checkpoints/%s/models/model.th' % config.experiment_name))
# model = nn.DataParallel(model)
print("Let's use", torch.cuda.device_count(), "GPUs!")
criterion = Loss(config)
model.eval()
start_time = time.time()
test_iter = 0
with torch.no_grad():
testloader_t = tqdm(test_loader)
avg_test_loss_recon = avg_test_loss_primitive = avg_test_loss = avg_test_accuracy = avg_test_recall = 0
for surface_pointcloud, testing_points in testloader_t:
surface_pointcloud = surface_pointcloud.to(device)
testing_points = testing_points.to(device)
occupancies, primitive_sdfs = model(surface_pointcloud.transpose(2,1), testing_points[:,:,:3], is_training=False)
loss_dict = criterion(occupancies, testing_points[:,:,-1], primitive_sdfs)
predict_occupancies = (occupancies >=0.5).float()
target_occupancies = (testing_points[:,:,-1] >=0.5).float()
accuracy = torch.sum(predict_occupancies*target_occupancies)/torch.sum(target_occupancies)
recall = torch.sum(predict_occupancies*target_occupancies)/(torch.sum(predict_occupancies)+1e-9)
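# Point-wise occupancy metrics: as defined here, "accuracy" is TP / actual-occupied points (i.e. recall)
# and "recall" is TP / predicted-occupied points (i.e. precision); their harmonic mean below is the F-score.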
avg_test_loss_recon += loss_dict["loss_recon"].item()
avg_test_loss_primitive += loss_dict["loss_primitive"].item()
avg_test_loss += loss_dict["loss_total"].item()
avg_test_accuracy += accuracy.item()
avg_test_recall += recall.item()
generate_mesh(model, surface_pointcloud.transpose(2,1), config, test_iter)
test_iter += 1
exit()
avg_test_loss_recon = avg_test_loss_recon / test_iter
test_accuracy = avg_test_accuracy / test_iter
test_recall = avg_test_recall / test_iter
test_fscore = 2*test_accuracy*test_recall/(test_accuracy + test_recall + 1e-6)
print("Evaluating: time: %4.4f, loss_total: %.6f, loss_recon: %.6f, loss_primitive: %.6f, acc: %.6f, recall: %.6f, fscore: %.6f" % (
time.time() - start_time,
avg_test_loss/test_iter,
avg_test_loss_recon / test_iter,
avg_test_loss_primitive/test_iter,
test_accuracy,
test_recall,
test_fscore))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='EvalPartAwareReconstruction')
parser.add_argument('--config_path', type=str, default='./configs/config_default.json', metavar='N',
help='config_path')
args = parser.parse_args()
config = Config((args.config_path))
eval(config)
| 50.952381 | 219 | 0.595327 | 471 | 4,280 | 5.087049 | 0.288747 | 0.049666 | 0.050501 | 0.033389 | 0.175292 | 0.145659 | 0.102671 | 0.080134 | 0 | 0 | 0 | 0.009854 | 0.312383 | 4,280 | 83 | 220 | 51.566265 | 0.804281 | 0.007009 | 0 | 0 | 0 | 0.015625 | 0.093095 | 0.028282 | 0 | 0 | 0 | 0 | 0.015625 | 1 | 0.015625 | false | 0 | 0.1875 | 0 | 0.203125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a740ef7bbf1c3fa05e7c505f2c1ab6220a5983a0 | 2,394 | py | Python | testing/test_dynamodb.py | andrew-lee-zuora/zsec-aws-tools | eb5224e0f4aa48e474ab66046c064f3b49e39fd7 | [
"BSD-2-Clause"
] | 1 | 2019-08-07T20:36:39.000Z | 2019-08-07T20:36:39.000Z | testing/test_dynamodb.py | andrew-lee-zuora/zsec-aws-tools | eb5224e0f4aa48e474ab66046c064f3b49e39fd7 | [
"BSD-2-Clause"
] | 1 | 2020-07-30T23:47:39.000Z | 2020-07-30T23:47:39.000Z | testing/test_dynamodb.py | zuoralabs/zsec-aws-tools | d836963f1d39c2ba8db2684603095f686ae4303b | [
"BSD-2-Clause"
] | 1 | 2019-08-07T20:37:51.000Z | 2019-08-07T20:37:51.000Z | import string
import random
import boto3
import pytest
import zsec_aws_tools.dynamodb as zaws_dynamodb
import logging
@pytest.fixture
def my_table():
session = boto3.Session(profile_name='test', region_name='us-east-1')
random_str = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
table_x = zaws_dynamodb.Table(name="test-db-" + random_str,
session=session,
config=dict(AttributeDefinitions=[dict(AttributeName='id',
AttributeType='S')],
KeySchema=[dict(AttributeName='id',
KeyType='HASH')],
ProvisionedThroughput=dict(
ReadCapacityUnits=5,
WriteCapacityUnits=5,
)
))
yield table_x
# don't care about consistency with table_x.exists; this is a fixture, not a test
table_x.delete(not_exists_ok=True)
def test_table_creation_and_idempotency_and_deletion(my_table, caplog):
caplog.set_level(logging.CRITICAL)
assert not my_table.exists
my_table.put()
assert my_table.exists
#assert my_queue._detect_existence_using_index_id()
my_table.put() # test idempotency
arn = my_table.arn
assert arn
assert arn.endswith(my_table.name)
assert arn.startswith('arn:aws')
my_table.delete()
my_table.wait_until_not_exists()
assert not my_table.exists
def test_table_arn(my_table, caplog):
caplog.set_level(logging.CRITICAL)
my_table.put()
arn = my_table.arn
assert arn
assert arn.endswith(my_table.name)
assert arn.startswith('arn:aws')
def test_table_set_and_get(my_table: zaws_dynamodb.Table, caplog):
caplog.set_level(logging.CRITICAL)
my_table.put()
put_resp = my_table.boto3_resource().put_item(Item={'id': '123'})
print(put_resp)
query_resp = my_table.boto3_resource().query(
KeyConditionExpression='#K = :v',
ExpressionAttributeNames={'#K': 'id'},
ExpressionAttributeValues={':v': '123'},
)
assert 1 <= query_resp['Count']
| 31.090909 | 98 | 0.581036 | 265 | 2,394 | 4.996226 | 0.373585 | 0.100453 | 0.030211 | 0.045317 | 0.291541 | 0.222054 | 0.222054 | 0.222054 | 0.188822 | 0.188822 | 0 | 0.009913 | 0.325815 | 2,394 | 76 | 99 | 31.5 | 0.810409 | 0.060986 | 0 | 0.314815 | 0 | 0 | 0.031208 | 0 | 0 | 0 | 0 | 0 | 0.185185 | 1 | 0.074074 | false | 0 | 0.111111 | 0 | 0.185185 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a741286702663e15630b612b2b7663fca0067a89 | 404 | py | Python | python_basic/challenge1_conversor_mx.py | carmsanchezs/datacademy | 72bc8671cbd284d7e3a266ea5a8e0afc26af33de | [
"Apache-2.0"
] | null | null | null | python_basic/challenge1_conversor_mx.py | carmsanchezs/datacademy | 72bc8671cbd284d7e3a266ea5a8e0afc26af33de | [
"Apache-2.0"
] | null | null | null | python_basic/challenge1_conversor_mx.py | carmsanchezs/datacademy | 72bc8671cbd284d7e3a266ea5a8e0afc26af33de | [
"Apache-2.0"
] | null | null | null | # convert pesos to dollars
pesos = input("¿Cuántos pesos mexicanos tienes?: ")
pesos = float(pesos)
valor_dolar = 19.90
dolares = pesos / valor_dolar
dolares = round(dolares, 2)
print("Tienes {} dólares".format(dolares))
# convert dollars to pesos
dolares = int(input("Cuántos dolares tienes?: "))
valor_peso = 0.05
pesos = round(dolares / valor_peso, 2)
print("Tienes {} pesos".format(pesos)) | 31.076923 | 51 | 0.727723 | 58 | 404 | 5.017241 | 0.413793 | 0.082474 | 0.103093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025788 | 0.136139 | 404 | 13 | 52 | 31.076923 | 0.805158 | 0.14604 | 0 | 0 | 0 | 0 | 0.265306 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a742d19db43f0b326891b5df41d003a2ebf015e5 | 2,140 | py | Python | solver/cli.py | GuiBeal/termo-solver | 14ee9638424bc1f172bc91e13694a69e5ac49c15 | [
"MIT"
] | null | null | null | solver/cli.py | GuiBeal/termo-solver | 14ee9638424bc1f172bc91e13694a69e5ac49c15 | [
"MIT"
] | null | null | null | solver/cli.py | GuiBeal/termo-solver | 14ee9638424bc1f172bc91e13694a69e5ac49c15 | [
"MIT"
] | 2 | 2022-02-07T18:52:41.000Z | 2022-03-18T23:41:53.000Z | import re
import argparse
import pandas as pd
import os
def printHeader(message):
print(f"+============={message}=============+")
def replacechar(s, index, new):
return s[:index] + new + s[index + 1:]
def main():
parser = argparse.ArgumentParser(description='Helps you play term.ooo!')
parser.add_argument(
'-l', '--lang', default="pt",
help='Language to use, should be "pt" or "en"',)
args = parser.parse_args()
fiveLetter = re.compile("^[a-zA-Z]{5}$")
guessRegexp = re.compile("^[yg\-]{5}$")
print("loading...")
import solver.guess as guess
print("press q to quit... ")
command=""
printHeader("Open the website and lets begin!")
best_guesses = guess.first_guess()
guesses = []
matches = []
while command !="q":
print("Your best first guesses are:")
print(best_guesses)
print("Type your guess")
print("or type `q` to leave")
print("or type `r` to reset")
command = input("> ")
if fiveLetter.match(command):
word = command
print("Now type your hints (e.g. --yg-):")
print(" or type h for help")
command = input("> ")
if command=="h":
print("example: if you got the first letter yellow and the last letter green")
print(" then type 'y---g'")
continue
if not guessRegexp.match(command):
print("Oops, typed wrong!")
continue
matches.append(command)
guesses.append(word)
best_guesses, subset = guess.get_guess(guesses, matches, return_subset=True)
if(len(subset) > 10):
print(f"{ len(subset)= }")
else:
print(f"{len(subset)} words left:")
print(subset)
if command=="r":
green="-"*5
yellow_pos=[]
gray=""
subset=words
continue
if command=="a":
print(subset)
continue
if __name__=="__main__":
main()
| 24.597701 | 94 | 0.51028 | 238 | 2,140 | 4.516807 | 0.462185 | 0.016744 | 0.030698 | 0.027907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004264 | 0.342523 | 2,140 | 86 | 95 | 24.883721 | 0.759773 | 0 | 0 | 0.129032 | 0 | 0 | 0.235047 | 0.01729 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.080645 | 0.016129 | 0.145161 | 0.306452 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a742fd6927658d0703f122c88539437c39561b48 | 6,290 | py | Python | backend/couscous/v1/invoice/tests/test_views.py | jimmykamau/couscous | 97a1b36e159df39239e3485bd90be0639aa44d38 | [
"MIT"
] | 1 | 2020-10-26T10:23:58.000Z | 2020-10-26T10:23:58.000Z | backend/couscous/v1/invoice/tests/test_views.py | jimmykamau/couscous | 97a1b36e159df39239e3485bd90be0639aa44d38 | [
"MIT"
] | 9 | 2019-11-21T12:43:42.000Z | 2022-02-10T14:18:01.000Z | backend/couscous/v1/invoice/tests/test_views.py | jimmykamau/couscous | 97a1b36e159df39239e3485bd90be0639aa44d38 | [
"MIT"
] | null | null | null | from django.urls import reverse
from rest_framework.test import APITestCase
import couscous.v1.debtor.tests.factories as debtor_factories
import couscous.v1.tests.factories as couscous_factories
from couscous.v1.invoice import logger
from .factories import InvoiceFactory
class ListInvoiceViewTests(APITestCase):
def setUp(self):
self.admin_user = couscous_factories.UserFactory()
self.client.force_authenticate(user=self.admin_user)
self.debtors = debtor_factories.DebtorFactory.create_batch(
3, created_by=self.admin_user
)
self.invoices = InvoiceFactory.create_batch(
5, debtor=self.debtors[0]
)
self.url = reverse('v1:list-invoices')
def tearDown(self):
self.client.force_authenticate(user=None)
def test_list_invoices(self):
response = self.client.get(
self.url, format='json'
)
self.assertEqual(200, response.status_code)
self.assertEqual(
len(self.invoices),
len(response.data)
)
# Check the content of the returned data
self.assertCountEqual(
['email', 'status', 'amount', 'due_date'],
response.data[0]
)
def test_cannot_list_invoices_without_auth(self):
# Test for user that didn't create invoices
other_user = couscous_factories.UserFactory()
self.client.force_authenticate(user=other_user)
response = self.client.get(
self.url, format='json'
)
self.assertEqual(200, response.status_code)
self.assertFalse(response.data)
# Test for user without staff rights
self.admin_user.is_staff = False
self.admin_user.save()
self.client.force_authenticate(user=self.admin_user)
response = self.client.get(
self.url, format='json'
)
self.assertEqual(403, response.status_code)
# Test for logged out user
self.client.force_authenticate(user=None)
response = self.client.get(
self.url, format='json'
)
self.assertEqual(403, response.status_code)
def test_filter_results(self):
other_debtor_invoices = InvoiceFactory.create_batch(2, debtor=self.debtors[1])
# Filter by debtor email
url = f"{self.url}?debtor__email={self.debtors[1].email}"
response = self.client.get(
url, format='json'
)
self.assertEqual(
200, response.status_code
)
self.assertEqual(
len(other_debtor_invoices),
len(response.data)
)
# Filter by status
status = other_debtor_invoices[0].status
url = f"{self.url}?status={status}"
response = self.client.get(
url, format='json'
)
self.assertEqual(200, response.status_code)
for invoice in response.data:
self.assertEqual(status, invoice['status'])
# Filter by amount
amount = float(other_debtor_invoices[1].amount)
url = f"{self.url}?amount={amount}"
response = self.client.get(
url, format='json'
)
self.assertEqual(200, response.status_code)
for invoice in response.data:
self.assertEqual(amount, float(invoice['amount']))
# Filter by due date
due_date = self.invoices[2].due_date.strftime('%Y-%m-%d')
url = f"{self.url}?due_date={due_date}"
response = self.client.get(
url, format='json'
)
self.assertEqual(200, response.status_code)
for invoice in response.data:
self.assertEqual(due_date, invoice['due_date'])
def test_order_results(self):
other_debtor_invoices = InvoiceFactory.create_batch(2, debtor=self.debtors[1])
self.url = f"{self.url}?ordering="
# Order by descending debtor email
response = self.client.get(
f"{self.url}debtor__email", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['email'], response.data[0]['email']
)
# Order by ascending debtor email
response = self.client.get(
f"{self.url}-debtor__email", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['email'], response.data[0]['email']
)
# Order by descending status
response = self.client.get(
f"{self.url}status", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['status'], response.data[0]['status']
)
# Order by ascending status
response = self.client.get(
f"{self.url}-status", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['status'], response.data[0]['status']
)
# Order by descending amount
response = self.client.get(
f"{self.url}amount", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['amount'], response.data[0]['amount']
)
# Order by ascending amount
response = self.client.get(
f"{self.url}-amount", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['amount'], response.data[0]['amount']
)
# Order by descending due date
response = self.client.get(
f"{self.url}due_date", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['due_date'], response.data[0]['due_date']
)
# Order by ascending due date
response = self.client.get(
f"{self.url}-due_date", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['due_date'], response.data[0]['due_date']
)
| 33.105263 | 86 | 0.591892 | 701 | 6,290 | 5.195435 | 0.138374 | 0.075783 | 0.079077 | 0.092257 | 0.660626 | 0.641131 | 0.619989 | 0.619989 | 0.605437 | 0.57084 | 0 | 0.018026 | 0.294436 | 6,290 | 189 | 87 | 33.280423 | 0.802614 | 0.070588 | 0 | 0.466216 | 0 | 0 | 0.091439 | 0.030365 | 0 | 0 | 0 | 0 | 0.209459 | 1 | 0.040541 | false | 0 | 0.040541 | 0 | 0.087838 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74456626648b83566c2bf9ed61d7f79673a7fe6 | 4,622 | py | Python | spikeinterface/sortingcomponents/template_matching.py | lkeegan/spikeinterface | 237cc6f6119a5365be1d9e1c235d8410ceb482d3 | [
"MIT"
] | null | null | null | spikeinterface/sortingcomponents/template_matching.py | lkeegan/spikeinterface | 237cc6f6119a5365be1d9e1c235d8410ceb482d3 | [
"MIT"
] | null | null | null | spikeinterface/sortingcomponents/template_matching.py | lkeegan/spikeinterface | 237cc6f6119a5365be1d9e1c235d8410ceb482d3 | [
"MIT"
] | null | null | null | """Sorting components: template matching."""
import numpy as np
# ~ try:
# ~ import numba
# ~ HAVE_NUMBA = True
# ~ except ImportError:
# ~ HAVE_NUMBA = False
from spikeinterface.core.job_tools import ChunkRecordingExecutor
from spikeinterface.toolkit import get_noise_levels, get_channel_distances
from spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive
spike_dtype = [('sample_ind', 'int64'), ('channel_ind', 'int64'), ('cluster_ind', 'int64'),
('amplitude', 'float64'), ('segment_ind', 'int64')]
def find_spike_from_templates(recording, waveform_extractor, method='simple',
method_kwargs={}, **job_kwargs):
"""Find spike from a recording from given templates.
Parameters
----------
recording: RecordingExtractor
The recording extractor object.
waveform_extractor: WaveformExtractor
The waveform extractor.
method: {'simple'}
Which method to use.
method_kwargs: dict, optional
Keyword arguments for the chosen method.
job_kwargs: dict
Parameters for ChunkRecordingExecutor.
Returns
-------
spikes: ndarray
Spikes found from templates.
Notes
-----
Templates are represented as WaveformExtractor so statistics can be extracted.
"""
assert method in ('simple',)
if method == 'simple':
method_kwargs = check_kwargs_simple_matching(recording, waveform_extractor, method_kwargs)
# and run
func = _find_spike_chunk
init_func = _init_worker_find_spike
init_args = (recording.to_dict(), method, method_kwargs)
processor = ChunkRecordingExecutor(recording, func, init_func, init_args,
handle_returns=True, job_name='find spikes', **job_kwargs)
spikes = processor.run()
spikes = np.concatenate(spikes)
return spikes
def _init_worker_find_spike(recording, method, method_kwargs):
"""Initialize worker for finding spikes."""
if isinstance(recording, dict):
from spikeinterface.core import load_extractor
recording = load_extractor(recording)
# create a local dict per worker
worker_ctx = {}
worker_ctx['recording'] = recording
worker_ctx['method'] = method
worker_ctx['method_kwargs'] = method_kwargs
return worker_ctx
def _find_spike_chunk(segment_index, start_frame, end_frame, worker_ctx):
"""Find spikes from a chunk of data."""
# recover variables of the worker
recording = worker_ctx['recording']
method = worker_ctx['method']
method_kwargs = worker_ctx['method_kwargs']
# load trace in memory
traces = recording.get_traces(start_frame=start_frame, end_frame=end_frame,
segment_index=segment_index)
if method == 'simple':
spikes = find_spike_simple_matching(traces, method_kwargs)
else:
raise NotImplementedError
spikes['sample_ind'] += start_frame
spikes['segment_ind'] = segment_index
return spikes
##########
# simple matching
##########
_default_simple_matching = {
'peak_sign': 'neg',
'n_shifts': 2,
'detect_threshold': 5,
'noise_levels': None,
'local_radius_um': 100,
'random_chunk_kwargs': {},
}
def check_kwargs_simple_matching(recording, we, kwargs):
"""Check keyword arguments for the simple matching method."""
d = _default_simple_matching.copy()
d.update(kwargs)
if d['noise_levels'] is None:
d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
channel_distance = get_channel_distances(recording)
d['neighbours_mask'] = channel_distance < d['local_radius_um']
return d
def find_spike_simple_matching(traces, method_kwargs):
"""Find spikes using the simple matching method."""
peak_sign = method_kwargs['peak_sign']
abs_threholds = method_kwargs['abs_threholds']
n_shifts = method_kwargs['n_shifts']
neighbours_mask = method_kwargs['neighbours_mask']
peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(traces, peak_sign, abs_threholds,
n_shifts, neighbours_mask)
# this wrong at the moment this ios for debug only!!!!
spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)
spikes['sample_ind'] = peak_sample_ind
spikes['channel_ind'] = peak_chan_ind # need to put the channel from template
spikes['cluster_ind'] = 666
spikes['amplitude'] = 111111.11111
return spikes
| 30.012987 | 100 | 0.681307 | 536 | 4,622 | 5.580224 | 0.29291 | 0.064193 | 0.02006 | 0.017385 | 0.05015 | 0.027416 | 0.027416 | 0 | 0 | 0 | 0 | 0.008013 | 0.217006 | 4,622 | 153 | 101 | 30.20915 | 0.818458 | 0.224578 | 0 | 0.070423 | 0 | 0 | 0.13746 | 0 | 0 | 0 | 0 | 0 | 0.014085 | 1 | 0.070423 | false | 0 | 0.070423 | 0 | 0.211268 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7445ba85682f751054e41c614bca9af6603a1c8 | 13,350 | py | Python | theano/configdefaults.py | arnaudsj/Theano | 41103b5d158739e4147428ce776fb5716062d4a8 | [
"BSD-3-Clause"
] | 1 | 2015-11-05T13:58:11.000Z | 2015-11-05T13:58:11.000Z | theano/configdefaults.py | arnaudsj/Theano | 41103b5d158739e4147428ce776fb5716062d4a8 | [
"BSD-3-Clause"
] | null | null | null | theano/configdefaults.py | arnaudsj/Theano | 41103b5d158739e4147428ce776fb5716062d4a8 | [
"BSD-3-Clause"
] | null | null | null | import os
import logging
import subprocess
import sys
from theano.configparser import (
AddConfigVar, BoolParam, ConfigParam, EnumStr, IntParam, FloatParam,
StrParam, TheanoConfigParser)
_logger = logging.getLogger('theano.configdefaults')
config = TheanoConfigParser()
AddConfigVar('floatX',
"Default floating-point precision for python casts",
EnumStr('float64', 'float32'),
)
AddConfigVar('cast_policy',
"Rules for implicit type casting",
EnumStr('custom', 'numpy+floatX',
# The 'numpy' policy was originally planned to provide a smooth
# transition from numpy. It was meant to behave the same as
# numpy+floatX, but keeping float64 when numpy would. However
# the current implementation of some cast mechanisms makes it
# a bit more complex to add than what was expected, so it is
# currently not available.
#numpy,
),
)
# python 2.* define int / int to return int and int // int to return int.
# python 3* define int / int to return float and int // int to return int.
# numpy 1.6.1 behaves as python 2.*. I think we should not change it faster
# than numpy. When we will do the transition, we should create an int_warn
# and floatX_warn option.
AddConfigVar('int_division',
"What to do when one computes x / y, where both x and y are of "
"integer types",
EnumStr('int', 'raise', 'floatX'),
in_c_key=False)
#gpu means let the driver select the gpu. Needed in case of gpu in exclusive mode.
#gpuX means use the gpu number X.
AddConfigVar('device',
"Default device for computations. If gpu*, change the default to try to move computation to it and to put shared variable of float32 on it.",
EnumStr('cpu', 'gpu',
'gpu0', 'gpu1', 'gpu2', 'gpu3',
'gpu4', 'gpu5', 'gpu6', 'gpu7',
'gpu8', 'gpu9', 'gpu10', 'gpu11',
'gpu12', 'gpu13', 'gpu14', 'gpu15',
allow_override=False),
in_c_key=False,
)
AddConfigVar('init_gpu_device',
("Initialize the gpu device to use, works only if device=cpu. "
"Unlike 'device', setting this option will NOT move computations, "
"nor shared variables, to the specified GPU. "
"It can be used to run GPU-specific tests on a particular GPU."),
EnumStr('', 'gpu',
'gpu0', 'gpu1', 'gpu2', 'gpu3',
'gpu4', 'gpu5', 'gpu6', 'gpu7',
'gpu8', 'gpu9', 'gpu10', 'gpu11',
'gpu12', 'gpu13', 'gpu14', 'gpu15',
allow_override=False),
in_c_key=False)
AddConfigVar('force_device',
"Raise an error if we can't use the specified device",
BoolParam(False, allow_override=False),
in_c_key=False)
# Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut).
# The way to get FAST_RUN_NOGC is with the flag 'linker=c|py_nogc'.
# The old all capital letter way of working is deprecated as it is not
# scalable.
# Also, please be careful not to modify the first item in the enum when adding
# new modes, since it is the default mode.
AddConfigVar('mode',
"Default compilation mode",
EnumStr('Mode', 'ProfileMode', 'DebugMode', 'FAST_RUN',
'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'),
in_c_key=False)
# Test whether or not gcc is present: disable C code if it is not.
# Using the dummy file descriptor below is a workaround for a crash experienced
# in an unusual Python 2.4.4 Windows environment with the default stdin=None.
dummy_stdin = open(os.devnull)
try:
subprocess.Popen('gcc', stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=dummy_stdin.fileno())
# Keep the default linker the same as the one for the mode FAST_RUN
AddConfigVar('linker',
"Default linker used if the theano flags mode is Mode or ProfileMode",
EnumStr('c|py', 'py', 'c', 'c|py_nogc', 'c&py',
'vm', 'cvm', 'vm_nogc', 'cvm_nogc'),
in_c_key=False)
except OSError:
# gcc is not present, linker should default to python only
AddConfigVar('linker',
"Default linker used if the theano flags mode is Mode or ProfileMode",
EnumStr('py', 'c|py', 'c', 'c|py_nogc', 'c&py',
'vm', 'cvm', 'vm_nogc', 'cvm_nogc'),
in_c_key=False)
_logger.warning('GCC not detected ! Theano will be unable to execute '
'optimized C-implementations (for both CPU and GPU) and will '
'default to Python implementations. Performance will be severely '
'degraded.')
del dummy_stdin
#Keep the default optimizer the same as the one for the mode FAST_RUN
AddConfigVar('optimizer',
"Default optimizer. If not None, will use this linker with the Mode object(not ProfileMode or DebugMode)",
EnumStr('fast_run', 'merge', 'fast_compile', 'None'),
in_c_key=False)
AddConfigVar('on_opt_error',
"What to do when an optimization crashes: warn and skip it, or raise the exception",
EnumStr('warn', 'raise'),
in_c_key=False)
def safe_no_home(home):
"""
Make sure the user is not attempting to use `config.home`.
This config option was removed in Theano 0.5 since it was redundant with
`config.base_compiledir`. This filter function ensures people who were
setting the location of their compilation directory through `config.home`
switch to `config.base_compiledir` instead, by raising an error when
`config.home` is used.
"""
if home:
raise RuntimeError(
'The `config.home` option has been removed and should not be '
'used anymore. Please set the `config.base_compiledir` option '
'instead (for instance to: %s)' %
os.path.join(home, '.theano'))
return True
AddConfigVar('home',
"This config option was removed in 0.5: do not use it!",
ConfigParam('', allow_override=False, filter=safe_no_home),
in_c_key=False)
AddConfigVar('nocleanup',
"Suppress the deletion of code files that did not compile cleanly",
BoolParam(False),
in_c_key=False)
# This flag is used when we import Theano to initialize global variables.
# So changing it after import will not modify these global variables.
# This could be done differently... but for now we simply prevent it from being
# changed at runtime.
AddConfigVar('tensor.cmp_sloppy',
"Relax tensor._allclose (0) not at all, (1) a bit, (2) more",
IntParam(0, lambda i: i in (0,1,2), allow_override=False),
in_c_key=False)
AddConfigVar('tensor.local_elemwise_fusion',
"Enable or not in fast_run mode(fast_run optimization) the elemwise fusion optimization",
BoolParam(True),
in_c_key=False)
AddConfigVar('gpu.local_elemwise_fusion',
"Enable or not in fast_run mode(fast_run optimization) the gpu elemwise fusion optimization",
BoolParam(True),
in_c_key=False)
#http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
AddConfigVar('lib.amdlibm',
"Use amd's amdlibm numerical library",
BoolParam(False))
AddConfigVar('op.set_flops',
"currently used only in ConvOp. The profile mode will print the flops/s for the op.",
BoolParam(False),
in_c_key=False)
AddConfigVar('gpuelemwise.sync',
"when true, wait that the gpu fct finished and check it error code.",
BoolParam(True))
AddConfigVar('traceback.limit',
"The number of stack to trace. -1 mean all.",
IntParam(5),
in_c_key=False)
AddConfigVar('experimental.mrg',
"Another random number generator that work on the gpu",
BoolParam(False))
AddConfigVar('numpy.seterr_all',
("Sets numpy's behaviour for floating-point errors, ",
"see numpy.seterr. "
"'None' means not to change numpy's default, which can be "
"different for different numpy releases. "
"This flag sets the default behaviour for all kinds of floating-"
"point errors, its effect can be overriden for specific errors "
"by the following flags: seterr_divide, seterr_over, "
"seterr_under and seterr_invalid."),
EnumStr('ignore', 'warn', 'raise', 'call', 'print', 'log', 'None',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_divide',
("Sets numpy's behavior for division by zero, see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_over',
("Sets numpy's behavior for floating-point overflow, "
"see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_under',
("Sets numpy's behavior for floating-point underflow, "
"see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_invalid',
("Sets numpy's behavior for invalid floating-point operation, "
"see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
###
### To disable some warnings about old bugs that are fixed now.
###
AddConfigVar('warn.ignore_bug_before',
"If 'None', we warn about all Theano bugs found by default. If 'all', we don't warn about Theano bugs found by default. If a version, we print only the warnings relative to Theano bugs found after that version. Warning for specific bugs can be configured with specific [warn] flags.",
EnumStr('None', 'all', '0.3','0.4', '0.4.1', '0.5', allow_override=False),
in_c_key=False)
def warn_default(version):
"""
Return True iff we should warn about bugs fixed after a given version.
"""
if config.warn.ignore_bug_before == 'None':
return True
if config.warn.ignore_bug_before == 'all':
return False
if config.warn.ignore_bug_before >= version:
return False
return True
AddConfigVar('warn.argmax_pushdown_bug',
"Warn if in past version of Theano we generated a bug with the theano.tensor.nnet.nnet.local_argmax_pushdown optimization. Was fixed 27 may 2010",
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('warn.gpusum_01_011_0111_bug',
"Warn if we are in a case where old version of Theano had a silent bug with GpuSum pattern 01,011 and 0111 when the first dimensions was bigger then 4096. Was fixed 31 may 2010",
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('warn.sum_sum_bug',
"Warn if we are in a case where Theano version between version 9923a40c7b7a and the 2 august 2010(fixed date), generated an error in that case. This happen when their is 2 consecutive sum in the graph, bad code was generated. Was fixed 2 August 2010",
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('warn.sum_div_dimshuffle_bug',
"Warn if previous versions of Theano (between rev. 3bd9b789f5e8, 2010-06-16, and cfc6322e5ad4, 2010-08-03) would have given incorrect result. This bug was triggered by sum of division of dimshuffled tensors.",
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('compute_test_value',
"If 'True', Theano will run each op at graph build time, using Constants, SharedVariables and the tag 'test_value' as inputs to the function. This helps the user track down problems in the graph before it gets optimized.",
EnumStr('off', 'ignore', 'warn', 'raise'),
in_c_key=False)
"""Note to developers:
Generally your exceptions should use an apply node's __str__
method when exception_verbosity == 'low'. When exception_verbosity
== 'high', you should include a call to printing.min_informative_str
on all important apply nodes.
"""
AddConfigVar('exception_verbosity',
"If 'low', the text of exceptions will generally refer " \
+ "to apply nodes with short names such as " \
+ "Elemwise{add_no_inplace}. If 'high', some exceptions " \
+ "will also refer to apply nodes with long descriptions " \
+ """ like:
A. Elemwise{add_no_inplace}
B. log_likelihood_v_given_h
C. log_likelihood_h""",
EnumStr('low','high'),
in_c_key=False)
| 43.344156 | 297 | 0.638352 | 1,796 | 13,350 | 4.64588 | 0.277283 | 0.010067 | 0.020134 | 0.036913 | 0.293744 | 0.26534 | 0.229147 | 0.206376 | 0.201462 | 0.183245 | 0 | 0.017563 | 0.262172 | 13,350 | 307 | 298 | 43.485342 | 0.829543 | 0.17236 | 0 | 0.358852 | 0 | 0.043062 | 0.513751 | 0.02956 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009569 | false | 0 | 0.023923 | 0 | 0.057416 | 0.033493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a747e7ce1367df9e77b42767543c1f7f2a5c0b5c | 909 | py | Python | nobrainer/models/__init__.py | djarecka/nobrainer | 6d9820b76299d258a22365e39e6efa6a94c6385e | [
"Apache-2.0"
] | null | null | null | nobrainer/models/__init__.py | djarecka/nobrainer | 6d9820b76299d258a22365e39e6efa6a94c6385e | [
"Apache-2.0"
] | null | null | null | nobrainer/models/__init__.py | djarecka/nobrainer | 6d9820b76299d258a22365e39e6efa6a94c6385e | [
"Apache-2.0"
] | null | null | null | from nobrainer.models.highresnet import highresnet
from nobrainer.models.meshnet import meshnet
from nobrainer.models.unet import unet
from nobrainer.models.autoencoder import autoencoder
def get(name):
"""Return callable that creates a particular `tf.keras.Model`.
Parameters
----------
name: str, the name of the model (case-insensitive).
Returns
-------
Callable, which instantiates a `tf.keras.Model` object.
"""
if not isinstance(name, str):
raise ValueError("Model name must be a string.")
models = {
"highresnet": highresnet,
"meshnet": meshnet,
"unet": unet,
"autoencoder": autoencoder,
}
try:
return models[name.lower()]
except KeyError:
avail = ", ".join(models.keys())
raise ValueError(
"Uknown model: '{}'. Available models are {}.".format(name, avail)
)
| 25.971429 | 78 | 0.623762 | 98 | 909 | 5.785714 | 0.5 | 0.091711 | 0.134039 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.251925 | 909 | 34 | 79 | 26.735294 | 0.833824 | 0.228823 | 0 | 0 | 0 | 0 | 0.159159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74851b3f8b1be1f2e442703b424b5173513a02e | 1,660 | py | Python | setup.py | fossabot/do | 18a76fdb611b4d4aca97b71be87d3ab4df470d81 | [
"MIT"
] | null | null | null | setup.py | fossabot/do | 18a76fdb611b4d4aca97b71be87d3ab4df470d81 | [
"MIT"
] | null | null | null | setup.py | fossabot/do | 18a76fdb611b4d4aca97b71be87d3ab4df470d81 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
current_version = "0.7.3"
main_package = "controller"
app = '{}.__main__:main'.format(main_package)
setup(
name='rapydo_controller',
version=current_version,
author="Paolo D'Onorio De Meo",
author_email='p.donorio.de.meo@gmail.com',
description='Manage and deploy projects based on RAPyDo framework',
url='https://rapydo.github.io/do',
license='MIT',
packages=[main_package],
package_data={
main_package: ['argparser.yaml']
},
# End-of-life: 2020-09-13
python_requires='>=3.5.0',
entry_points={
'console_scripts': [
'rapydo={}'.format(app),
'do={}'.format(app),
],
},
install_requires=[
"docker-compose==1.25.4",
"dockerfile-parse",
"python-dateutil",
"pytz",
"loguru",
"prettyprinter",
"jinja2",
"sultan==0.9.1",
"plumbum",
"glom",
"gitpython==3.1.0",
"PyYAML==5.3.1",
"pip>=10.0.0"
],
keywords=['http', 'api', 'rest', 'web', 'backend', 'rapydo'],
classifiers=[
'Programming Language :: Python',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
# End-of-life: 2020-09-13
'Programming Language :: Python :: 3.5',
# End-of-life: 2021-12-23
'Programming Language :: Python :: 3.6',
# End-of-life: 2023-06-27
'Programming Language :: Python :: 3.7',
# End-of-life: 2024-10
'Programming Language :: Python :: 3.8',
]
)
| 26.774194 | 71 | 0.546988 | 188 | 1,660 | 4.739362 | 0.574468 | 0.028058 | 0.050505 | 0.116723 | 0.038159 | 0.038159 | 0 | 0 | 0 | 0 | 0 | 0.0601 | 0.278313 | 1,660 | 61 | 72 | 27.213115 | 0.683639 | 0.083133 | 0 | 0.04 | 0 | 0 | 0.447525 | 0.031683 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02 | 0 | 0.02 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7496655fe135621a501433aae084245a81140d6 | 11,058 | py | Python | preprocessing.py | krg-uoi/ganram | e3a5ddcce33b0543f1d57e35d970cd8845e37081 | [
"MIT"
] | 2 | 2022-03-31T07:03:34.000Z | 2022-03-31T15:20:52.000Z | preprocessing.py | krg-uoi/ganram | e3a5ddcce33b0543f1d57e35d970cd8845e37081 | [
"MIT"
] | null | null | null | preprocessing.py | krg-uoi/ganram | e3a5ddcce33b0543f1d57e35d970cd8845e37081 | [
"MIT"
] | null | null | null | from scipy.signal import savgol_filter
# from peakutils import baseline
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy import integrate
import helpers as hlp
# the 'deriv' parameter in savgol_filter() is used in conjuction with the
# `delta` parameter, which is the x-spacing of the data. so, if the data are
# not evenly spaced, the computed derivatives are not correct. the workaround
# is to either not use the differentiation capability of savgol_filter() and
# use the differentiate() function of this module or interpolate the data with
# evenly spaced x values and use the spacing between them for the 'delta'
# parameter.
def smooth(data, window_length, polyorder, deriv=0, mode='interp'):
"""Apply a Savitzky-Golay filter to smooth an array.
This is a wrapper around scipy.signal.savgol_filter. Original function can
be found here:
https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.savgol_filter.html
Arguments:
data {numpy.ndarray} -- Data to be smoothed.
window_length {int} -- Length of the smoothing window.
polyorder {int} -- Order of the polynomial that is used for smoothing.
It must be smaller than the window length.
Keyword Arguments:
deriv {int} -- Order of derivative to compute. (default: {0})
mode {str} -- Must be ‘mirror’, ‘constant’, ‘nearest’, ‘wrap’ or
‘interp’. Determines the type of extension that is used for the padded
signal to which the filter is applied. (default: {'interp'})
"""
return savgol_filter(data, window_length=window_length,
polyorder=polyorder, deriv=deriv, mode=mode)
def differentiate(x, y, order=1):
"""Numerically calculate the derivative of an array.
Arguments:
x {array} -- x-axis array.
y {array} -- The array whose derivative is to be calculated.
Keyword Arguments:
order {int} -- Order of derivative to compute. (default: {1})
"""
for i in range(order):
y = np.gradient(y, x, edge_order=2)
return y
# def poly(data, deg=2, max_it=100, tol=0.001):
# """Baseline estimation using an n-th order polynomial.
# This is a wrapper around peakutils.baseline.baseline. Original function can
# be found here:
# https://peakutils.readthedocs.io/en/latest/reference.html#module-peakutils.baseline
# Arguments:
# data {numpy.ndarray} -- Data for which the baseline is to be estimated
# using n-th order polynomial fitting.
# Keyword Arguments:
# deg {int} -- The degree of the polynomial. (default: {2})
# max_iter {int} -- Maximum number of iterations for the polynomial
# fitting to converge. (default: {100})
# tol {float} -- Tolerance to use when comparing the difference between
# the current fit coefficients and the ones from the last iteration. The
# iteration procedure will stop when the difference between them is lower
# than tol. (default: {0.001})
# Returns:
# numpy.ndarray -- Polynomial baseline estimation.
# """
# return baseline(data, deg=deg, max_it=max_it, tol=tol)
def snip(data, iterations, increasing=False):
"""SNIP implementation for 1-D data based on the M. Morháč algorithm [1].
[1] Morháč M, Kliman J, Matoušek V, Veselský M, Turzo I. Background
elimination methods for multidimensional coincidence γ-ray spectra. Nuclear
Instruments and Methods in Physics Research Section A: Accelerators,
Spectrometers, Detectors and Associated Equipment. 1997 Dec 11;401(1):113-
32.
Arguments:
data {numpy.ndarray or pd.core.series.Series} -- Data for which the
background is to be estimated using the SNIP algorithm.
iterations {int} -- Number of iterations for the SNIP algorithm.
Keyword Arguments:
increasing {bool} -- Implementation of the SNIP algorithm using
increasing or decreasing iteration window. (default: {False})
Returns:
numpy.ndarray -- SNIP-calculated background.
"""
# check value of iterations
if isinstance(iterations, int) is False or iterations < 0:
raise ValueError(
'The number of iterations must be a positive integer (int).')
N = len(data)
w = np.empty(N) # working vector
v = data.copy() # use copy of data so the original remain intact
# if data is a pandas series convert them to numpy array
if isinstance(data, pd.core.series.Series):
v = v.values
# snip for increasing iteration window
def snip_increasing(data, iterations):
p = 1
while p <= iterations:
i = p
while i < N - p:
w[i] = min(v[i], (v[i - p] + v[i + p]) / 2)
i += 1
j = p
while j < N - p:
v[j] = w[j]
j += 1
p += 1
return v
# snip for decreasing iteration window
def snip_decreasing(data, iterations):
p = iterations
while p > 0:
i = p
while i < N - p:
w[i] = min(v[i], (v[i - p] + v[i + p]) / 2)
i += 1
j = p
while j < N - p:
v[j] = w[j]
j += 1
p -= 1
return v
if increasing:
return snip_increasing(data, iterations)
else:
return snip_decreasing(data, iterations)
def get_index(x, value, closest=True):
"""Get the index of an array that corresponds to a given value.
If closest is true, get the index of the value closest to the
value entered.
"""
if closest:
index = np.abs(np.array(x) - value).argsort()[0]
else:
index = list(x).index(value)
return index
def interpolate(x1, y1, x2, kind='cubic'):
"""Interpolate an array x1, y1 with an array x2.
Return a tuple of the x1_new, y1 arrays.
"""
# start_value = max(x1[0], x2[0])
# stop_value = min(x1[-1], x2[-1])
# x1_start_index = get_index(x1, start_value, closest=True)
# x1_start_value = x1[x1_start_index]
# x1_stop_index = get_index(x1, stop_value, closest=True)
# x1_stop_value = x1[x1_stop_index]
# x2_start_index = get_index(x2, start_value, closest=True)
# x2_start_value = x2[x2_start_index]
# x2_stop_index = get_index(x2, stop_value, closest=True)
# x2_stop_value = x2[x2_stop_index]
# # interpolation range needs to be smaller than x1 range
# if x1_start_value > x2_start_value:
# x2_start_index = x2_start_index + 1
# x2_start_value = x2[x2_start_index]
# if x1_stop_value < x2_stop_value:
# x2_stop_index = x2_stop_index - 1
# x2_stop_value = x2[x2_stop_index]
f = interp1d(
x1,
y1,
kind=kind
)
# x1_new = x2[x2_start_index:x2_stop_index]
x1_new = interpolation_intersection(x1, x2)
return f(x1_new)
def interpolation_intersection(x1, x2):
"""Intersect two arrays, x1 and x2, and return the x2 intersection for
interpolation, i.e. x2 upper and lower values must lie within x1.
"""
start_value = max(x1[0], x2[0])
stop_value = min(x1[-1], x2[-1])
x1_start_index = get_index(x1, start_value, closest=True)
x1_start_value = x1[x1_start_index]
x1_stop_index = get_index(x1, stop_value, closest=True)
x1_stop_value = x1[x1_stop_index]
x2_start_index = get_index(x2, start_value, closest=True)
x2_start_value = x2[x2_start_index]
x2_stop_index = get_index(x2, stop_value, closest=True)
x2_stop_value = x2[x2_stop_index]
# interpolation range needs to be smaller than x1 range
if x1_start_value > x2_start_value:
x2_start_index = x2_start_index + 1
x2_start_value = x2[x2_start_index]
if x1_stop_value < x2_stop_value:
x2_stop_index = x2_stop_index - 1
x2_stop_value = x2[x2_stop_index]
return x2[x2_start_index:x2_stop_index + 1]
def norm_peak(y, x, peak, closest=True):
"""Normalize a y-array to the value of a peak given its x-array value.
If 'peak' is an integer, the y-array is normalized to the
value of y that corresponds to this x-array value.
If 'peak' is a list or tuple that contains two values, the y-array is
normalized to the maximum value of y between these x-array values.
"""
# check if the x-array is sorted
if not hlp.is_sorted(x, sort_order='both'):
raise ValueError("Array 'x' is not sorted.")
# check if y and x are of same length
if len(x) != len(y):
raise ValueError("Arrays 'x' and 'y' have different lengths.")
if isinstance(peak, (list, tuple)) and len(peak) != 2:
raise ValueError(
"'peak' can either be an int/float or a 2-elements list/tuple.")
elif isinstance(peak, (list, tuple)) and len(peak) == 2:
start_index = get_index(x, peak[0], closest=closest)
stop_index = get_index(x, peak[1], closest=closest)
# swap indices if start_index > stop_index
if start_index > stop_index:
start_index, stop_index = stop_index, start_index
value = max(y[start_index:stop_index + 1])
elif isinstance(peak, (int, float)):
peak_index = get_index(x, peak, closest=closest)
value = y[peak_index]
return y / value
def norm_area(y, x, x_range, closest=True):
"""Normalize an array y to the value of the integral between the specified
range of the x array.
"""
# check the sort order of x and make sure that the calculated integral
# will have the correct sign for x either ascending and descending
# (result of integrate.simps() has the opposite sign)
if hlp.is_sorted(x, sort_order='ascending'):
sort = 1
elif hlp.is_sorted(x, sort_order='descending'):
sort = -1
else:
raise ValueError("Array 'x' is not sorted.")
# check if y and x are of same length
if len(x) != len(y):
raise ValueError("Arrays 'x' and 'y' have different lengths.")
if isinstance(x_range, (list, tuple)) and len(x_range) != 2:
raise ValueError(
"'x_range' can either be an integer or a 2-elements list/tuple.")
elif isinstance(x_range, (list, tuple)) and len(x_range) == 2:
start_index = get_index(x, x_range[0], closest=closest)
stop_index = get_index(x, x_range[1], closest=closest)
# swap indices if start_index > stop_index
if start_index > stop_index:
start_index, stop_index = stop_index, start_index
area = integrate.simps(y[start_index:stop_index + 1],
x[start_index:stop_index + 1]) * sort
return y / area
| 35.329073 | 97 | 0.626063 | 1,573 | 11,058 | 4.274634 | 0.181818 | 0.043129 | 0.025134 | 0.025431 | 0.372992 | 0.333284 | 0.304581 | 0.268293 | 0.242415 | 0.242415 | 0 | 0.024068 | 0.28233 | 11,058 | 312 | 98 | 35.442308 | 0.823211 | 0.498915 | 0 | 0.311475 | 0 | 0 | 0.0705 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0 | 0.04918 | 0 | 0.221311 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a749bb4246b2a3f4e855723c2af94f2018e36ca3 | 8,002 | py | Python | python/ml4ir/base/data/ranklib_helper.py | ducouloa/ml4ir | 75aeecaff11682a7bd71c5521e59c449c43c3f9f | [
"Apache-2.0"
] | 70 | 2020-02-05T00:42:29.000Z | 2022-03-07T09:33:01.000Z | python/ml4ir/base/data/ranklib_helper.py | ducouloa/ml4ir | 75aeecaff11682a7bd71c5521e59c449c43c3f9f | [
"Apache-2.0"
] | 102 | 2020-01-31T21:12:55.000Z | 2022-03-28T17:04:43.000Z | python/ml4ir/base/data/ranklib_helper.py | ducouloa/ml4ir | 75aeecaff11682a7bd71c5521e59c449c43c3f9f | [
"Apache-2.0"
] | 23 | 2020-02-05T00:43:07.000Z | 2022-02-13T13:33:51.000Z | import ast
import argparse
import pandas as pd
import numpy as np
max_f_id = 0
def process_line(line, keep_additional_info, query_id_name, relevance_name):
"""Takes an input line in ranklib format and returns a row in ml4ir format.
Parameters
----------
line : str
a line from ranklib format data
keep_additional_info : bool
Option to keep additional info (All info after the "#") True to keep, False to ignore.
query_id_name : str
The name of the query id column.
relevance_name : str
The name of the relevance column (the target label).
Returns
-------
dictionary <column:value>
Keys are the column names values are the parsed values.
"""
if keep_additional_info:
feature_values = line.replace('#', '').replace(' = ', ':').strip().split()
else:
feature_values = line.split('#')[0].strip().split()
feature_values[0] = relevance_name + ':' + feature_values[0]
r = {}
for fv in feature_values:
feat = fv.split(':')[0].strip()
if feat == query_id_name:
val = fv.split(':')[1].strip()
r[feat] = 'Q'+str(val)
elif feat == relevance_name:
val = fv.split(':')[1].strip()
r[feat] = float(val)
else:
try:
val = float(fv.split(':')[1].strip())
except:
val = fv.split(':')[1].strip()
r['f_' + feat] = val
global max_f_id
if int(feat) > max_f_id:
max_f_id = int(feat)
return r
def convert(input_file, keep_additional_info, gl_2_clicks,
non_zero_features_only, query_id_name, relevance_name,
add_dummy_rank_column = True):
"""Convert the input file with the specified parameters into ml4ir format. returns a dataframe
Parameters
----------
input_file : str
ranklib input file path
keep_additional_info : bool
Option to keep additional info (All info after the "#") True to keep, False to ignore.
gl_2_clicks : int
Convert graded relevance to clicks (only max relevant document is considered clicked) 1 to convert
query_id_name : str
The name of the query id column.
relevance_name : str
The name of the relevance column (the target label).
add_dummy_rank_column : bool
ml4ir expects pre-rankings. This would add a dummy pre-rankings.
Returns
-------
Dataframe
converted ml4ir dataframe
"""
f = open(input_file, 'r')
rows = []
for line in f:
rows.append(process_line(line, keep_additional_info, query_id_name, relevance_name))
f.close()
if non_zero_features_only:
columns = [query_id_name, relevance_name] + ['f_' + str(i) for i in range(max_f_id)]
df = pd.DataFrame(rows, columns=columns)
df.replace(np.nan, 0, inplace=True)
else:
df = pd.DataFrame(rows)
if int(gl_2_clicks) == 1:
groups = df.groupby(query_id_name)
for gname, group in groups:
df.loc[df[query_id_name] == gname, relevance_name] = (
df.loc[df[query_id_name] == gname].relevance == max(group.relevance)).astype(int)
# NOTE: ml4ir expects a pre-ranking. Adding a dummy pre-ranking to match format.
if add_dummy_rank_column:
df['rank'] = 1
return df
def ranklib_to_csv(input_file, output_file, keep_additional_info, gl_2_clicks, non_zero_features_only, query_id_name, relevance_name, add_dummy_rank_column = False):
"""Convert the input file with the specified parameters into ml4ir format writes the converted file to a csv
Parameters
----------
input_file : str
ranklib input file path
output_file : str
output converted file path
keep_additional_info : bool
Option to keep additional info (All info after `#`) True to keep, False to ignore.
gl_2_clicks : int
Convert graded relevance to clicks (only max relevant document is considered clicked) 1 to convert
query_id_name : str
The name of the query id column.
relevance_name : str
The name of the relevance column (the target label).
add_dummy_rank_column : bool
ml4ir expects pre-rankings. This would add a dummy pre-rankings.
"""
df = convert(input_file, keep_additional_info, gl_2_clicks, non_zero_features_only, query_id_name, relevance_name, add_dummy_rank_column)
df.to_csv(output_file)
def ranklib_directory_to_csvs(input_dir, keep_additional_info, gl_2_clicks, non_zero_features_only, query_id_name, relevance_name, add_dummy_rank_column = False):
"""Convert all files in the given directory with the specified parameters into ml4ir format writes the converted file to a csv
Parameters
----------
input_dir : str
ranklib input directory path. All files within the directory will be converted.
keep_additional_info : bool
Option to keep additional info (All info after `#`) True to keep, False to ignore.
gl_2_clicks : int
Convert graded relevance to clicks (only max relevant document is considered clicked) 1 to convert
query_id_name : str
The name of the query id column.
relevance_name : str
The name of the relevance column (the target label).
add_dummy_rank_column : bool
ml4ir expects pre-rankings. This would add a dummy pre-rankings.
"""
from os import listdir
from os.path import isfile, join
onlyfiles = [f for f in listdir(input_dir) if isfile(join(input_dir, f))]
for f in onlyfiles[1:]:
ranklib_to_csv(join(input_dir, f), join(input_dir, f)+'_ml4ir.csv', keep_additional_info, gl_2_clicks, non_zero_features_only, query_id_name, relevance_name, add_dummy_rank_column)
if __name__ == "__main__":
# parsing arguments
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', type=str, default='ml4ir/applications/ranking/tests/data/train/sample.txt', help='ranklib input file path')
parser.add_argument('--input_dir', type=str, default='ml4ir/applications/ranking/tests/data/test',
help='ranklib input directory path. All files within the directory will be converted.')
parser.add_argument('--output_file', type=str, default='ml4ir/applications/ranking/tests/data/train/sample_ml4ir.csv', help='output converted file path')
parser.add_argument('--keep_additional_info', type=ast.literal_eval, default=True,
help='Option to keep additional info (All info after the "#") True to keep, False to ignore')
parser.add_argument('--gl_2_clicks', type=int, default=1,
help='Convert graded relevance to clicks (only max relevant document is considered clicked) 1 to convert')
parser.add_argument('--non_zero_features_only', type=int, default=True,
help='Only non zero features are stored. True for yes, False otherwise')
parser.add_argument('--query_id_name', type=str, default='qid',
help='The name of the query id column.')
parser.add_argument('--relevance_name', type=str, default='relevance',
help='The name of the relevance column.')
args = parser.parse_args()
print("Converting file...")
ranklib_to_csv(args.input_file, args.output_file, args.keep_additional_info, args.gl_2_clicks,
args.non_zero_features_only, args.query_id_name, args.relevance_name)
print('Conversion is completed')
| 45.725714 | 188 | 0.628343 | 1,058 | 8,002 | 4.541588 | 0.15879 | 0.033507 | 0.071176 | 0.024974 | 0.602497 | 0.585848 | 0.57128 | 0.566077 | 0.519875 | 0.519875 | 0 | 0.007483 | 0.28193 | 8,002 | 174 | 189 | 45.988506 | 0.82875 | 0.336916 | 0 | 0.073171 | 0 | 0 | 0.178259 | 0.044182 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.073171 | 0 | 0.146341 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74a74978d3def77bee6f66944621f779cc2bd27 | 5,369 | py | Python | skoleintra/sbs4.py | svalgaard/fskintra | 3ccf656ef1450e541c902d4c00ea1dadcf82085c | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2015-08-12T09:54:04.000Z | 2021-06-21T08:35:39.000Z | skoleintra/sbs4.py | svalgaard/fskintra | 3ccf656ef1450e541c902d4c00ea1dadcf82085c | [
"BSD-2-Clause-FreeBSD"
] | 29 | 2015-01-03T21:13:20.000Z | 2020-11-12T08:23:56.000Z | skoleintra/sbs4.py | svalgaard/fskintra | 3ccf656ef1450e541c902d4c00ea1dadcf82085c | [
"BSD-2-Clause-FreeBSD"
] | 11 | 2015-02-25T20:24:56.000Z | 2018-11-16T07:37:37.000Z | # -*- coding: utf-8 -*-
import bs4
import copy as _copy
import re
import sys
import time
import config
def copy(bs):
'Return a copy of bs'
return _copy.copy(bs)
def extract(bs, sel):
'Extract (delete tags incl. contents) elements matching sel'
for elm in list(bs.select(sel)):
elm.extract()
def unwrap(bs, sel):
'Unwrap (delete tags excl. contents) elements matching sel'
for elm in list(bs.select(sel)):
elm.unwrap()
def find1orFail(bs, sel, asText=False):
'Find a single tag matching sel or fail'
hits = bs.select(sel)
if len(hits) != 1:
config.log(u"'%s' var %d gange på siden (!=1)" % (sel, len(hits)), -1)
sys.exit(1)
hit = hits[0]
if asText:
hit = hit.text.strip()
return hit
def contents2html(bs):
'Return HTML inside bs as unicode text'
return u''.join(unicode(c) for c in bs.contents).strip()
def appendComment(bs, text=''):
'''Append a comment 'Tag' with the specified text'''
bs.append(bs4.Comment(text))
def appendTodayComment(bs):
'''Append a comment 'Tag' with today's date'''
appendComment(bs, time.strftime(u' I dag er %Y-%m-%d '))
def deobfuscateEmail(s):
'Deobfuscate an e-mail address. Return the address if possible o.w. None'
if len(s) % 2 or not s:
# Not with a length divisible by 2
return
try:
# Check that this is a hex string
int(s, 16)
except ValueError:
# Not hex string somewhere
return
key = int(s[:2], 16)
mail = ''.join(chr(int(s[i:i+2], 16) ^ key) for i in range(2, len(s), 2))
if '@' not in mail:
return # not an e-mail
return mail
def cleanupSoup(bs):
'''Cleanup/deobfuscate the soup'''
# deobfuscate content/spans with email addresses
CLASS = '__cf_email__'
DATA = 'data-cfemail'
HREF_PREFIX = '/cdn-cgi/l/email-protection'
for tag in bs.find_all(**{'class': CLASS, DATA: re.compile('.')}):
email = deobfuscateEmail(tag[DATA])
if email:
del tag[DATA]
tag['class'].remove(CLASS)
tag.string = email
if tag.name == 'span' and tag.attrs == {}:
tag.unwrap()
if tag.name == 'a' and tag.has_attr('href') and \
tag['href'].startswith(HREF_PREFIX):
tag['href'] = 'mailto:' + email
# deobfuscate href's with email links
for tag in bs.find_all('a', href=re.compile('^%s.*' % (HREF_PREFIX))):
href = tag['href']
email = deobfuscateEmail(href[len(HREF_PREFIX):].strip('#'))
if email:
tag['href'] = 'mailto:' + email
else:
tag.unwrap()
BLOCKED = 'blocked::'
for tag in bs.find_all('a', title=re.compile('^%s.*' % BLOCKED)):
tag['title'] = tag['title'][len(BLOCKED):]
if tag.has_attr('href') and tag['title'] == tag['href']:
del tag['title']
# Remove imgs without an actual image - probably copied into ForældreIntra
# from e.g., Outlook.
rec = re.compile('^(%s).*' % '|'.join(['cid']))
for img in bs.find_all('img', src=rec):
img.extract()
# Clean up "Word-like" style attributes
for tag in bs.find_all():
if not tag.has_attr('style'):
continue
sts = []
for st in tag['style'].split(';'):
st = st.strip()
if st.startswith('mso-'):
continue
if st:
sts.append(st)
if sts:
tag['style'] = u';'.join(sts)
else:
del tag['style']
# Remove target from links
for tag in bs.select('a'):
del tag['target']
# Remove empty class attributes
for tag in bs.find_all(**{'class': ''}):
if not tag.has_attr('class'):
continue
while '' in tag['class']:
tag['class'].remove('')
if not tag['class']:
del tag['class']
def trimSoup(bs):
'''Trim "body" of bs for whitespace including <br/>'''
for rev in [False, True]:
children = list(bs.children)
if rev:
children = reversed(children)
for c in children:
if isinstance(c, bs4.element.Tag):
if c.name == 'br':
c.extract()
continue
if isinstance(c, bs4.element.NavigableString):
text = c.string
text = text.rstrip() if rev else text.lstrip()
if not text:
c.extract()
continue
c.string.replace_with(text)
break
def condenseSoup(bs):
'''Trim bs for empty divs, etc to condense the HTML put in e-mails'''
for e in bs.select('div'):
# remove empty divs
contents = u''.join(map(unicode, e.children)).strip()
if not contents:
e.extract()
trimSoup(bs)
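    # Merge adjacent NavigableStrings left behind by the removals above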
for c in list(bs.descendants):
if isinstance(c, bs4.element.NavigableString) and \
c.previous_sibling and \
isinstance(c.previous_sibling, bs4.element.NavigableString):
text = c.previous_sibling.string + c.string
c.previous_sibling.string.replace_with(text)
c.extract()
def beautify(data):
bs = bs4.BeautifulSoup(data, 'lxml')
cleanupSoup(bs)
return bs
| 28.558511 | 78 | 0.549823 | 706 | 5,369 | 4.143059 | 0.284703 | 0.012308 | 0.01641 | 0.020513 | 0.167179 | 0.115897 | 0.076239 | 0.036239 | 0.036239 | 0.036239 | 0 | 0.007311 | 0.312162 | 5,369 | 187 | 79 | 28.71123 | 0.784728 | 0.172658 | 0 | 0.155556 | 0 | 0 | 0.121322 | 0.005757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.044444 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74a979a33ee7bb086190c14c1858fa7da34c4aa | 3,885 | py | Python | pyaiot/common/messaging.py | aabadie/pyaiot | fed441ec02c0b67b22b7ba2b06ebe28e0f8dcf77 | [
"BSD-3-Clause"
] | null | null | null | pyaiot/common/messaging.py | aabadie/pyaiot | fed441ec02c0b67b22b7ba2b06ebe28e0f8dcf77 | [
"BSD-3-Clause"
] | null | null | null | pyaiot/common/messaging.py | aabadie/pyaiot | fed441ec02c0b67b22b7ba2b06ebe28e0f8dcf77 | [
"BSD-3-Clause"
] | 1 | 2019-12-03T19:53:46.000Z | 2019-12-03T19:53:46.000Z | # Copyright 2017 IoT-Lab Team
# Contributor(s) : see AUTHORS file
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Pyaiot messaging utility module."""
import json
import logging
logger = logging.getLogger("pyaiot.messaging")
class Message():
"""Utility class for generating and parsing service messages."""
@staticmethod
def serialize(message):
return json.dumps(message, ensure_ascii=False)
@staticmethod
def new_node(uid, dst="all"):
"""Generate a text message indicating a new node."""
return Message.serialize({'type': 'new', 'uid': uid, 'dst': dst})
@staticmethod
def out_node(uid):
"""Generate a text message indicating a node to remove."""
return Message.serialize({'type': 'out', 'uid': uid})
@staticmethod
def update_node(uid, endpoint, data, dst="all"):
"""Generate a text message indicating a node update."""
return Message.serialize({'type': 'update',
'uid': uid,
'endpoint': endpoint,
'data': data,
'dst': dst})
@staticmethod
def discover_node():
"""Generate a text message for websocket node discovery."""
return Message.serialize({'request': 'discover'})
@staticmethod
def check_message(raw):
"""Verify a received message is correctly formatted."""
reason = None
try:
message = json.loads(raw)
except TypeError as e:
logger.warning(e)
reason = "Invalid message '{}'.".format(raw)
message = None
except json.JSONDecodeError:
reason = ("Invalid message received "
"'{}'. Only JSON format is supported.".format(raw))
message = None
if message is not None:
if not hasattr(message, '__iter__'):
reason = "Invalid message '{}'.".format(message)
elif 'type' not in message and 'data' not in message:
reason = "Invalid message '{}'.".format(message)
elif (message['type'] != 'new' and message['type'] != 'update' and
message['type'] != 'out'):
reason = "Invalid message type '{}'.".format(message['type'])
if reason is not None:
logger.warning(reason)
message = None
return message, reason
| 39.642857 | 79 | 0.645302 | 464 | 3,885 | 5.381466 | 0.407328 | 0.036043 | 0.040048 | 0.032038 | 0.148578 | 0.129355 | 0.09972 | 0.084101 | 0.054465 | 0.054465 | 0 | 0.002442 | 0.262291 | 3,885 | 97 | 80 | 40.051546 | 0.868807 | 0.471557 | 0 | 0.229167 | 0 | 0 | 0.141709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.041667 | 0.020833 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74ab707fc143a606f42fe71ad454fbd6edc6d46 | 581 | py | Python | tests/Action/test_Breaking.py | aalireza/arep | 95f0ec6282c4f5d12462d2a64e82d6777f51bf06 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T00:15:26.000Z | 2022-01-14T00:15:26.000Z | tests/Action/test_Breaking.py | aalireza/arep | 95f0ec6282c4f5d12462d2a64e82d6777f51bf06 | [
"BSD-3-Clause"
] | null | null | null | tests/Action/test_Breaking.py | aalireza/arep | 95f0ec6282c4f5d12462d2a64e82d6777f51bf06 | [
"BSD-3-Clause"
] | null | null | null | from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
all_results = results_formatter({
(2, 4), (6, 8), (15, 12)
})
@pytest.fixture
def grepper():
engine = arep.Grepper(os.path.abspath('tests/data/Action/Breaking.py'))
return engine
def test_Breaking(grepper, action):
action.reset()
action.Breaking.consideration = True
grepper.constraint_list.append(action)
assert set(grepper.all_results()) == all_results
| 23.24 | 79 | 0.740103 | 76 | 581 | 5.486842 | 0.539474 | 0.153477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016064 | 0.142857 | 581 | 24 | 80 | 24.208333 | 0.821285 | 0 | 0 | 0 | 0 | 0 | 0.049914 | 0.049914 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.111111 | false | 0 | 0.277778 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74bed80be92f08fc81568fdb71303c6cd96cf73 | 4,432 | py | Python | scripts/study_case/ID_59/code_06.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 8 | 2021-06-30T06:55:14.000Z | 2022-03-18T01:57:14.000Z | scripts/study_case/ID_59/code_06.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 1 | 2021-06-30T03:08:15.000Z | 2021-06-30T03:08:15.000Z | scripts/study_case/ID_59/code_06.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 2 | 2021-11-17T11:19:48.000Z | 2021-11-18T03:05:58.000Z | import numpy as np
import tensorflow as tf
import sys
sys.path.append("/data")
class dateset():
def __init__(self, images, labels):
        self.num_examples = len(images)  # number of samples
        self.images = np.reshape(images / 255., [-1, 28 * 28])  # normalize and flatten the images
        self.labels = np.eye(10)[labels]  # one-hot encode the labels
    def next_batch(self, batch_size):  # randomly draw a batch of images and labels
batch_index = np.random.choice(self.num_examples, batch_size)
return self.images[batch_index], self.labels[batch_index]
class mnist():
def __init__(self):
        # Load the MNIST handwritten-digit data, x shape: (?,28,28); y shape: (?); x value: 0~255; y value: 0~9
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
self.train = dateset(x_train, y_train)
self.test = dateset(x_test, y_test)
# Load the handwritten-digit dataset
mnist = mnist()
# Define the neural network
class network():
def __init__(self):
self.learning_rate = 0.01
self.x = tf.placeholder(tf.float32, [None, 784], name='x')
self.y = tf.placeholder(tf.float32, [None, 10], name='y')
self.w = tf.Variable(tf.random_uniform([784, 10], -1, 1), name="weights")
self.b = tf.Variable(tf.zeros([10]), name="bias")
self.full_connect_layer = tf.add(tf.matmul(self.x, self.w), self.b)
self.pred = tf.nn.softmax(self.full_connect_layer, name='y_pred')
    # Compute the accuracy
def get_accuracy(self):
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.pred, 1), tf.argmax(self.y, 1)), tf.float32))
return accuracy
    # Gradient update computed by hand
def get_loss1(self):
        # Clamp the predictions away from 0 before the log to avoid an undefined log(0)
tf.clip_by_value(self.pred, 1e-15, 1.0)
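        # Note: tf.clip_by_value returns a new tensor; the result is not assigned,
        # so self.pred is used unclipped in the log below.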
cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.pred), reduction_indices=1))
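        # Hand-derived gradient of the softmax cross-entropy w.r.t. the weights: dL/dW = -X^T (y - p)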
w_grad = - tf.matmul(tf.transpose(self.x), self.y - self.pred)
b_grad = - tf.reduce_mean(tf.matmul(tf.transpose(self.x), self.y - self.pred), reduction_indices=0)
new_w = self.w.assign(self.w - self.learning_rate * w_grad)
new_b = self.b.assign(self.b - self.learning_rate * b_grad)
optimizer = [new_w, new_b]
return cross_entropy, optimizer
    # Gradient update computed with tf.gradients
def get_loss2(self):
        # Clamp the predictions away from 0 before the log to avoid an undefined log(0)
tf.clip_by_value(self.pred, 1e-15, 1.0)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.pred), reduction_indices=1))
w_grad, b_grad = tf.gradients(cross_entropy, [self.w, self.b])
new_w = self.w.assign(self.w - self.learning_rate * w_grad)
new_b = self.b.assign(self.b - self.learning_rate * b_grad)
optimizer = [new_w, new_b]
return cross_entropy, optimizer
    # TensorFlow (stochastic) gradient descent
def get_loss3(self):
        # Clamp the predictions away from 0 before the log to avoid an undefined log(0)
tf.clip_by_value(self.pred, 1e-15, 1.0)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.pred), reduction_indices=1))
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(cross_entropy)
return cross_entropy, optimizer
    # TensorFlow momentum gradient descent
def get_loss4(self):
        # Clamp the predictions away from 0 before the log to avoid an undefined log(0)
tf.clip_by_value(self.pred, 1e-15, 1.0)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.pred), reduction_indices=1))
optimizer = tf.train.MomentumOptimizer(self.learning_rate, 0.9).minimize(cross_entropy)
return cross_entropy, optimizer
def main():
net = network()
cross_entropy, optimizer = net.get_loss1()
batch_size = 100
accuracy = net.get_accuracy()
'''inserted code'''
from scripts.utils.tf_utils import TensorFlowScheduler
scheduler = TensorFlowScheduler(name="tensorflow_book.code_06")
'''inserted code'''
with tf.Session() as sess:
tf.train.write_graph(sess.graph_def, '/data/scripts/study_case/pbtxt_files', 'tensorflow_book.pbtxt')
sess.run(tf.global_variables_initializer())
while True:
total_batch = int(mnist.train.num_examples / batch_size)
for step in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, loss = sess.run([optimizer, cross_entropy], feed_dict={net.x: batch_xs, net.y: batch_ys})
'''inserted code'''
scheduler.loss_checker(loss)
scheduler.check_time()
'''inserted code'''
if __name__ == '__main__':
main()
| 37.243697 | 111 | 0.643502 | 636 | 4,432 | 4.273585 | 0.25 | 0.057395 | 0.041207 | 0.030905 | 0.377483 | 0.358352 | 0.358352 | 0.323767 | 0.323767 | 0.323767 | 0 | 0.02611 | 0.222247 | 4,432 | 118 | 112 | 37.559322 | 0.762402 | 0.06769 | 0 | 0.263158 | 0 | 0 | 0.02773 | 0.019807 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.052632 | 0 | 0.302632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a74dae23523c5e0468e1ca9c415b2ed591e24e5f | 777 | py | Python | source/loss/SyNPairsLoss.py | celsofranssa/E2ECodeSearch | 8f11029fbcca968885658a7e152e7edd8200b6fe | [
"MIT"
] | null | null | null | source/loss/SyNPairsLoss.py | celsofranssa/E2ECodeSearch | 8f11029fbcca968885658a7e152e7edd8200b6fe | [
"MIT"
] | null | null | null | source/loss/SyNPairsLoss.py | celsofranssa/E2ECodeSearch | 8f11029fbcca968885658a7e152e7edd8200b6fe | [
"MIT"
] | null | null | null | import torch
from torch import nn
class SyNPairsLoss(nn.Module):
def __init__(self, name):
super(SyNPairsLoss, self).__init__()
self.name = name
def forward(self, r1, r2):
"""
Computes the N-Pairs Loss between the r1 and r2 representations.
:param r1: Tensor of shape (batch_size, representation_size)
:param r2: Tensor of shape (batch_size, representation_size)
        :return: the scalar loss
"""
scores = torch.matmul(r1, r2.t())
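        # scores[i][j] is the similarity between r1[i] and r2[j]; the diagonal holds the
        # matching pairs. The row/column logsumexp terms act as softmax normalisers in each
        # direction, so the loss is a symmetric cross-entropy over in-batch negatives.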
diagonal_mean = torch.mean(torch.diag(scores))
r_lse = torch.mean(torch.logsumexp(scores, dim=1))
c_lse = torch.mean(torch.logsumexp(scores, dim=0))
return 1/2 * (r_lse - diagonal_mean) +\
1/2 * (c_lse - diagonal_mean)
| 29.884615 | 72 | 0.619048 | 103 | 777 | 4.485437 | 0.456311 | 0.077922 | 0.090909 | 0.077922 | 0.324675 | 0.324675 | 0.324675 | 0 | 0 | 0 | 0 | 0.024691 | 0.27027 | 777 | 25 | 73 | 31.08 | 0.790123 | 0.27027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7515490eb2bb01c38c37ef8f248b97e75d3fd1a | 19,181 | py | Python | resources/aws_api_gateway.py | luk-kop/verus-stake-notification | b93f06f7f30b26bce48cdf87464419a9cbe3d10f | [
"MIT"
] | null | null | null | resources/aws_api_gateway.py | luk-kop/verus-stake-notification | b93f06f7f30b26bce48cdf87464419a9cbe3d10f | [
"MIT"
] | null | null | null | resources/aws_api_gateway.py | luk-kop/verus-stake-notification | b93f06f7f30b26bce48cdf87464419a9cbe3d10f | [
"MIT"
] | null | null | null | import boto3
from typing import List, Union
from dataclasses import dataclass
from resources.aws_policy_document import PolicyStatement, PolicyDocumentCustom
from resources.aws_cognito import CognitoUserPool, CognitoResources
class ApiGateway:
"""
Class represents API Gateway resource. If a API Gateway with the specified name already exists, it is used.
The API Gateway is publicly accessible and invokes Lambda function.
"""
def __init__(self, name: str, lambda_arn: str):
self.name = name
self.lambda_arn = lambda_arn
self.api_endpoint = 'stake'
self._api_client = boto3.client('apigateway')
self._account_id = boto3.client('sts').get_caller_identity()['Account']
def create_resource(self) -> None:
"""
Creates Cognito user pool resource in AWS cloud.
"""
if not self._check_exist():
resource_policy = self.create_policy()
self._api_client.create_rest_api(
name=self.name,
description='Invoke Lambda function to publish a msg to SNS topic when new stake appears in Verus wallet.',
apiKeySource='HEADER',
endpointConfiguration={
'types': ['REGIONAL'],
},
policy=resource_policy,
tags={
'Project': 'verus-notification'
},
)
print(f'The API Gateway "{self.name}" created.')
return
print(f'The API Gateway "{self.name}" exists. Using it.')
@property
def id(self):
"""
Returns API Gateway id.
"""
for api in self._api_gateways:
if api['name'] == self.name:
return api['id']
return ''
def get_url(self, stage_name: str):
"""
Returns API Gateway URL.
"""
if self._check_stage_exist(name=stage_name):
for api in self._api_gateways:
if api['name'] == self.name:
return f'https://{self.id}.execute-api.{self._api_client.meta.region_name}.' \
f'amazonaws.com/{stage_name}/{self.api_endpoint}'
print(f'Stage name {stage_name} does not exist')
return ''
def _check_stage_exist(self, name):
"""
        Checks if a stage with the specified name already exists.
"""
try:
self._api_client.get_stage(restApiId=self.id,
stageName=name)
return True
except self._api_client.exceptions.NotFoundException:
return False
@property
def arn(self):
"""
Returns API Gateway ARN.
"""
for api in self._api_gateways:
if api['name'] == self.name:
return f'arn:aws:execute-api:{self._api_client.meta.region_name}:' \
f'{self._account_id}:{self.id}/*/GET/{self.api_endpoint}'
return ''
def _check_exist(self):
"""
        Checks if an API Gateway resource with the specified name already exists.
"""
return True if self.id else False
@property
def _api_gateways(self):
"""
Returns list of already created API Gateways.
"""
return self._api_client.get_rest_apis()['items']
@property
def authorizers(self) -> list:
"""
Returns list of already created API Gateway Authorizers.
"""
try:
return self._api_client.get_authorizers(limit=50,
restApiId=self.id)['items']
except self._api_client.exceptions.NotFoundException:
return []
@property
def root_resource_id(self) -> str:
"""
Returns parent id (root resource - path '/').
"""
resources = self._api_client.get_resources(restApiId=self.id)
resource_items = resources['items']
for item in resource_items:
if item['path'] == '/':
# return root resource id
return item['id']
def create_policy(self):
"""
Creates resource-based policy for API Gateway endpoint
"""
policy = PolicyDocumentCustom()
policy_statement = PolicyStatement(effect='Allow',
actions='execute-api:Invoke',
resources='execute-api:/*',
principals='*')
policy_statement.add_condition(condition_operator='IpAddress',
condition_key='aws:SourceIp',
condition_value=['0.0.0.0/0'])
policy.add_statement(policy_statement)
return policy.get_json()
def delete_resource(self):
"""
Deletes API Gateway with all associated API Gateway resources.
"""
if self._check_exist():
self._api_client.delete_rest_api(restApiId=self.id)
print(f'The API Gateway {self.name} has been deleted')
return
print(f'The API Gateway "{self.name}" does not exist')
def deploy(self, stage_name: str) -> None:
"""
Creates API Gateway deployment.
"""
if not self._check_stage_exist(name=stage_name):
self._api_client.create_deployment(restApiId=self.id,
stageName=stage_name)
return
print(f'Stage name "{stage_name}" already exists')
class ApiGatewayAuthorizer:
"""
Class represents API Gateway Authorizer resource.
"""
def __init__(self, name: str, api_id: str, providers: List[CognitoUserPool], auth_type: str) -> None:
self.api_id = api_id
self.name = name
self.providers = providers
self.auth_type = auth_type
self._auth_client = boto3.client('apigateway')
def create_resource(self) -> None:
"""
Creates API Gateway Authorizer resource in AWS cloud.
"""
if not self._check_exist():
self._auth_client.create_authorizer(
restApiId=self.api_id,
name=self.name,
type=self.auth_type,
providerARNs=[provider.arn for provider in self.providers],
identitySource='method.request.header.Authorization',
)
print(f'The API Gateway Authorizer "{self.name}" created')
return
print(f'The API Gateway Authorizer "{self.name}" exists. Using it.')
@property
def auth_type(self) -> str:
"""
Returns auth_type attribute
"""
return self._auth_type
@auth_type.setter
def auth_type(self, new_type) -> str:
"""
Sets auth_type attribute and makes simple input validation.
"""
allowed_types = ['TOKEN', 'REQUEST', 'COGNITO_USER_POOLS']
if new_type not in allowed_types:
raise AttributeError(f'Wrong auth_type value. The allowed auth_type values: {", ".join(allowed_types)}')
self._auth_type = new_type
@property
def id(self) -> str:
"""
Returns API Gateway Authorizer id.
"""
for auth in self._authorizers:
if auth['name'] == self.name:
return auth['id']
return ''
@property
def _authorizers(self) -> list:
"""
Returns list of already created API Gateway Authorizers.
"""
try:
return self._auth_client.get_authorizers(limit=50,
restApiId=self.api_id)['items']
except self._auth_client.exceptions.NotFoundException:
return []
def _check_exist(self) -> bool:
"""
        Checks if an API Gateway Authorizer resource with the specified name already exists.
"""
result = True if self.id else False
return result
def delete_resource(self) -> None:
"""
Deletes API Gateway Authorizer in AWS cloud.
"""
if self._check_exist():
try:
self._auth_client.delete_authorizer(restApiId=self.api_id,
authorizerId=self.id)
print(f'The API Gateway Authorizer "{self.name}" has been deleted')
except self._auth_client.exceptions.ConflictException as err:
print(err.response['Error']['Message'])
return
print(f'The API Gateway Authorizer "{self.name}" does not exist')
@dataclass
class ApiMethod:
"""
Class represents API Gateway HTTP method.
"""
http_method: str
api_id: str
resource_id: str
authorizer: Union[None, ApiGatewayAuthorizer] = None
@property
def data(self) -> dict:
"""
Returns properly prepared HTTP method statement for boto3 usage.
"""
method_data = {
'restApiId': self.api_id,
'resourceId': self.resource_id,
'httpMethod': self.http_method
}
if self.authorizer:
method_data['authorizationType'] = self.authorizer.auth_type
method_data['authorizerId'] = self.authorizer.id
# TODO: change below!!!
resource_srv = self.authorizer.providers[0].resource_servers[0]
resource_srv_scopes = resource_srv['Scopes']
resource_srv_identifier = resource_srv['Identifier']
            method_data['authorizationScopes'] = [f'{resource_srv_identifier}/{scope["ScopeName"]}' for scope in resource_srv_scopes]
else:
method_data['authorizationType'] = 'NONE'
return method_data
class ApiGatewayResource:
"""
Class represents API Gateway Resource resource.
"""
def __init__(self, api_id: str, parent_id: str, path_part: str) -> None:
self.api_id = api_id
self.parent_id = parent_id
self.path_part = path_part
self._api_resource_client = boto3.client('apigateway')
def create_resource(self) -> None:
"""
Creates API Gateway Resource resource in AWS cloud.
"""
if not self._check_exist():
self._api_resource_client.create_resource(
restApiId=self.api_id,
parentId=self.parent_id,
pathPart=self.path_part
)
print(f'The API Gateway resource "{self.path_part}" created')
return
print(f'The API Gateway resource "{self.path_part}" exists. Using it.')
def _check_exist(self) -> bool:
"""
        Checks if an API Gateway resource with the specified path already exists.
"""
return True if self.id else False
@property
def _api_resources(self) -> list:
"""
Returns list of already created API Gateway resources.
"""
return self._api_resource_client.get_resources(restApiId=self.api_id,
limit=60)['items']
@property
def id(self) -> str:
"""
        Returns the API Gateway Resource id.
"""
for resource in self._api_resources:
path_part = resource.get('pathPart')
if path_part == self.path_part:
return resource['id']
return ''
@property
def full_path(self) -> str:
"""
Returns full path for API Gateway resource.
"""
if self._check_exist():
            resource = self._api_resource_client.get_resource(
restApiId=self.api_id,
resourceId=self.id,
)
return resource['path']
return ''
def put_method(self, api_method: ApiMethod) -> None:
"""
Adds a method to existing resource.
"""
try:
self._api_resource_client.put_method(**api_method.data)
except self._api_resource_client.exceptions.ConflictException:
print('Method already exists for this resource')
def put_integration(self, api_method, lambda_arn, integration_type: str = 'AWS') -> None:
"""
Sets up a method' integration.
"""
# NOTE: For Lambda integrations, you must use the HTTP method of POST for the integration request
# (integrationHttpMethod) or this will not work
lambda_uri = f'arn:aws:apigateway:{self._api_resource_client.meta.region_name}:' \
f'lambda:path/2015-03-31/functions/{lambda_arn}/invocations'
self._api_resource_client.put_integration(restApiId=self.api_id,
resourceId=self.id,
httpMethod=api_method.http_method,
type=integration_type,
integrationHttpMethod='POST',
uri=lambda_uri,
connectionType='INTERNET'
)
def put_method_response(self, api_method: ApiMethod) -> None:
"""
Adds a method response to an existing existing method resource.
"""
api_method = api_method.data
# Remove unnecessary keys
allowed_keys = ['restApiId', 'resourceId', 'httpMethod']
method_response = {key: value for (key, value) in api_method.items() if key in allowed_keys}
method_response['statusCode'] = '200'
# Put method response
try:
self._api_resource_client.put_method_response(**method_response)
except self._api_resource_client.exceptions.ConflictException:
print('Response already exists for this resource')
def put_integration_response(self, api_method: ApiMethod) -> None:
"""
Sets up a method' integration response.
"""
self._api_resource_client.put_integration_response(restApiId=self.api_id,
resourceId=self.id,
httpMethod=api_method.data['httpMethod'],
statusCode='200',
selectionPattern='',
contentHandling='CONVERT_TO_TEXT')
def delete_resource(self) -> None:
"""
Deletes API Gateway resource in AWS cloud.
"""
if self._check_exist():
try:
self._api_resource_client.delete_resource(restApiId=self.api_id,
resourceId=self.id)
print(f'The API Gateway resource "{self.path_part}" has been deleted')
except self._api_resource_client.exceptions.InvalidParameterException as err:
print(err.response['Error']['Message'])
return
print(f'The API Gateway resource "{self.path_part}" does not exist')
class ApiResources:
"""
Class represents all API Gateway related resources used in verus-notification project.
"""
def __init__(self, api_name: str, lambda_arn: str, http_methods: list, stage_name: str,
user_pool: Union[CognitoUserPool, None] = None) -> None:
self.api_name = api_name
self.lambda_arn = lambda_arn
self.user_pool = user_pool
self.http_methods = http_methods
self.stage_name = stage_name
self.authorizer = None
# API Gateway instantiation
self.api = ApiGateway(name=api_name, lambda_arn=lambda_arn)
self.api.create_resource()
# API Gateway Resource instantiation
self.api_resource = ApiGatewayResource(api_id=self.api.id,
parent_id=self.api.root_resource_id,
path_part='stake')
self.api_resource.create_resource()
if user_pool:
# API Gateway Authorizer instantiation
self.authorizer = ApiGatewayAuthorizer(name='VerusApiAuthBoto3',
api_id=self.api.id,
providers=[user_pool],
auth_type='COGNITO_USER_POOLS')
self.authorizer.create_resource()
self.add_http_methods()
# Deploy API Gateway
self.api.deploy(stage_name=stage_name)
@property
def invoke_url(self):
"""
Returns API Gateway invoke URL.
"""
return self.api.get_url(self.stage_name)
@property
def arn(self):
"""
Returns API Gateway ARN.
"""
return self.api.arn
def add_http_methods(self):
"""
Adds HTTP methods and integrations to API Gateway Resource.
"""
for method in self.http_methods:
method_get = ApiMethod(http_method=method,
api_id=self.api.id,
resource_id=self.api_resource.id,
authorizer=self.authorizer)
self.api_resource.put_method(api_method=method_get)
self.api_resource.put_integration(api_method=method_get, lambda_arn=self.lambda_arn)
self.api_resource.put_method_response(api_method=method_get)
self.api_resource.put_integration_response(api_method=method_get)
def create(self):
"""
Creates all API Gateway related resources. Method can be used to recreate API Gateway resources after deletion.
"""
self.api.create_resource()
if self.user_pool:
self.authorizer.create_resource()
self.add_http_methods()
# Deploy API Gateway
self.api.deploy(stage_name=self.stage_name)
def delete(self):
"""
Deletes all API Gateway related resources.
"""
self.api.delete_resource()
def main() -> None:
"""
Main function - example of use
"""
# Add existed Lambda ARN
lambda_arn = ''
scopes = [
{
'name': 'api-read',
'description': 'Read access to the API'
}
]
cognito_resources = CognitoResources(user_pool_name='UserPool4Tests',
resource_server_scopes=scopes,
pool_domain='verus-test-12345',
name_prefix='verus-api')
resources = ApiResources(api_name='ApiGateway4Tests',
lambda_arn=lambda_arn,
http_methods=['GET'],
stage_name='vrsc',
user_pool=cognito_resources.user_pool)
print(resources.invoke_url)
# Delete all resources
resources.delete()
cognito_resources.delete()
if __name__ == '__main__':
main() | 37.10058 | 132 | 0.558469 | 1,982 | 19,181 | 5.193239 | 0.134712 | 0.047605 | 0.029146 | 0.026523 | 0.41776 | 0.331293 | 0.283979 | 0.230836 | 0.164481 | 0.10716 | 0 | 0.003298 | 0.351859 | 19,181 | 517 | 133 | 37.10058 | 0.824646 | 0.134508 | 0 | 0.276074 | 0 | 0 | 0.127854 | 0.024232 | 0 | 0 | 0 | 0.001934 | 0 | 1 | 0.122699 | false | 0 | 0.015337 | 0 | 0.276074 | 0.058282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a751b975e24f9ac015727aa1f7eab918b43dbc35 | 461 | py | Python | dla_34/loss.py | wi-ith/dla_34_classification | 877e5b1a44fc18f03c26d0d9ab5a102c98dbdcbc | [
"MIT"
] | 2 | 2020-05-12T15:58:13.000Z | 2020-06-30T10:11:18.000Z | dla_34/loss.py | wi-ith/dla_34_classification | 877e5b1a44fc18f03c26d0d9ab5a102c98dbdcbc | [
"MIT"
] | null | null | null | dla_34/loss.py | wi-ith/dla_34_classification | 877e5b1a44fc18f03c26d0d9ab5a102c98dbdcbc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: wi-ith
"""
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
def soft_max(logits, axis=-1):
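    # Numerically stable softmax: subtract the per-row maximum before exponentiating,
    # then normalise by the row-wise sum of exponentials (assumes 2-D logits, axis=-1).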
tile_depth = logits.shape[axis]
max_value = tf.tile(tf.reshape((tf.reduce_max(logits, axis=axis)), [-1, 1]), [1, tile_depth])
exp_logits = tf.exp(logits-max_value)
exp_sum = tf.tile(tf.reshape((tf.reduce_sum(exp_logits, axis=axis)), [-1, 1]), [1, tile_depth])
return exp_logits / exp_sum
| 25.611111 | 99 | 0.659436 | 77 | 461 | 3.779221 | 0.376623 | 0.027491 | 0.103093 | 0.103093 | 0.33677 | 0.33677 | 0.178694 | 0.178694 | 0 | 0 | 0 | 0.020619 | 0.158351 | 461 | 17 | 100 | 27.117647 | 0.729381 | 0.08243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a755138d5807904004e5ce66662a602b945a90c1 | 10,526 | py | Python | arch/api/impl/based_spark/based_1x/table.py | yzjba/FATE | 9a6d252da637b2583a0f8a51f6cb4c615850bab9 | [
"Apache-2.0"
] | 32 | 2020-06-12T08:39:58.000Z | 2022-03-20T06:57:08.000Z | arch/api/impl/based_spark/based_1x/table.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
] | 10 | 2020-11-13T18:55:48.000Z | 2022-02-10T02:00:12.000Z | arch/api/impl/based_spark/based_1x/table.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
] | 16 | 2020-06-12T06:51:46.000Z | 2022-03-29T10:23:42.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from typing import Iterable
from arch.api.base.table import Table
from arch.api.impl.based_spark import util
from arch.api.impl.utils.split import split_put, split_get
from arch.api.utils.profile_util import log_elapsed
class RDDTable(Table):
# noinspection PyProtectedMember
@classmethod
def from_dtable(cls, session_id: str, dtable):
namespace = dtable._namespace
name = dtable._name
partitions = dtable._partitions
return RDDTable(session_id=session_id, namespace=namespace, name=name, partitions=partitions, dtable=dtable)
@classmethod
def from_rdd(cls, rdd, job_id: str, namespace: str, name: str):
partitions = rdd.getNumPartitions()
return RDDTable(session_id=job_id, namespace=namespace, name=name, partitions=partitions, rdd=rdd)
def __init__(self, session_id: str,
namespace: str,
name: str = None,
partitions: int = 1,
rdd=None,
dtable=None):
self._valid_param_check(rdd, dtable, namespace, partitions)
setattr(self, util.RDD_ATTR_NAME, rdd)
self._rdd = rdd
self._partitions = partitions
self._dtable = dtable
self.schema = {}
self._name = name or str(uuid.uuid1())
self._namespace = namespace
self._session_id = session_id
def get_name(self):
return self._name
def get_namespace(self):
return self._namespace
def __str__(self):
return f"{self._namespace}, {self._name}, {self._dtable}"
def __repr__(self):
return f"{self._namespace}, {self._name}, {self._dtable}"
def _tmp_table_from_rdd(self, rdd, name=None):
"""
tmp table, with namespace == job_id
"""
rdd = util.materialize(rdd)
name = name or str(uuid.uuid1())
return RDDTable(session_id=self._session_id,
namespace=self._namespace,
name=name,
partitions=rdd.getNumPartitions(),
rdd=rdd,
dtable=None)
# self._rdd should not be pickled(spark requires all transformer/action to be invoked in driver).
def __getstate__(self):
state = dict(self.__dict__)
if "_rdd" in state:
del state["_rdd"]
return state
@staticmethod
def _valid_param_check(rdd, dtable, namespace, partitions):
assert (rdd is not None) or (dtable is not None), "params rdd and storage are both None"
assert namespace is not None, "namespace is None"
assert partitions > 0, "invalid partitions={0}".format(partitions)
def rdd(self):
if hasattr(self, "_rdd") and self._rdd is not None:
return self._rdd
if self._dtable is None:
raise AssertionError("try create rdd from None storage")
return self._rdd_from_dtable()
# noinspection PyProtectedMember,PyUnresolvedReferences
@log_elapsed
def _rdd_from_dtable(self):
storage_iterator = self._dtable.collect(use_serialize=True)
if self._dtable.count() <= 0:
storage_iterator = []
num_partition = self._dtable._partitions
from pyspark import SparkContext
self._rdd = SparkContext.getOrCreate() \
.parallelize(storage_iterator, num_partition) \
.persist(util.get_storage_level())
return self._rdd
def dtable(self):
"""
rdd -> storage
"""
if self._dtable:
return self._dtable
else:
if not hasattr(self, "_rdd") or self._rdd is None:
raise AssertionError("try create dtable from None")
return self._rdd_to_dtable()
# noinspection PyUnusedLocal
@log_elapsed
def _rdd_to_dtable(self, **kwargs):
self._dtable = self.save_as(name=self._name,
namespace=self._namespace,
partition=self._partitions,
persistent=False)._dtable
return self._dtable
def get_partitions(self):
return self._partitions
@log_elapsed
def map(self, func, **kwargs):
from arch.api.impl.based_spark.rdd_func import _map
rtn_rdd = _map(self.rdd(), func)
return self._tmp_table_from_rdd(rtn_rdd)
@log_elapsed
def mapValues(self, func, **kwargs):
from arch.api.impl.based_spark.rdd_func import _map_value
rtn_rdd = _map_value(self.rdd(), func)
return self._tmp_table_from_rdd(rtn_rdd)
@log_elapsed
def mapPartitions(self, func, **kwargs):
from arch.api.impl.based_spark.rdd_func import _map_partitions
rtn_rdd = _map_partitions(self.rdd(), func)
return self._tmp_table_from_rdd(rtn_rdd)
@log_elapsed
def mapPartitions2(self, func, **kwargs):
        return self._tmp_table_from_rdd(self.rdd().mapPartitions(func))
@log_elapsed
def reduce(self, func, key_func=None, **kwargs):
if key_func is None:
return self.rdd().values().reduce(func)
return dict(self.rdd().map(lambda x: (key_func(x[0]), x[1])).reduceByKey(func).collect())
def join(self, other, func=None, **kwargs):
rdd1 = self.rdd()
rdd2 = other.rdd()
# noinspection PyUnusedLocal,PyShadowingNames
@log_elapsed
def _join(rdda, rddb, **kwargs):
from arch.api.impl.based_spark.rdd_func import _join
return self._tmp_table_from_rdd(_join(rdda, rddb, func))
return _join(rdd1, rdd2, **kwargs)
@log_elapsed
def glom(self, **kwargs):
from arch.api.impl.based_spark.rdd_func import _glom
return self._tmp_table_from_rdd(_glom(self.rdd()))
@log_elapsed
def sample(self, fraction, seed=None, **kwargs):
from arch.api.impl.based_spark.rdd_func import _sample
return self._tmp_table_from_rdd(_sample(self.rdd(), fraction, seed))
@log_elapsed
def subtractByKey(self, other, **kwargs):
from arch.api.impl.based_spark.rdd_func import _subtract_by_key
return self._tmp_table_from_rdd(_subtract_by_key(self.rdd(), other.rdd()))
@log_elapsed
def filter(self, func, **kwargs):
from arch.api.impl.based_spark.rdd_func import _filter
return self._tmp_table_from_rdd(_filter(self.rdd(), func))
@log_elapsed
def union(self, other, func=lambda v1, v2: v1, **kwargs):
from arch.api.impl.based_spark.rdd_func import _union
return self._tmp_table_from_rdd(_union(self.rdd(), other.rdd(), func))
@log_elapsed
def flatMap(self, func, **kwargs):
from arch.api.impl.based_spark.rdd_func import _flat_map
return self._tmp_table_from_rdd(_flat_map(self.rdd(), func))
@log_elapsed
def collect(self, min_chunk_size=0, use_serialize=True, **kwargs):
if self._dtable:
return self._dtable.collect(min_chunk_size, use_serialize)
else:
return iter(self.rdd().collect())
"""
storage api
"""
def put(self, k, v, use_serialize=True, maybe_large_value=False):
if not maybe_large_value:
rtn = self.dtable().put(k, v, use_serialize)
else:
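            # split_put chunks an oversized value and stores each piece through the table's own put()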
rtn = split_put(k, v, use_serialize=use_serialize, put_call_back_func=self.dtable().put)
self._rdd = None
return rtn
def put_all(self, kv_list: Iterable, use_serialize=True, chunk_size=100000):
rtn = self.dtable().put_all(kv_list, use_serialize, chunk_size)
self._rdd = None
return rtn
def get(self, k, use_serialize=True, maybe_large_value=False):
if not maybe_large_value:
return self.dtable().get(k, use_serialize)
else:
return split_get(k=k, use_serialize=use_serialize, get_call_back_func=self.dtable().get)
def delete(self, k, use_serialize=True):
rtn = self.dtable().delete(k, use_serialize)
self._rdd = None
return rtn
def destroy(self):
if self._dtable:
self._dtable.destroy()
else:
self._rdd = None
return True
def put_if_absent(self, k, v, use_serialize=True):
rtn = self.dtable().put_if_absent(k, v, use_serialize)
self._rdd = None
return rtn
# noinspection PyPep8Naming
def take(self, n=1, keysOnly=False, use_serialize=True):
if self._dtable:
return self._dtable.take(n, keysOnly, use_serialize)
else:
rtn = self._rdd.take(n)
if keysOnly:
rtn = [pair[0] for pair in rtn]
return rtn
# noinspection PyPep8Naming
def first(self, keysOnly=False, use_serialize=True):
return self.take(1, keysOnly, use_serialize)[0]
def count(self, **kwargs):
if self._dtable:
return self._dtable.count()
else:
return self._rdd.count()
@log_elapsed
def save_as(self, name, namespace, partition=None, use_serialize=True, persistent=True, **kwargs) -> 'RDDTable':
if partition is None:
partition = self._partitions
partition = partition or self._partitions
from arch.api import RuntimeInstance
persistent_engine = RuntimeInstance.SESSION.get_persistent_engine()
if self._dtable:
_dtable = self._dtable.save_as(name, namespace, partition,
use_serialize=use_serialize,
persistent_engine=persistent_engine)
return RDDTable.from_dtable(session_id=self._session_id, dtable=_dtable)
else:
from arch.api.impl.based_spark.rdd_func import _save_as_func
return _save_as_func(self._rdd, name=name, namespace=namespace, partition=partition, persistent=persistent)
| 35.681356 | 119 | 0.635949 | 1,326 | 10,526 | 4.797134 | 0.167421 | 0.038516 | 0.027669 | 0.030656 | 0.337368 | 0.296966 | 0.189907 | 0.140544 | 0.140544 | 0.13457 | 0 | 0.004805 | 0.268383 | 10,526 | 294 | 120 | 35.802721 | 0.821192 | 0.089398 | 0 | 0.258216 | 0 | 0 | 0.026554 | 0 | 0 | 0 | 0 | 0 | 0.023474 | 1 | 0.183099 | false | 0 | 0.089202 | 0.032864 | 0.488263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a75601cdea21086c79a3d2335643985397a1c149 | 2,288 | py | Python | tests/components/mazda/test_diagnostics.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/mazda/test_diagnostics.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/mazda/test_diagnostics.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Test Mazda diagnostics."""
import json
import pytest
from homeassistant.components.mazda.const import DATA_COORDINATOR, DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from . import init_integration
from tests.common import load_fixture
from tests.components.diagnostics import (
get_diagnostics_for_config_entry,
get_diagnostics_for_device,
)
async def test_config_entry_diagnostics(hass: HomeAssistant, hass_client):
"""Test config entry diagnostics."""
await init_integration(hass)
assert hass.data[DOMAIN]
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
diagnostics_fixture = json.loads(
load_fixture("mazda/diagnostics_config_entry.json")
)
assert (
await get_diagnostics_for_config_entry(hass, hass_client, config_entry)
== diagnostics_fixture
)
async def test_device_diagnostics(hass: HomeAssistant, hass_client):
"""Test device diagnostics."""
await init_integration(hass)
assert hass.data[DOMAIN]
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
device_registry = dr.async_get(hass)
reg_device = device_registry.async_get_device(
identifiers={(DOMAIN, "JM000000000000000")},
)
assert reg_device is not None
diagnostics_fixture = json.loads(load_fixture("mazda/diagnostics_device.json"))
assert (
await get_diagnostics_for_device(hass, hass_client, config_entry, reg_device)
== diagnostics_fixture
)
async def test_device_diagnostics_vehicle_not_found(hass: HomeAssistant, hass_client):
"""Test device diagnostics when the vehicle cannot be found."""
await init_integration(hass)
assert hass.data[DOMAIN]
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
device_registry = dr.async_get(hass)
reg_device = device_registry.async_get_device(
identifiers={(DOMAIN, "JM000000000000000")},
)
assert reg_device is not None
# Remove vehicle info from hass.data so that vehicle will not be found
hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR].data = []
with pytest.raises(AssertionError):
await get_diagnostics_for_device(hass, hass_client, config_entry, reg_device)
| 30.105263 | 86 | 0.751748 | 284 | 2,288 | 5.774648 | 0.207746 | 0.080488 | 0.051829 | 0.04878 | 0.685976 | 0.631707 | 0.580488 | 0.471341 | 0.405488 | 0.405488 | 0 | 0.017305 | 0.166521 | 2,288 | 75 | 87 | 30.506667 | 0.842685 | 0.040647 | 0 | 0.479167 | 0 | 0 | 0.047573 | 0.031068 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7589c5e2482807622175155888716cc2550717e | 628 | py | Python | wmt_etl/tests/fixtures.py | ministryofjustice/wmt-etl | c41aabeba06cc531364583b92254998404f6bc34 | [
"MIT"
] | null | null | null | wmt_etl/tests/fixtures.py | ministryofjustice/wmt-etl | c41aabeba06cc531364583b92254998404f6bc34 | [
"MIT"
] | 5 | 2017-05-10T13:50:08.000Z | 2022-01-24T16:58:23.000Z | wmt_etl/tests/fixtures.py | ministryofjustice/wmt-etl | c41aabeba06cc531364583b92254998404f6bc34 | [
"MIT"
] | 1 | 2021-04-11T06:17:01.000Z | 2021-04-11T06:17:01.000Z | ''' Fixture and helper functions for reuse in tests'''
from os import path, remove, listdir
from shutil import copyfile
import wmt_etl.etl_config as config
def clear_archive():
'''Clear down archive following test execution'''
for archive_path in [f for f in listdir(config.ARCHIVE_FILE_DIR)
if f.endswith('.tar.gz')]:
remove(path.join(config.ARCHIVE_FILE_DIR, archive_path))
def copy_source_files(source_file_paths, dest_file_paths):
'''Copy source files to temp destination for testing'''
for src, dest in zip(source_file_paths, dest_file_paths):
copyfile(src, dest)
| 39.25 | 68 | 0.718153 | 93 | 628 | 4.645161 | 0.494624 | 0.083333 | 0.078704 | 0.092593 | 0.12963 | 0.12963 | 0 | 0 | 0 | 0 | 0 | 0 | 0.192675 | 628 | 15 | 69 | 41.866667 | 0.852071 | 0.224522 | 0 | 0 | 0 | 0 | 0.014894 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a75e1b07680cca93940810f4014eb58b4f1f8a48 | 2,360 | py | Python | services/webpage_actions.py | josepfpinto/webscraping | 109fcb4371f0e8e4127a48b4ba29cb6cc73dbf43 | [
"MIT"
] | null | null | null | services/webpage_actions.py | josepfpinto/webscraping | 109fcb4371f0e8e4127a48b4ba29cb6cc73dbf43 | [
"MIT"
] | null | null | null | services/webpage_actions.py | josepfpinto/webscraping | 109fcb4371f0e8e4127a48b4ba29cb6cc73dbf43 | [
"MIT"
] | null | null | null | import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from services import exceptions, webpage_scraping, g_driver
def wait(seconds, css_selector):
w = WebDriverWait(g_driver.google_driver, seconds)
if len(css_selector) > 1:
w.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, css_selector)))
def close_cookies():
print("- closing cookies")
try:
w = WebDriverWait(g_driver.google_driver, 15)
w.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, "button#onetrust-accept-btn-handler")))
g_driver.google_driver.find_element_by_css_selector(
"button#onetrust-accept-btn-handler").click()
print("- cookie button clicked")
except (NoSuchElementException, TimeoutException) as error:
exceptions.simple("- no cookie button found... Moving on:", error)
finally:
webpage_scraping.is_first_page = False
time.sleep(3)
def wait_for_apartments():
try:
wait(15, "div.sr_item.sr_item_new.sr_item_default.sr_property_block.sr_flex_layout")
except (NoSuchElementException, TimeoutException) as error:
exceptions.simple("- no apartments found:", error)
return error
def get_price(apartment, totalAdults, totalDays, cleaningFee):
price = ""
for elem in apartment.find_elements_by_css_selector("span.bui-u-sr-only"):
text = elem.text
if ("Price" in text) or ("Preço" in text):
price = int(text.split(' ')[-1])
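    # Nightly rate: strip the cleaning fee and the city tax (2 per adult per night,
    # charged for at most 7 nights) from the total, then divide by the number of nights.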
dayTax = int(totalAdults) * 2
tax = 7 * dayTax if totalDays > 7 else totalDays * dayTax
return (price - cleaningFee - tax) / totalDays
def get_score(apartment):
scoreRaw = apartment.find_element_by_css_selector(
"div.bui-review-score__badge").text
return int(scoreRaw) if scoreRaw == "10" else float(scoreRaw[0] + "." + scoreRaw[2])
def get_reviews(apartment):
reviewsRaw = apartment.find_element_by_css_selector(
"div.bui-review-score__text").text.split(' ')[0]
return int(reviewsRaw[0] + reviewsRaw[2:] if (("," in reviewsRaw) or ("." in reviewsRaw)) else reviewsRaw)
| 36.875 | 110 | 0.705085 | 299 | 2,360 | 5.371237 | 0.367893 | 0.061644 | 0.048568 | 0.035492 | 0.339975 | 0.290785 | 0.249689 | 0.249689 | 0.118306 | 0.118306 | 0 | 0.008854 | 0.186441 | 2,360 | 63 | 111 | 37.460317 | 0.827604 | 0 | 0 | 0.122449 | 0 | 0 | 0.138983 | 0.08178 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122449 | false | 0 | 0.142857 | 0 | 0.346939 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a760ec326c5318f9731d7eb95971a127876272c2 | 7,743 | py | Python | Q/questionnaire/models/models_users.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | Q/questionnaire/models/models_users.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | 477 | 2015-01-07T18:22:27.000Z | 2017-07-17T15:05:48.000Z | Q/questionnaire/models/models_users.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | ####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from allauth.account.models import EmailAddress
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from Q.questionnaire import APP_LABEL, q_logger
from Q.questionnaire.models.models_sites import get_site
# This is a custom UserProfile for the Q
# it includes Q-specific things
# I still use the built-in Django User for managing users
# however, I use django-allauth for authentication
# this lets me share users w/ registered OAuth providers (in the long-term)
class QUserProfile(models.Model):
class Meta:
app_label = APP_LABEL
abstract = False
verbose_name = 'Questionnaire User Profile'
verbose_name_plural = 'Questionnaire User Profiles'
# 1to1 relationship w/ standard Django User...
user = models.OneToOneField(User, related_name='profile')
# extra profile info associated w/ a Questionnaire User...
projects = models.ManyToManyField("QProject", blank=True, verbose_name="Project Membership")
change_password = models.BooleanField(default=False, verbose_name="Change password at next logon")
description = models.TextField(blank=True, null=True, verbose_name="Description")
institute = models.ForeignKey("QInstitute", blank=True, null=True, limit_choices_to={"is_active": True})
institute.verbose_name = "Publication Institute"
institute.help_text = _(
"Please select the institute for which you intend to publish documents. "
"If no selection is made, you will be unable to publish."
)
@property
def is_verified(self):
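        # Only users with an allauth-verified EmailAddress count as verified;
        # superusers and anonymous users always return False here.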
if self.user.is_authenticated and not self.user.is_superuser:
try:
email = EmailAddress.objects.get(email=self.user.email)
return email.verified
except EmailAddress.DoesNotExist:
pass
return False
def __str__(self):
return str(self.user)
def is_admin_of(self, project):
project_admin_group = project.get_group("admin")
return self.user in project_admin_group.user_set.all()
def is_member_of(self, project):
project_member_group = project.get_group("member")
return self.user in project_member_group.user_set.all()
def is_pending_of(self, project):
project_pending_group = project.get_group("pending")
return self.user in project_pending_group.user_set.all()
def is_user_of(self, project):
project_user_group = project.get_group("user")
return self.user in project_user_group.user_set.all()
def add_group(self, group):
group.user_set.add(self.user)
def remove_group(self, group):
group.user_set.remove(self.user)
def add_pending_permissions(self, project):
pending_permission_group = project.get_group("pending")
self.add_group(pending_permission_group)
def add_member_permissions(self, project):
member_permission_group = project.get_group("member")
self.add_group(member_permission_group)
def add_user_permissions(self, project):
user_permission_group = project.get_group("user")
self.add_group(user_permission_group)
def add_admin_permissions(self, project):
admin_permission_group = project.get_group("admin")
self.add_group(admin_permission_group)
def remove_admin_permissions(self, project):
admin_permission_group = project.get_group("admin")
self.remove_group(admin_permission_group)
def remove_member_permissions(self, project):
member_permission_group = project.get_group("member")
self.remove_group(member_permission_group)
def remove_pending_permissions(self, project):
pending_permission_group = project.get_group("pending")
self.remove_group(pending_permission_group)
def remove_user_permissions(self, project):
user_permission_group = project.get_group("user")
self.remove_group(user_permission_group)
def join_project(self, project):
self.projects.add(project)
self.remove_pending_permissions(project)
self.add_member_permissions(project)
self.add_user_permissions(project)
def leave_project(self, project):
self.projects.remove(project)
self.remove_pending_permissions(project)
self.remove_member_permissions(project)
self.remove_user_permissions(project)
self.remove_admin_permissions(project)
def created(self):
        # this function is referenced in "signals_users.py"
mail_content = "User '{0}' created (on site '{1}').".format(
self, get_site(),
)
mail_from = settings.EMAIL_HOST_USER
mail_to = [settings.EMAIL_HOST_USER, ]
try:
send_mail(
"ES-DOC Questionnaire user joined",
mail_content,
mail_from,
mail_to,
fail_silently=False
)
except Exception as e:
q_logger.error(e)
def is_admin_of(user, project):
if user.is_authenticated():
return user.is_superuser or user.profile.is_admin_of(project)
else:
return False
def is_member_of(user, project):
if user.is_authenticated():
return user.is_superuser or user.profile.is_member_of(project)
else:
return False
def is_pending_of(user, project):
if user.is_authenticated():
return not user.is_superuser and user.profile.is_pending_of(project)
else:
return False
def is_user_of(user, project):
if user.is_authenticated():
return user.is_superuser or user.profile.is_user_of(project)
else:
return False
def get_institute(user):
if user.is_authenticated():
if user.is_superuser:
return None
else:
return user.profile.institute
else:
return None
#######################
# user / project code #
#######################
def project_join_request(project, user, site=None):
mail_content = "User '{0}' wants to join project '{1}'. To approve this request, please goto: http://{2}/{3}/manage/.".format(
user.username, project.title, site.domain, project.name,
)
mail_from = settings.EMAIL_HOST_USER
mail_to = [settings.EMAIL_HOST_USER, ]
try:
send_mail(
"ES-DOC Questionnaire project join request",
mail_content,
mail_from,
mail_to,
fail_silently=False
)
user.profile.add_pending_permissions(project)
return True
except Exception as e:
q_logger.error(e)
return False
def project_join(project, user, site=None):
mail_content = "User '{0}' has joined project '{1}' [http://{2}/{3}].".format(
user.username, project.title, site.domain, project.name,
)
mail_from = settings.EMAIL_HOST_USER
mail_to = [user.email, project.email]
try:
send_mail(
"ES-DOC Questionnaire project join response",
mail_content,
mail_from,
mail_to,
fail_silently=False
)
user.profile.join_project(project)
return True
except Exception as e:
q_logger.error(e)
return False
| 31.348178 | 131 | 0.670412 | 964 | 7,743 | 5.158714 | 0.212656 | 0.048261 | 0.036195 | 0.048261 | 0.47959 | 0.390308 | 0.348683 | 0.312689 | 0.27408 | 0.265634 | 0 | 0.002696 | 0.233501 | 7,743 | 246 | 132 | 31.47561 | 0.835215 | 0.086013 | 0 | 0.378698 | 0 | 0.005917 | 0.095265 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.011834 | 0.053254 | 0.005917 | 0.378698 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7627b6bbd6aa9cb3a70b95c6e9348883c8a3624 | 3,013 | py | Python | Token/views.py | dominicneeraj/Technex_api | 1d60ecad212494ca1b93d7417c76ba0d843da336 | [
"MIT"
] | null | null | null | Token/views.py | dominicneeraj/Technex_api | 1d60ecad212494ca1b93d7417c76ba0d843da336 | [
"MIT"
] | null | null | null | Token/views.py | dominicneeraj/Technex_api | 1d60ecad212494ca1b93d7417c76ba0d843da336 | [
"MIT"
] | null | null | null | import json
from Token.models import Word
from Token.serializers import WordSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from timex import date
from Nouns import *
def formating(tok):
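    # Build an e-mail search filter (from/to, dates, attachment, subject, CC) from the
    # tokenised query and return it serialised as JSON.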
fromdate = date(tok)
if fromdate =='Any':
Todate='Any'
else:
Todate='Today'
fromperson=getFrom(tok)
toperson=getTo(tok)
subject = getFeature(tok, ['Subject', 'subject','as','As','about','Regarding','regarding'])
cc=getCC(tok)
attach=attachment(tok)
if attach=='Any':
HasAttachment='No'
Attachmentname='Any'
Attachmentsize='Any'
elif attach in ['attachment', 'attachments']:
HasAttachment='Yes'
attach='Any'
Attachmentname = attachmentname(tok)
Attachmentsize = size(tok)
else:
HasAttachment='Yes'
Attachmentname = attachmentname(tok)
Attachmentsize = size(tok)
data = {'From':fromperson,'To':toperson,'ToDate':Todate,'FromDate':fromdate,'HasAttachments':HasAttachment,'AttachmentType':attach,'AttachmentSize':Attachmentsize,'AttachmentName':Attachmentname,'Subject':subject,'CC':cc}
json_data = json.dumps(data)
response = json_data
return response
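# Illustrative shape of the JSON string returned by formating() for a query such as
# "mails from alice about budget with attachments". The field values below are
# hypothetical -- they depend entirely on the timex.date and Nouns helpers:
#
#   {"From": "alice", "To": "Any", "ToDate": "Any", "FromDate": "Any",
#    "HasAttachments": "Yes", "AttachmentType": "Any", "AttachmentSize": "Any",
#    "AttachmentName": "Any", "Subject": "budget", "CC": "Any"}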
class TokenPost(APIView):
def post(self, request, format=None):
print(request.data)
serializer = WordSerializer(data=request.data)
if serializer.is_valid():
serializer.validated_data['code'] = formating(serializer.validated_data['code'])
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TokenList(APIView):
"""
List all words, or create a new word.
"""
def get(self, request, format=None):
words = Word.objects.all()
serializer = WordSerializer(words, many=True)
return Response(serializer.data)
class TokenDetail(APIView):
"""
Retrieve, update or delete a word instance.
"""
def get_object(self, pk):
try:
return Word.objects.get(pk=pk)
except Word.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
word = self.get_object(pk)
serializer = WordSerializer(word)
return Response(serializer.data)
def put(self, request, pk, format=None):
word = self.get_object(pk)
serializer = WordSerializer(word, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
word = self.get_object(pk)
word.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | 30.13 | 225 | 0.670096 | 338 | 3,013 | 5.902367 | 0.301775 | 0.05614 | 0.07218 | 0.05614 | 0.352882 | 0.352882 | 0.263659 | 0.263659 | 0.219549 | 0.219549 | 0 | 0.007679 | 0.222038 | 3,013 | 100 | 226 | 30.13 | 0.84343 | 0.026884 | 0 | 0.324324 | 0 | 0 | 0.064094 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094595 | false | 0 | 0.148649 | 0 | 0.405405 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7635d6c893e96acb3099d4f740002c756194d92 | 13,580 | py | Python | kbrl.py | NadeemWard/kernel-based_RL | 9897a1dc9890c7408815e9571c764750a29e921f | [
"MIT"
] | null | null | null | kbrl.py | NadeemWard/kernel-based_RL | 9897a1dc9890c7408815e9571c764750a29e921f | [
"MIT"
] | null | null | null | kbrl.py | NadeemWard/kernel-based_RL | 9897a1dc9890c7408815e9571c764750a29e921f | [
"MIT"
] | null | null | null | import random
import numpy as np
import gym
import matplotlib.pyplot as plt
from sklearn.gaussian_process.kernels import RBF
from sklearn.preprocessing import normalize
import sys
import cvxpy as cp
import pdb
def get_data(env, total_samples_per_action=1000, random = True, V=None, R=None, kernel = None,
data = None, gamma = None):
'''
Function to collect data at random from OpenAI gym type environment. Use for CartPole-v0 primarily
:param env: gym type env
:param total_samples_per_action: number of samples to collect per action
:return: - return the observed transitions per action, all concatentated into a large matrix ("transition_data")
- return the rewards observed from those transitions ("reward_data")
transition_data is of the form ( num_samples X num_actions X 2 (for starting state and next state) X state dimension )
reward_data is of the form: ( num_samples X num_actions)
'''
num_actions = env.action_space.n
state_dim = env.observation_space.shape[0]
transition_data = np.zeros([total_samples_per_action, num_actions, 2, state_dim]) # placeholder for data to store
reward_data = np.zeros([total_samples_per_action, num_actions])
num_samples_per_action = np.zeros(num_actions)
while min(num_samples_per_action) < total_samples_per_action:
# run another full episode
x = env.reset()
done = False
while not done:
if random:
action = env.action_space.sample()
else:
action = get_action(V, R, kernel, data, gamma, x)
next_x, reward, done, _ = env.step(action)
if num_samples_per_action[action] < total_samples_per_action:
transition_data[int(num_samples_per_action[action]), action, 0, :] = x
transition_data[int(num_samples_per_action[action]), action, 1, :] = next_x
reward_data[int(num_samples_per_action[action]), action] = reward
num_samples_per_action[action] += 1
# update current state
x = next_x
if done:
reward_data[int(num_samples_per_action[action]) - 1, action] = 0 # change the reward to be zero
return transition_data, reward_data
def kernel_matrix(X_s, Y_s, kernel):
'''
X_s: data matrix of initial states of size (num_samples, num_dimensions_state_space)
Y_s: data matrix of next states of size (num_samples, num_dimensions_state_space)
These data matrices are both for a specific action a
K: return the kernel matrix of the cross product between elements of these two matrices, size (num_samples, num_samples)
using the gaussian kernel. Element i,j of this matrix is kernel([X_s]_i, [Y_s]_j)
'''
m, dim_s = X_s.shape
return normalize(kernel(X_s, Y_s), axis=0,
norm="l1") # normalize the kernel values along axis 0 to have them sum to 1
def kernel_tensor(X, Y, kernel):
'''
X: data tensor of initial states of size (num_samples, num_actions, num_dim_state_space)
Y: data tensor of next states of size (num_samples, num_actions, num_dim_state_space)
return: K, the kernel tensor of concatenated kernel matrices of each action seperately
'''
num_samples, num_actions, dim_s = X.shape
K = np.zeros((num_samples, num_actions, num_samples))
for a in range(num_actions):
K[:, a, :] = kernel_matrix(X[:, a, :], Y[:, a, :], kernel) # get the kernel matrix per action
return K
def get_action(V, R, kernel, data, gamma, x):
'''
V: the value function, a matrix of size (num_samples, num_actions)
for each "next state" seen in the data
R: matrix of 1 step rewards of size (num_samples, num_actions)
kernel: the kernel function used
data: the data tensor or size (num_samples X num_actions X 2 (x_s, y_s) X dim(state_space) )
gamma: discount factor
bandwidith: hyperparameter for kernel function
x: the actual state we are evaluating
return: indx of action to take
'''
num_samples, num_actions = V.shape
Q = np.zeros(num_actions)
for i in range(num_actions):
X_a = data[:, i, 0, :] # shape (num_samples, dim_state_space)
Q[i] = np.dot(normalize(kernel(X_a, x.reshape(1, -1)), axis=0, norm="l1").T, R[:, i] + gamma * V[:, i])
return np.argmax(Q)
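# In symbols, the rule above picks argmax_a Q(x, a) using the kernel-based estimate
#   Q(x, a) = sum_j kbar_a(x_j^a, x) * ( r_j^a + gamma * V(y_j^a) ),
# where (x_j^a, r_j^a, y_j^a) are the stored transitions for action a and kbar_a are the
# column-normalized kernel weights of the query state x against the start states x_j^a.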
def value_iteration(Theta, R, gamma, stopping_criteria=10e-5, axis=2):
'''
Theta: Tensor of kernel values for the data of size (num_samples, num_actions, num_samples)
R: one step rewards observed of size (num_samples, num_actions)
gamma: discount factor
num_iterations: number of times we want to iterate the algorithm
return: The new Value functions we get; of size (num_samples, num_actions)
'''
num_samples, num_actions = R.shape
V_old = np.zeros((num_samples, num_actions))
abs_error = sys.maxsize
num_iterations = 0
while abs_error > stopping_criteria:
# compute the Q value
Q = np.zeros((num_samples, num_actions, num_actions))
for i in range(num_actions):
Q[:, i, :] = np.dot(Theta[:, i, :].T, R + gamma * V_old)
# do max over axis
V = np.amax(Q, axis=axis)
# compute error (largest absolute difference)
abs_error = np.max(np.abs(V_old - V))
V_old = V
num_iterations += 1
# print("Number of iterations of value iterations until convergence:", num_iterations)
return V
def different_value_iteration(X, Y, R, kernel, gamma, stopping_criteria=10e-5):
'''
My implementation with kernel computation between action datasets S^a. This is just to make sure Im doing the computation
right.
:param X: starting state data of the form num_samples X num_actions X
:param Y: next state data of the form num_samples X num_actions X
:param R: One step rewards of the form num_samples X num_actions
:param gamma: discount factor
:param stopping_criteria: When we will terminate value iteration
:return: Return the value functions found
'''
num_samples, num_actions = R.shape
V_old = np.zeros((num_samples, num_actions))
abs_error = sys.maxsize
num_iterations = 0
while abs_error > stopping_criteria:
td_update = R + gamma * V_old
V = np.zeros((num_samples, num_actions))
for sample_indx in range(num_samples):
for action_indx in range(num_actions):
# loop over each action-value function
Q_x = np.zeros(num_actions)
for a in range(num_actions):
Q_x[a] = np.dot(normalize(kernel(X[:, a, :], Y[sample_indx, action_indx, :].reshape(1, -1)), axis=0,
norm="l1").T, td_update[:, a])
V[sample_indx, action_indx] = max(Q_x)
# compute error (largest absolute difference)
abs_error = np.max(np.abs(V_old - V))
V_old = V
num_iterations += 1
print("Number of iterations of value iterations until convergence:", num_iterations)
return V
def test_kbrl_env(env, V=None, R=None, kernel=None, gamma=None, data=None, num_episodes=1, random=False):
'''
Getting test performance. What this code does is loop num_episodes times over the env and saves all the
rewards received per episode
:param env: env used
:param V: the value function found from {value iteration / linear programming } needed for action selection
:param R: one step rewards (needed for action selection)
:param kernel: kernel function used (needed for aciton selection)
:param gamma: discount factor
:param data: data generated
:param num_episodes: number of iterations
:return: returns all of the episode rewards received
'''
rewards = []
for i in range(num_episodes):
episode_reward = 0
num_steps = 0
done = False
state = env.reset()
while not done:
if random:
action = env.action_space.sample()
else:
action = get_action(V, R, kernel, data, gamma, state)
state, reward, done, _ = env.step(action)
episode_reward += reward
num_steps += 1
rewards.append(episode_reward)
return np.array(rewards)
def plot_results(env, transition_data, reward_data, kernel_vals, gamma_vals,
num_episodes=10, axis = 2, lp=False, path = None):
'''
Plotting function. Putting everything together
:param env: env used
:param transition_data: transition dynamics
:param reward_data: reward data
:param kernel_vals: the different hyperparmeters for the RBF kernel to try
:param gamma_vals: different gamma values to try
:param num_episodes: number of episode we want to average performance over
:param axis: how to maximize in policy iteration
:param lp: wheter to solve using LP approach or not
:param path: path to save model to. If None won't save.
:return: None. Just plot the result
'''
num_samples_per_action, num_actions = reward_data.shape
X = transition_data[:, :, 0, :] # num_samples_per_action, num_actions, dim_state
Y = transition_data[:, :, 1, :] # num_samples_per_action, num_actions, dim_state
for gamma in gamma_vals:
rewards = []
for b in kernel_vals:
# define kernel
kernel = RBF(b)
# compute kernel tensor
Theta = kernel_tensor(X, Y, kernel)
# compute value iteration
if lp:
init_dist = np.ones((num_samples_per_action, num_actions)) * 1 / num_samples_per_action
V = kblp(Theta=Theta, R=reward_data, gamma=gamma, initial_dsitribution=init_dist)
else:
V = value_iteration(Theta, reward_data, gamma=gamma, stopping_criteria=10e-3, axis= axis)
# V = different_value_iteration(X, Y, reward_data, kernel = kernel, gamma = gamma)
# save model
if path:
np.savez(path + "/data_gamma=" + str(gamma)+"_b=" + str(b), V = V,
transition_data = transition_data, reward_data = reward_data)
# run on test environement
rewards.append(test_kbrl_env(env, V=V, R=reward_data, kernel=kernel, gamma=gamma,
data=transition_data, num_episodes=num_episodes, random=False))
# rewards will a matrix of size num_kernel_vals X num_cummulative_rewards_per_episode
rewards = np.array(rewards)
average = rewards.mean(axis=1)
sigma = rewards.std(axis=1)
# save results
if path:
np.savez(path + "/results", rewards = rewards, average = average, sigma = sigma)
plt.plot(kernel_vals, average, label="gamma = {0}".format(gamma))
plt.fill_between(kernel_vals, average + sigma, average - sigma, alpha=0.5)
# plt.plot(kernel_vals, [random_reward] * len(kernel_vals), label="Random Agent")
plt.xlabel("bandwidth value")
plt.ylabel("Average reward")
# plt.title("Average performance for different values of the bandwidth parameter")
plt.legend()
plt.show()
def kblp(Theta, R, gamma, initial_dsitribution):
'''
LP implementation using kernel based RL
:param Theta: kernel Tensor
:param R: reward data
:param gamma: discount factor
:param initial_dsitribution: the weighting in the objective function
:return: the optimal value function
'''
# information about samples
num_samples, num_actions = R.shape
# define variables
v = cp.Variable((num_samples, num_actions))
# create objective function
objective = cp.Minimize(cp.trace(initial_dsitribution.T @ v))
# create constraints
constraints = [v >= Theta[:, a, :].T @ (R + gamma * v) for a in range(num_actions)] # axis = 1
# solve
prob = cp.Problem(objective, constraints)
prob.solve(verbose = False)
return v.value
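# For reference, the mathematical program solved above is the kernel-based analogue of the
# classic linear-programming form of value iteration (mu denotes `initial_dsitribution`):
#   minimize    sum_{i,a} mu(i, a) * v(i, a)
#   subject to  v >= Theta_a^T (R + gamma * v)   for every action a,
# so the objective pushes v down onto the smallest values satisfying all Bellman inequalities.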
if __name__ == "__main__":
# Define env
env = gym.make("CartPole-v0")
gamma = 0.99
num_actions = env.action_space.n
# get data
num_samples_per_action = 1500
transition_data, reward_data = get_data(env, total_samples_per_action=num_samples_per_action)
X = transition_data[:, :, 0, :] # num_samples_per_action, num_actions, dim_state
Y = transition_data[:, :, 1, :] # num_samples_per_action, num_actions, dim_state
#####################################################################################################
#################################### Value Iteration Approach #######################################
#####################################################################################################
# define kernel values to try
kernel_vals = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.1, 0.2]
gamma_vals = [0.99]
    #plot_results(env, transition_data, reward_data, kernel_vals, gamma_vals, num_episodes=1000, path="KBRL_test2")
#####################################################################################################
############################################## LP approach ##########################################
#####################################################################################################
    plot_results(env, transition_data, reward_data, kernel_vals, gamma_vals, num_episodes = 1000, lp = True, path = "LP_test9") # takes a while
a76389426a6c8d8578931ed8d49b546a8ca1da76 | 3,950 | py | Python | bittensor/utils/model_utils.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | bittensor/utils/model_utils.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | bittensor/utils/model_utils.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from loguru import logger
import torch
class ModelInformationNotFoundException(Exception):
pass
class ModelToolbox:
def __init__(self, model_class, optimizer_class):
self.model_class = model_class
self.optimizer_class = optimizer_class
def save_model(self, miner_path, model_info):
"""Saves the model locally.
Args:
model_info (:obj:`dict`, `required`): Dictionary containing the epoch we are saving at, the loss, and the PyTorch model object.
Raises:
:obj:`ModelInformationNotFoundException`: Raised whenever the loss, epoch, or PyTorch model object is missing from the input dictionary.
"""
try:
if 'epoch' not in model_info.keys():
raise ModelInformationNotFoundException("Missing 'epoch' in torch save dict")
if 'loss' not in model_info.keys():
raise ModelInformationNotFoundException("Missing 'loss' in torch save dict")
if 'model_state_dict' not in model_info.keys():
raise ModelInformationNotFoundException("Missing 'model' in torch save dict")
if 'optimizer_state_dict' not in model_info.keys():
raise ModelInformationNotFoundException("Missing 'optimizer' in torch save dict")
logger.info( 'Saving/Serving model: epoch: {}, loss: {}, path: {}/model.torch'.format(model_info['epoch'], model_info['loss'], miner_path))
torch.save(model_info,"{}/model.torch".format(miner_path))
except ModelInformationNotFoundException as e:
logger.error("Encountered exception trying to save model: {}", e)
def load_model(self, config):
""" Loads a model saved by save_model() and returns it.
Returns:
model (:obj:`torch.nn.Module`) : Model that was saved earlier, loaded back up using the state dict and optimizer.
optimizer (:obj:`torch.optim`) : Model optimizer that was saved with the model.
"""
model = self.model_class( config )
optimizer = self.optimizer_class(model.parameters(), lr = config.miner.learning_rate, momentum=config.miner.momentum)
try:
checkpoint = torch.load("{}/model.torch".format(config.miner.full_path))
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
logger.info( 'Reloaded model: epoch: {}, loss: {}, path: {}/model.torch'.format(epoch, loss, config.miner.full_path))
except Exception as e:
logger.warning ( 'Exception {}. Could not find model in path: {}/model.torch', e, config.miner.full_path )
return model, optimizer
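# Example usage (a sketch only; `MyModel` and the optimizer class are placeholders, and
# `config` is assumed to carry `miner.full_path`, `miner.learning_rate` and `miner.momentum`):
#
#   toolbox = ModelToolbox(MyModel, torch.optim.SGD)
#   toolbox.save_model(config.miner.full_path, {
#       'epoch': epoch, 'loss': loss,
#       'model_state_dict': model.state_dict(),
#       'optimizer_state_dict': optimizer.state_dict()})
#   model, optimizer = toolbox.load_model(config)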
| 47.590361 | 151 | 0.682785 | 497 | 3,950 | 5.342052 | 0.356137 | 0.030508 | 0.015066 | 0.021092 | 0.146516 | 0.127307 | 0.127307 | 0.101695 | 0.054237 | 0.054237 | 0 | 0.001317 | 0.231139 | 3,950 | 82 | 152 | 48.170732 | 0.872572 | 0.418987 | 0 | 0.057143 | 0 | 0 | 0.223235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0.028571 | 0.057143 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a763ad46097206901f13a0cb33cf0ad51f35a41a | 2,988 | py | Python | hdf5_wrappers/hdf5_dataset.py | hilman-dayo/active_learning | cc5b0388be25946e794d59d95e4d9c8c56e24207 | [
"Apache-2.0"
] | 54 | 2020-07-09T04:19:04.000Z | 2022-03-05T11:38:07.000Z | hdf5_wrappers/hdf5_dataset.py | AnnotationSoftware/active_learning | 2376ecf9d3ef5f7ebf0fdbc59a3cbb50cfbf855e | [
"Apache-2.0"
] | 2 | 2021-05-20T10:16:47.000Z | 2021-06-07T08:20:35.000Z | hdf5_wrappers/hdf5_dataset.py | AnnotationSoftware/active_learning | 2376ecf9d3ef5f7ebf0fdbc59a3cbb50cfbf855e | [
"Apache-2.0"
] | 9 | 2020-09-17T13:40:03.000Z | 2021-11-05T09:09:24.000Z | import numpy as np
import torch
from torch.utils import data
import h5py
import warnings
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
# Logger
logger = getLogger("MainLogger")
if __name__ == '__main__':
    # Attach a console handler when the module is run directly
    handler = StreamHandler()
    handler.setLevel(INFO)
    handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
class HDF5Dataset(data.Dataset):
"""Represents a HDF5 dataset. Loads images from compressed HDF5 file.
Input params:
image_file_path: Path to a HDF5 file containing all the image slices.
mask_file_path: Path to a HDF5 file containing all the image masks.
image_ids: List of strings with image or slice ids.
transform: PyTorch transform to apply to every data instance (default=None).
"""
def __init__(self, image_file_path, mask_file_path=None, image_ids=None,
transform=None):
super().__init__()
self.image_file = h5py.File(image_file_path, 'r', libver='latest', swmr=True)
# Sometimes we don't need to load the ground truth masks.
if mask_file_path is None:
self.mask_file = None
else:
self.mask_file = h5py.File(mask_file_path, 'r', libver='latest', swmr=True)
self.image_ids = image_ids
self.transform = transform
def __getitem__(self, index):
# get data
x = self.get_image(index)
if self.transform:
x = self.transform(x)
else:
x = torch.from_numpy(x)
if self.mask_file is None:
return x, 0 # {'image': x, 'mask': 0/None}
# get label
y = self.get_mask(index)
y = torch.from_numpy(y)
return x, y
def __len__(self):
return len(self.image_ids)
def get_mask(self, index):
if self.mask_file==None:
return None
slice_id = self.image_ids[index]
return self.load_ground_truth_mask(slice_id)
def get_image(self, index):
slice_id = self.image_ids[index]
# print("Trying to load {}".format(slice_id))
return self.load_image(slice_id)
def load_image(self, slice_id):
""" Loads image slice from hdf5 file in shape (w, h, ch).
Args:
slice_id (string) - if slices are not used, image_id, otherwise slice_id.
"""
im = np.array(self.image_file.get(slice_id), dtype=np.float32) # (ch, w, h)
# print("**************** Loaded image {} of shape {}".format(slice_id, im.shape))
# NOTE(martun): Ignore mean image for this time.
#if self.mean_image is not None:
# im -= self.mean_image
return im
def load_ground_truth_mask(self, slice_id):
mask = np.array(self.mask_file.get(slice_id))
mask = (mask > 0.5).astype(np.uint8)
#logger.info("Loaded a mask for image {} with {} filled pixels".format(
# image_id, str(np.sum(mask))))
input_size = mask.shape[-1]
mask = mask.reshape((1, input_size, input_size))
return mask
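# Example usage (a sketch; the HDF5 file names and slice ids below are hypothetical):
#
#   from torch.utils.data import DataLoader
#   dataset = HDF5Dataset('images.h5', mask_file_path='masks.h5',
#                         image_ids=['slice_0', 'slice_1', 'slice_2'])
#   loader = DataLoader(dataset, batch_size=2, shuffle=True)
#   for images, masks in loader:
#       pass  # images: float32 (B, ch, w, h); masks: uint8 (B, 1, size, size)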
| 33.573034 | 90 | 0.615127 | 413 | 2,988 | 4.244552 | 0.297821 | 0.047918 | 0.034227 | 0.015973 | 0.110667 | 0.110667 | 0.083286 | 0.0502 | 0.0502 | 0.0502 | 0 | 0.008353 | 0.278782 | 2,988 | 88 | 91 | 33.954545 | 0.805104 | 0.320281 | 0 | 0.08 | 0 | 0 | 0.016393 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14 | false | 0 | 0.12 | 0.02 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a766850d40a3c32e54d6a0911c6cf73993521d3a | 2,195 | py | Python | pyhealth/models/text/tool.py | Abhinav43/PyHealth | 5aa9816f76990d221d79340b331c18dfa10adcb3 | [
"BSD-2-Clause"
] | null | null | null | pyhealth/models/text/tool.py | Abhinav43/PyHealth | 5aa9816f76990d221d79340b331c18dfa10adcb3 | [
"BSD-2-Clause"
] | null | null | null | pyhealth/models/text/tool.py | Abhinav43/PyHealth | 5aa9816f76990d221d79340b331c18dfa10adcb3 | [
"BSD-2-Clause"
] | null | null | null | import pytorch_pretrained_bert
from pytorch_pretrained_bert import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertModel, BertConfig, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from pyhealth.utils.characterbertmain.modeling.character_bert import CharacterBertModel
from pyhealth.utils.characterbertmain.utils.character_cnn import CharacterIndexer
import os
def get_embedding(embed_type):
if embed_type == 'BioBERT':
model_loc = '/content/drive/MyDrive/models_a/pretrained_bert_tf/biobert_pretrain_output_all_notes_150000/'
tokenizer = BertTokenizer.from_pretrained(model_loc, do_lower_case=True)
cache_dir = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(-1))
model = BertModel.from_pretrained(model_loc, cache_dir=cache_dir)
indexer = None
elif embed_type == 'BERT':
model_loc = '/content/drive/MyDrive/models_a/pretrained_bert_tf/bert_pretrain_output_all_notes_150000/'
tokenizer = BertTokenizer.from_pretrained(model_loc, do_lower_case=True)
cache_dir = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(-1))
model = BertModel.from_pretrained(model_loc, cache_dir=cache_dir)
indexer = None
elif embed_type == 'CharBERT':
model_loc = '/content/drive/MyDrive/models_a/general_character_bert/'
model = CharacterBertModel.from_pretrained(model_loc)
tokenizer = BertTokenizer.from_pretrained('/content/drive/MyDrive/models_a/pretrained_bert_tf/bert_pretrain_output_all_notes_150000/')
indexer = CharacterIndexer()
elif embed_type == 'BioCharBERT':
model_loc = '/content/drive/MyDrive/models_a/medical_character_bert/'
model = CharacterBertModel.from_pretrained(model_loc)
tokenizer = BertTokenizer.from_pretrained('/content/drive/MyDrive/models_a/pretrained_bert_tf/biobert_pretrain_output_all_notes_150000/')
indexer = CharacterIndexer()
return indexer, tokenizer, model
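# Example usage (illustrative only; the hard-coded Google Drive model directories above
# must exist locally for this to run, and the sample sentence is arbitrary):
#
#   indexer, tokenizer, model = get_embedding('BioBERT')
#   tokens = ['[CLS]'] + tokenizer.tokenize('patient denies chest pain') + ['[SEP]']
#   # For the BERT variants, convert tokens with tokenizer.convert_tokens_to_ids;
#   # for the CharacterBERT variants, batch the token lists through the returned indexer.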
| 62.714286 | 145 | 0.789066 | 267 | 2,195 | 6.101124 | 0.2397 | 0.120319 | 0.128913 | 0.092081 | 0.692449 | 0.692449 | 0.692449 | 0.622468 | 0.622468 | 0.552486 | 0 | 0.013655 | 0.132574 | 2,195 | 34 | 146 | 64.558824 | 0.841912 | 0 | 0 | 0.363636 | 0 | 0 | 0.241458 | 0.215034 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.272727 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a769baf2694bc77a1fe8aa737cffb6c78e89841f | 2,096 | py | Python | scripts/platformio/platformio-build-pre.py | pch-jp/zephyr | c3f6a9bfce6f360ff5dfbc11072ae46de8f4aa4f | [
"Apache-2.0"
] | null | null | null | scripts/platformio/platformio-build-pre.py | pch-jp/zephyr | c3f6a9bfce6f360ff5dfbc11072ae46de8f4aa4f | [
"Apache-2.0"
] | null | null | null | scripts/platformio/platformio-build-pre.py | pch-jp/zephyr | c3f6a9bfce6f360ff5dfbc11072ae46de8f4aa4f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from SCons.Script import AlwaysBuild
Import("env")
def ZephyrBuildProgram(env):
env["LDSCRIPT_PATH"] = None
env.ProcessProgramDeps()
env.ProcessProjectDeps()
# append into the beginning a main LD script
env.Prepend(LINKFLAGS=["-T", "$LDSCRIPT_PATH"])
# enable "cyclic reference" for linker
if env.get("LIBS") and env.GetCompilerType() == "gcc":
env.Prepend(_LIBFLAGS="-Wl,--start-group ")
env.Append(_LIBFLAGS=" -Wl,--end-group")
program_pre = env.Program(
os.path.join("$BUILD_DIR", "firmware-pre"), env["PIOBUILDFILES"],
LDSCRIPT_PATH=os.path.join("$BUILD_DIR", "zephyr", "linker.cmd")
)
# Force execution of offset header target before compiling project sources
env.Depends(env["PIOBUILDFILES"], env["__ZEPHYR_OFFSET_HEADER_CMD"])
program = env.Program(
os.path.join("$BUILD_DIR", env.subst("$PROGNAME")),
env["PIOBUILDFILES"] + env["_EXTRA_ZEPHYR_PIOBUILDFILES"],
LDSCRIPT_PATH=os.path.join("$BUILD_DIR", "zephyr", "linker_pass_final.cmd")
)
env.Replace(PIOMAINPROG=program)
AlwaysBuild(
env.Alias(
"checkprogsize",
program,
env.VerboseAction(env.CheckUploadSize, "Checking size $PIOMAINPROG"),
)
)
print("Building in %s mode" % env.GetBuildType())
return program
env.AddMethod(ZephyrBuildProgram, "BuildProgram")
| 31.757576 | 84 | 0.666031 | 252 | 2,096 | 5.456349 | 0.555556 | 0.043636 | 0.029091 | 0.043636 | 0.120727 | 0.120727 | 0.120727 | 0.08 | 0.08 | 0.08 | 0 | 0.004881 | 0.218034 | 2,096 | 65 | 85 | 32.246154 | 0.834045 | 0.349237 | 0 | 0 | 0 | 0 | 0.264431 | 0.057722 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.03125 | 0.09375 | 0 | 0.15625 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a76b08e0e48bbc63f3621831ff5162538f68030c | 11,368 | py | Python | goatools/grouper/grprobj_init.py | flying-sheep/goatools | 1e3a74faa17cbdeef02550c7ddf17b65cf47d34a | [
"BSD-2-Clause"
] | 477 | 2015-02-10T06:54:42.000Z | 2022-03-15T12:36:11.000Z | goatools/grouper/grprobj_init.py | flying-sheep/goatools | 1e3a74faa17cbdeef02550c7ddf17b65cf47d34a | [
"BSD-2-Clause"
] | 174 | 2015-02-05T18:11:14.000Z | 2022-03-29T10:24:19.000Z | goatools/grouper/grprobj_init.py | flying-sheep/goatools | 1e3a74faa17cbdeef02550c7ddf17b65cf47d34a | [
"BSD-2-Clause"
] | 202 | 2015-01-21T12:29:23.000Z | 2022-03-01T13:26:05.000Z | """Given user GO ids and parent terms, group user GO ids under one parent term.
Given a group of GO ids with one or more higher-level grouping terms, group
each user GO id under the most descriptive parent GO term.
Each GO id may have more than one parent. One of the parent(s) is chosen
to best represent the user GO id's function. The choice of parent is made by
regarding how close the parent GO id is to the bottom of its hierarchy.
The estimation of how close a GO term is to "the bottom" of its GO hierarchy
is estimated using the number of total Go term descendent counts below
that term.
"""
from __future__ import print_function
import collections as cx
from goatools.nt_utils import get_dict_w_id2nts
from goatools.gosubdag.go_most_specific import get_most_specific_dcnt
from goatools.gosubdag.go_most_specific import get_most_specific_tinfo
from goatools.gosubdag.go_most_specific import get_most_specific_tinfo_dcnt
from goatools.grouper.utils import get_hdridx_flds
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
class GrouperInit:
"""Initialize Grouper object."""
most_specific_fncs = {
'dcnt': get_most_specific_dcnt,
'tinfo': get_most_specific_tinfo,
'tinfo_dcnt': get_most_specific_tinfo_dcnt}
def __init__(self, goids, objpre, fnc_most_specific='dcnt'):
# Data members read
self.grpname = objpre.grpname
self.gosubdag = objpre.gosubdag
self.usrgos = self._init_usrgos(goids)
self.hdrobj = objpre.hdrobj # Contains all possible hdrgos, not just ones used
assert self.gosubdag.rcntobj is not None
# Initialize: hdrgo2usrgos hdrgo_is_usrgo
# * hdrgo2usrgos: User GO IDs, grouped under high GO IDs (grouped, but not sorted)
self.hdrgo2usrgos = None
self.hdrgo_is_usrgo = None # Will contain both main GO IDs and user-specified alt GO IDs
self._init_h2us(fnc_most_specific)
def _init_usrgos(self, goids):
"""Return user GO IDs which have GO Terms."""
usrgos = set()
goids_missing = set()
_go2obj = self.gosubdag.go2obj
for goid in goids:
if goid in _go2obj:
usrgos.add(goid)
else:
goids_missing.add(goid)
if goids_missing:
print("MISSING GO IDs: {GOs}".format(GOs=goids_missing))
print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids)))
return usrgos
def get_gos_all(self):
"""Return a flat list of all GO IDs in grouping object.
All GO IDs:
* header GO IDs that are not user GO IDs
* user GO IDs that are under header GOs
* user GO IDs that are header GOs in groups containing no other user GO IDs
"""
gos_all = set()
# Get:
# * Header GO IDs that are not user GO IDs
# * User GO IDs that are under header GOs
for hdrgo, usrgos in self.hdrgo2usrgos.items():
gos_all.add(hdrgo)
gos_all |= usrgos
# User GO IDs that are header GOs in groups containing no other user GO IDs
gos_all |= self.hdrgo_is_usrgo
assert gos_all == self.usrgos.union(set(self.hdrgo2usrgos.keys()))
assert not self.usrgos.difference(gos_all), \
"GROUPER ERROR: {GOs}".format(GOs=self.usrgos.difference(gos_all))
return gos_all
def _init_h2us(self, fnc_most_specific):
"""Given a set of user GO ids, return GO ids grouped under the "GO high" terms.
Example of a grouped go list:
gos = ['GO:0044464':[ # grp_term: D1 cell part
'GO:0005737', # child: D3 cytoplasm
'GO:0048471', # child: D4 perinuclear region of cytoplasm
'GO:0016020':[ # grp_term: D1 membrane
'GO:0098589', # child: D2 membrane region
'GO:0005886', # child: D2 plasma membrane
]
"""
# Header GO IDs are main. User GO IDs are as specified by the user
hdrgo2usrgos = cx.defaultdict(set)
# Contains user GO IDs which are also header GO IDs, plus user main GO if needed
hdrgo_is_usrgo = set()
_go2nt = self.gosubdag.go2nt
objhi = GrouperInit.GetGoidHigh(self.gosubdag, self.hdrobj.hdrgos,
self.most_specific_fncs[fnc_most_specific])
for goid_usr in self.usrgos:
goid_main = _go2nt[goid_usr].id
# Add current GO ID to parents_all in case curr GO ID is a high GO.
goid_high = objhi.get_goid_high(goid_main)
# Don't add user GO ID if it is also the GO header
if goid_main != goid_high:
hdrgo2usrgos[goid_high].add(goid_usr)
elif goid_high not in hdrgo2usrgos:
hdrgo2usrgos[goid_high] = set()
if goid_main == goid_high:
hdrgo_is_usrgo.add(goid_main)
if goid_main != goid_usr:
hdrgo_is_usrgo.add(goid_usr)
# Initialize data members
self.hdrgo2usrgos = hdrgo2usrgos
self.hdrgo_is_usrgo = hdrgo_is_usrgo
# pylint: disable=too-few-public-methods
class GetGoidHigh:
"""Given a user GO ID, return the 'closest' header GO."""
def __init__(self, gosubdag, gos_high, get_most_specific):
self.go2parents = gosubdag.rcntobj.go2ancestors
self.go2nt = gosubdag.go2nt
self.gos_high = gos_high
self.get_most_specific = get_most_specific
def get_goid_high(self, goid_main):
"""Return the 'closest' GO header to the GO ID arg."""
parents_all = {goid_main}
if goid_main in self.go2parents:
parents_all.update(self.go2parents[goid_main])
parents_high = parents_all.intersection(self.gos_high)
assert parents_high, "NO PARENTS {P} {H} {NT}".format(
P=len(parents_all), H=len(self.gos_high), NT=goid_main)
return self.get_most_specific(parents_high, self.go2nt)
# --- Initialize go2nt. Namedtuple fields may be used in sortby lambda functions
def get_go2nt(self, usr_go2nt):
"""Combine user namedtuple fields, GO object fields, and format_txt."""
gos_all = self.get_gos_all()
# Minimum set of namedtuple fields available for use with Sorter on grouped GO IDs
prt_flds_all = get_hdridx_flds() + self.gosubdag.prt_attr['flds']
if not usr_go2nt:
return self.__init_go2nt_dflt(gos_all, prt_flds_all)
usr_nt_flds = next(iter(usr_go2nt.values()))._fields
# If user namedtuple already contains all fields available, then return usr_go2nt
if not set(prt_flds_all).difference(usr_nt_flds):
return self._init_go2nt_aug(usr_go2nt)
# Otherwise, combine user fields and default Sorter fields
return self.__init_go2nt_w_usr(gos_all, usr_go2nt, prt_flds_all)
def __init_go2nt_dflt(self, gos_all, prt_flds_all):
"""Combine GO object fields and format_txt."""
go2nts = [self.gosubdag.go2nt, self._get_go2nthdridx(gos_all)]
go2nt = get_dict_w_id2nts(gos_all, go2nts, prt_flds_all)
return self._init_go2nt_aug(go2nt)
def __init_go2nt_w_usr(self, gos_all, usr_go2nt, prt_flds_all):
"""Combine GO object fields and format_txt."""
assert usr_go2nt, "go2nt HAS NO ELEMENTS"
from goatools.nt_utils import get_unique_fields
go2nts = [usr_go2nt, self.gosubdag.go2nt, self._get_go2nthdridx(gos_all)]
usr_nt_flds = next(iter(usr_go2nt.values()))._fields # Get any single value from a dict
flds = get_unique_fields([usr_nt_flds, prt_flds_all])
go2nt = get_dict_w_id2nts(gos_all, go2nts, flds)
return self._init_go2nt_aug(go2nt)
def _init_go2nt_aug(self, go2nt):
"""Augment go2nt with GO ID key to account for alt GO IDs."""
go2obj = self.gosubdag.go2obj
# Get alt GO IDs
go2nt_aug = {}
# NOW
for goid_usr, nt_usr in go2nt.items():
goobj = go2obj[goid_usr]
if goobj.alt_ids:
alts = set(goobj.alt_ids)
alts.add(goobj.id)
for goid_alt in alts:
if goid_alt not in go2nt:
go2nt_aug[goid_alt] = nt_usr
# WAS
# Add alt GO IDs to go2nt
for goid, gont in go2nt_aug.items():
go2nt[goid] = gont
return go2nt
def _get_go2nthdridx(self, gos_all):
"""Get GO IDs header index for each user GO ID and corresponding parent GO IDs."""
go2nthdridx = {}
# NtHdrIdx Namedtuple fields:
# * format_txt: Used to determine the format when writing Excel cells
# * hdr_idx: Value printed in an Excel cell
# shortcuts
obj = GrouperInit.NtMaker(self)
# Create go2nthdridx
for goid in gos_all:
go2nthdridx[goid] = obj.get_nt(goid)
return go2nthdridx
class NtMaker:
"""Make namedtuples for GO IDs in grouper."""
ntobj = cx.namedtuple("NtHdrIdx", " ".join(get_hdridx_flds()))
def __init__(self, obj):
self.grpname = obj.grpname
self.usrgos = obj.usrgos
self.hdrgos = obj.hdrobj.hdrgos
## assert "GO:0008150" in self.hdrgos
self.go2obj = obj.gosubdag.go2obj
self.hdrgo2usrgos = obj.hdrgo2usrgos
self.hdrgo_is_usrgo = obj.hdrgo_is_usrgo
def get_nt(self, goid_user):
"""Get Grouper namedtuple for user GO ID."""
goid_main = self.go2obj[goid_user].id
goid_in_hdrgos = goid_main in self.hdrgo2usrgos
goid_in_usrgos = goid_user in self.hdrgo_is_usrgo
# format_txt = int(goid_in_hdrgos or goobj.id in self.hdrgos)
format_txt = int(goid_in_hdrgos)
# namedtuple grouping fields
hdr1usr01 = self._get_hdr1usr01(goid_in_hdrgos, goid_in_usrgos)
return self.ntobj(
format_txt=format_txt,
hdr_idx=format_txt,
is_hdrgo=goid_in_hdrgos,
is_usrgo=goid_in_usrgos,
num_usrgos=self._get_num_usrgos(goid_user, goid_in_hdrgos, goid_in_usrgos),
hdr1usr01=hdr1usr01)
def _get_num_usrgos(self, goid_main, goid_in_hdrgos, goid_in_usrgos):
"""Get the number of user GO IDs under a header GO ID."""
if not goid_in_hdrgos:
return "."
num_goids = len(self.hdrgo2usrgos[goid_main]) + int(goid_in_usrgos)
assert num_goids != 0, "{NAME} MAIN({GO}) num_goids({N})\n{HDRUSR}".format(
NAME=self.grpname, GO=goid_main, N=num_goids, HDRUSR=" ".join(sorted(self.hdrgos)))
return num_goids
@staticmethod
def _get_hdr1usr01(goid_in_hdrgos, goid_in_usrgos):
"""Get string indicating if GO is also a header GO."""
if goid_in_hdrgos:
return "**" if goid_in_usrgos else "*"
return ""
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
| 44.233463 | 99 | 0.629662 | 1,553 | 11,368 | 4.378622 | 0.176433 | 0.026471 | 0.021176 | 0.010588 | 0.226765 | 0.188382 | 0.156029 | 0.146029 | 0.124265 | 0.102794 | 0 | 0.021631 | 0.2924 | 11,368 | 256 | 100 | 44.40625 | 0.82372 | 0.300493 | 0 | 0.025974 | 0 | 0 | 0.036685 | 0.003122 | 0 | 0 | 0 | 0 | 0.038961 | 1 | 0.097403 | false | 0 | 0.051948 | 0 | 0.272727 | 0.019481 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a76be5f9bc3a776e69b25e97aaf2ea12e112a586 | 404 | py | Python | tests/test_meta_model.py | dickronez/autokeras | b31f2cafe77bf3a2f738289a89438fb72936117c | [
"MIT"
] | 1 | 2019-09-06T07:47:40.000Z | 2019-09-06T07:47:40.000Z | tests/test_meta_model.py | dickronez/autokeras | b31f2cafe77bf3a2f738289a89438fb72936117c | [
"MIT"
] | null | null | null | tests/test_meta_model.py | dickronez/autokeras | b31f2cafe77bf3a2f738289a89438fb72936117c | [
"MIT"
] | null | null | null | import tensorflow as tf
from autokeras import meta_model
def test_text_assembler():
texts = ['The cat sat on the mat.',
'The dog sat on the log.',
'Dogs and cats living together aa.']
assembler = meta_model.TextAssembler()
dataset = tf.data.Dataset.from_tensor_slices(texts)
for x in dataset:
assembler.update(x)
assert assembler.sw_ratio() == 0.5
| 28.857143 | 55 | 0.658416 | 57 | 404 | 4.54386 | 0.701754 | 0.069498 | 0.061776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006601 | 0.25 | 404 | 13 | 56 | 31.076923 | 0.848185 | 0 | 0 | 0 | 0 | 0 | 0.195545 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a76c7c78fbfedf21d213710d5bbb4b1b3420f0fb | 15,034 | py | Python | examples/PLSR/PLSR_on_NIR_and_octane_data.py | Mohamed0gad/hoggorm | 4debdb49a8d1d8858abb783be2ad67ffc96fd3ab | [
"BSD-2-Clause"
] | null | null | null | examples/PLSR/PLSR_on_NIR_and_octane_data.py | Mohamed0gad/hoggorm | 4debdb49a8d1d8858abb783be2ad67ffc96fd3ab | [
"BSD-2-Clause"
] | null | null | null | examples/PLSR/PLSR_on_NIR_and_octane_data.py | Mohamed0gad/hoggorm | 4debdb49a8d1d8858abb783be2ad67ffc96fd3ab | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Partial Least Squares Regression (PLSR) on Near Infrared Spectroscopy (NIR) data and octane data
# This notebook illustrates how to use the **hoggorm** package to carry out partial least squares regression (PLSR) on multivariate data. Furthermore, we will learn how to visualise the results of the PLSR using the **hoggormPlot** package.
# ---
# ### Import packages and prepare data
# First import **hoggorm** for analysis of the data and **hoggormPlot** for plotting of the analysis results. We'll also import **pandas** such that we can read the data into a data frame. **numpy** is needed for checking dimensions of the data.
# In[1]:
import hoggorm as ho
import hoggormplot as hop
import pandas as pd
import numpy as np
# Next, load the data that we are going to analyse using **hoggorm**. After the data has been loaded into the pandas data frame, we'll display it in the notebook.
# In[3]:
# Load NIR spectroscopy data
X_df = pd.read_csv('gasoline_NIR.txt', header=None, sep='\s+')
X_df
# In[6]:
# Load response data, that is octane measurements
y_df = pd.read_csv('gasoline_octane.txt', header=None, sep='\s+')
y_df
# The ``nipalsPLS1`` class in hoggorm accepts only **numpy** arrays with numerical values and not pandas data frames. Therefore, the pandas data frames holding the imported data need to be "taken apart" into three parts:
# * two numpy arrays holding the numeric values
# * two Python lists holding variable (column) names
# * two Python lists holding object (row) names.
#
# The numpy arrays with values will be used as input for the ``nipalsPLS1`` class for analysis. The Python lists holding the variable and row names will be used later in the plotting function from the **hoggormPlot** package when visualising the results of the analysis. Below is the code needed to access both data, variable names and object names.
# In[7]:
# Get the values from the data frame
X = X_df.values
Y = y_df.values
# Get the variable or columns names
X_varNames = list(X_df.columns)
Y_varNames = list(y_df.columns)
# Get the object or row names
X_objNames = list(X_df.index)
Y_objNames = list(y_df.index)
# ---
# ### Apply PLSR to our data
# Now, let's run PLSR on the data using the ``nipalsPLS1`` class, since we have a univariate response. The documentation provides a [description of the input parameters](https://hoggorm.readthedocs.io/en/latest/plsr.html). Using input parameters ``arrX`` and ``vecy`` we define which numpy arrays we would like to analyse. ``vecy`` is what typically is considered to be the response vector, while the measurements are typically defined as ``arrX``. By setting input parameter ``Xstand=False`` we make sure that the variables are only mean centered, not scaled to unit variance, if this is what you want. This is the default setting and actually doesn't need to be expressed explicitly. Setting parameter ``cvType=["loo"]`` we make sure that we compute the PLS1 model using full cross validation. ``"loo"`` means "Leave One Out". By setting parameter ``numComp=10`` we ask for ten components to be computed.
# In[9]:
model = ho.nipalsPLS1(arrX=X, Xstand=False,
vecy=Y,
cvType=["loo"],
numComp=10)
# That's it, the PLS1 model has been computed. Now we would like to inspect the results by visualising them. We can do this using plotting functions of the separate [**hoggormPlot** package](https://hoggormplot.readthedocs.io/en/latest/). If we wish to plot the results for component 1 and component 2, we can do this by setting the input argument ``comp=[1, 2]``. The input argument ``plots=[1, 6]`` lets the user define which plots are to be plotted. If this list for example contains value ``1``, the function will generate the scores plot for the model. If the list contains value ``6`` the explained variance plot for y will be plotted. The hoggormPlot documentation provides a [description of input parameters](https://hoggormplot.readthedocs.io/en/latest/mainPlot.html).
# In[16]:
hop.plot(model, comp=[1, 2],
plots=[1, 6],
objNames=X_objNames,
XvarNames=X_varNames,
YvarNames=Y_varNames)
# Plots can also be called separately.
# In[11]:
# Plot cumulative explained variance (both calibrated and validated) using a specific function for that.
hop.explainedVariance(model)
# In[13]:
# Plot cumulative validated explained variance in X.
hop.explainedVariance(model, which='X')
# In[14]:
hop.scores(model)
# In[17]:
# Plot X loadings in line plot
hop.loadings(model, weights=True, line=True)
# In[18]:
# Plot regression coefficients
hop.coefficients(model, comp=3)
# ---
# ### Accessing numerical results
# Now that we have visualised the PLSR results, we may also want to access the numerical results. Below are some examples. For a complete list of accessible results, please see this part of the documentation.
# In[61]:
# Get X scores and store in numpy array
X_scores = model.X_scores()
# Get scores and store in pandas dataframe with row and column names
X_scores_df = pd.DataFrame(model.X_scores())
X_scores_df.index = X_objNames
X_scores_df.columns = ['Comp {0}'.format(x+1) for x in range(model.X_scores().shape[1])]
X_scores_df
# In[20]:
help(ho.nipalsPLS1.X_scores)
# In[21]:
# Dimension of the X_scores
np.shape(model.X_scores())
# We see that the numpy array holds the scores for all gasoline samples (one row per object) across the ten components requested when computing the PLSR model.
# In[62]:
# Get X loadings and store in numpy array
X_loadings = model.X_loadings()
# Get X loadings and store in pandas dataframe with row and column names
X_loadings_df = pd.DataFrame(model.X_loadings())
X_loadings_df.index = X_varNames
X_loadings_df.columns = ['Comp {0}'.format(x+1) for x in range(model.X_loadings().shape[1])]
X_loadings_df
# In[23]:
help(ho.nipalsPLS1.X_loadings)
# In[24]:
np.shape(model.X_loadings())
# Here we see that the array holds the loadings for each of the spectral variables in X across the ten computed components.
# In[63]:
# Get Y loadings and store in numpy array
Y_loadings = model.Y_loadings()
# Get Y loadings and store in pandas dataframe with row and column names
Y_loadings_df = pd.DataFrame(model.Y_loadings())
Y_loadings_df.index = Y_varNames
Y_loadings_df.columns = ['Comp {0}'.format(x+1) for x in range(model.Y_loadings().shape[1])]
Y_loadings_df
# In[64]:
# Get X correlation loadings and store in numpy array
X_corrloadings = model.X_corrLoadings()
# Get X correlation loadings and store in pandas dataframe with row and column names
X_corrloadings_df = pd.DataFrame(model.X_corrLoadings())
X_corrloadings_df.index = X_varNames
X_corrloadings_df.columns = ['Comp {0}'.format(x+1) for x in range(model.X_corrLoadings().shape[1])]
X_corrloadings_df
# In[27]:
help(ho.nipalsPLS1.X_corrLoadings)
# In[65]:
# Get Y correlation loadings and store in numpy array
Y_corrloadings = model.Y_corrLoadings()
# Get Y correlation loadings and store in pandas dataframe with row and column names
Y_corrloadings_df = pd.DataFrame(model.Y_corrLoadings())
Y_corrloadings_df.index = Y_varNames
Y_corrloadings_df.columns = ['Comp {0}'.format(x+1) for x in range(model.Y_corrLoadings().shape[1])]
Y_corrloadings_df
# In[29]:
help(ho.nipalsPLS1.Y_corrLoadings)
# In[66]:
# Get calibrated explained variance of each component in X
X_calExplVar = model.X_calExplVar()
# Get calibrated explained variance in X and store in pandas dataframe with row and column names
X_calExplVar_df = pd.DataFrame(model.X_calExplVar())
X_calExplVar_df.columns = ['calibrated explained variance in X']
X_calExplVar_df.index = ['Comp {0}'.format(x+1) for x in range(model.X_loadings().shape[1])]
X_calExplVar_df
# In[31]:
help(ho.nipalsPLS1.X_calExplVar)
# In[67]:
# Get calibrated explained variance of each component in Y
Y_calExplVar = model.Y_calExplVar()
# Get calibrated explained variance in Y and store in pandas dataframe with row and column names
Y_calExplVar_df = pd.DataFrame(model.Y_calExplVar())
Y_calExplVar_df.columns = ['calibrated explained variance in Y']
Y_calExplVar_df.index = ['Comp {0}'.format(x+1) for x in range(model.Y_loadings().shape[1])]
Y_calExplVar_df
# In[33]:
help(ho.nipalsPLS1.Y_calExplVar)
# In[68]:
# Get cumulative calibrated explained variance in X
X_cumCalExplVar = model.X_cumCalExplVar()
# Get cumulative calibrated explained variance in X and store in pandas dataframe with row and column names
X_cumCalExplVar_df = pd.DataFrame(model.X_cumCalExplVar())
X_cumCalExplVar_df.columns = ['cumulative calibrated explained variance in X']
X_cumCalExplVar_df.index = ['Comp {0}'.format(x) for x in range(model.X_loadings().shape[1] + 1)]
X_cumCalExplVar_df
# In[35]:
help(ho.nipalsPLS1.X_cumCalExplVar)
# In[69]:
# Get cumulative calibrated explained variance in Y
Y_cumCalExplVar = model.Y_cumCalExplVar()
# Get cumulative calibrated explained variance in Y and store in pandas dataframe with row and column names
Y_cumCalExplVar_df = pd.DataFrame(model.Y_cumCalExplVar())
Y_cumCalExplVar_df.columns = ['cumulative calibrated explained variance in Y']
Y_cumCalExplVar_df.index = ['Comp {0}'.format(x) for x in range(model.Y_loadings().shape[1] + 1)]
Y_cumCalExplVar_df
# In[37]:
help(ho.nipalsPLS1.Y_cumCalExplVar)
# In[70]:
# Get cumulative calibrated explained variance for each variable in X
X_cumCalExplVar_ind = model.X_cumCalExplVar_indVar()
# Get cumulative calibrated explained variance for each variable in X and store in pandas dataframe with row and column names
X_cumCalExplVar_ind_df = pd.DataFrame(model.X_cumCalExplVar_indVar())
X_cumCalExplVar_ind_df.columns = X_varNames
X_cumCalExplVar_ind_df.index = ['Comp {0}'.format(x) for x in range(model.X_loadings().shape[1] + 1)]
X_cumCalExplVar_ind_df
# In[39]:
help(ho.nipalsPLS1.X_cumCalExplVar_indVar)
# In[41]:
# Get calibrated predicted Y for a given number of components
# Predicted Y from calibration using 1 component
Y_from_1_component = model.Y_predCal()[1]
# Predicted Y from calibration using 1 component stored in pandas data frame with row and columns names
Y_from_1_component_df = pd.DataFrame(model.Y_predCal()[1])
Y_from_1_component_df.index = Y_objNames
Y_from_1_component_df.columns = Y_varNames
Y_from_1_component_df
# In[42]:
# Get calibrated predicted Y for a given number of components
# Predicted Y from calibration using 4 component
Y_from_4_component = model.Y_predCal()[4]
# Predicted Y from calibration using 4 components stored in pandas data frame with row and column names
Y_from_4_component_df = pd.DataFrame(model.Y_predCal()[4])
Y_from_4_component_df.index = Y_objNames
Y_from_4_component_df.columns = Y_varNames
Y_from_4_component_df
# In[43]:
help(ho.nipalsPLS1.X_predCal)
# In[71]:
# Get validated explained variance of each component X
X_valExplVar = model.X_valExplVar()
# Get validated explained variance in X and store in pandas dataframe with row and column names
X_valExplVar_df = pd.DataFrame(model.X_valExplVar())
X_valExplVar_df.columns = ['validated explained variance in X']
X_valExplVar_df.index = ['Comp {0}'.format(x+1) for x in range(model.X_loadings().shape[1])]
X_valExplVar_df
# In[45]:
help(ho.nipalsPLS1.X_valExplVar)
# In[72]:
# Get validated explained variance of each component Y
Y_valExplVar = model.Y_valExplVar()
# Get validated explained variance in Y and store in pandas dataframe with row and column names
Y_valExplVar_df = pd.DataFrame(model.Y_valExplVar())
Y_valExplVar_df.columns = ['validated explained variance in Y']
Y_valExplVar_df.index = ['Comp {0}'.format(x+1) for x in range(model.Y_loadings().shape[1])]
Y_valExplVar_df
# In[47]:
help(ho.nipalsPLS1.Y_valExplVar)
# In[73]:
# Get cumulative validated explained variance in X
X_cumValExplVar = model.X_cumValExplVar()
# Get cumulative validated explained variance in X and store in pandas dataframe with row and column names
X_cumValExplVar_df = pd.DataFrame(model.X_cumValExplVar())
X_cumValExplVar_df.columns = ['cumulative validated explained variance in X']
X_cumValExplVar_df.index = ['Comp {0}'.format(x) for x in range(model.X_loadings().shape[1] + 1)]
X_cumValExplVar_df
# In[49]:
help(ho.nipalsPLS1.X_cumValExplVar)
# In[74]:
# Get cumulative validated explained variance in Y
Y_cumValExplVar = model.Y_cumValExplVar()
# Get cumulative validated explained variance in Y and store in pandas dataframe with row and column names
Y_cumValExplVar_df = pd.DataFrame(model.Y_cumValExplVar())
Y_cumValExplVar_df.columns = ['cumulative validated explained variance in Y']
Y_cumValExplVar_df.index = ['Comp {0}'.format(x) for x in range(model.Y_loadings().shape[1] + 1)]
Y_cumValExplVar_df
# In[51]:
help(ho.nipalsPLS1.Y_cumValExplVar)
# In[53]:
help(ho.nipalsPLS1.X_cumValExplVar_indVar)
# In[54]:
# Get validated predicted Y for a given number of components
# Predicted Y from validation using 1 component
Y_from_1_component_val = model.Y_predVal()[1]
# Predicted Y from validation using 1 component stored in pandas data frame with row and column names
Y_from_1_component_val_df = pd.DataFrame(model.Y_predVal()[1])
Y_from_1_component_val_df.index = Y_objNames
Y_from_1_component_val_df.columns = Y_varNames
Y_from_1_component_val_df
# In[55]:
# Get validated predicted Y for a given number of components
# Predicted Y from validation using 3 components
Y_from_3_component_val = model.Y_predVal()[3]
# Predicted Y from validation using 3 components stored in pandas data frame with row and column names
Y_from_3_component_val_df = pd.DataFrame(model.Y_predVal()[3])
Y_from_3_component_val_df.index = Y_objNames
Y_from_3_component_val_df.columns = Y_varNames
Y_from_3_component_val_df
# In[56]:
help(ho.nipalsPLS1.Y_predVal)
# In[58]:
# Get predicted scores for new measurements (objects) of X
# First pretend that we acquired new X data by using part of the existing data and overlaying some noise
import numpy.random as npr
new_X = X[0:4, :] + npr.rand(4, np.shape(X)[1])
np.shape(X)
# Now insert the new data into the existing model and compute scores for two components (numComp=2)
pred_X_scores = model.X_scores_predict(new_X, numComp=2)
# Same as above, but results stored in a pandas dataframe with row names and column names
pred_X_scores_df = pd.DataFrame(model.X_scores_predict(new_X, numComp=2))
pred_X_scores_df.columns = ['Comp {0}'.format(x+1) for x in range(2)]
pred_X_scores_df.index = ['new object {0}'.format(x+1) for x in range(np.shape(new_X)[0])]
pred_X_scores_df
# In[59]:
help(ho.nipalsPLS1.X_scores_predict)
# In[60]:
# Predict Y from new X data
pred_Y = model.Y_predict(new_X, numComp=2)
# Predict Y from nex X data and store results in a pandas dataframe with row names and column names
pred_Y_df = pd.DataFrame(model.Y_predict(new_X, numComp=2))
pred_Y_df.columns = Y_varNames
pred_Y_df.index = ['new object {0}'.format(x+1) for x in range(np.shape(new_X)[0])]
pred_Y_df
# In[ ]:
| 28.259398 | 900 | 0.756485 | 2,458 | 15,034 | 4.478845 | 0.146867 | 0.013625 | 0.036243 | 0.032701 | 0.533563 | 0.448815 | 0.417113 | 0.350622 | 0.26442 | 0.258425 | 0 | 0.018455 | 0.149395 | 15,034 | 531 | 901 | 28.312618 | 0.84243 | 0.522416 | 0 | 0 | 0 | 0 | 0.071784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033333 | 0 | 0.033333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a76d0f7f608e5cafee2b66e7a657ad5cdd2cdf2f | 851 | py | Python | Stacks/balanced_symbol.py | iamdsc/Abstract-Data-Types-in-Python | e736b49118a1d78ab3d58ed2fec7a92c7ee28807 | [
"MIT"
] | null | null | null | Stacks/balanced_symbol.py | iamdsc/Abstract-Data-Types-in-Python | e736b49118a1d78ab3d58ed2fec7a92c7ee28807 | [
"MIT"
] | null | null | null | Stacks/balanced_symbol.py | iamdsc/Abstract-Data-Types-in-Python | e736b49118a1d78ab3d58ed2fec7a92c7ee28807 | [
"MIT"
] | null | null | null | from stack import Stack
# Complete balance checker for symbols : '[ { ( ) } ]'
def sym_checker(symbol_string):
    s = Stack()
    balanced = True
    index = 0
    while index < len(symbol_string) and balanced:
        symbol = symbol_string[index]
        if symbol in '([{':
            s.push(symbol)
        else:
            if s.is_empty():
                balanced = False
            else:
                top = s.pop()
                if not matches(top, symbol):
                    balanced = False
        index = index + 1
    if balanced and s.is_empty():
        return True
    else:
        return False

def matches(op, close):
    opens = '([{'
    closes = ')]}'
    return opens.index(op) == closes.index(close)
print(sym_checker('{{([][])}()}'))
print(sym_checker('[{()]'))
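# Illustrative sketch only: the `stack` module imported above is not shown in
# this file, so here is a minimal list-backed stack (an assumption) exposing
# the three methods the checker relies on (push, pop, is_empty).
class _ListStack:
    def __init__(self):
        self._items = []

    def push(self, item):
        self._items.append(item)

    def pop(self):
        return self._items.pop()

    def is_empty(self):
        return len(self._items) == 0

# The checker logic itself is independent of the concrete stack implementation.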
| 21.820513 | 55 | 0.481786 | 88 | 851 | 4.568182 | 0.431818 | 0.074627 | 0.039801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003846 | 0.388954 | 851 | 38 | 56 | 22.394737 | 0.769231 | 0.061105 | 0 | 0.185185 | 0 | 0 | 0.034256 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.222222 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a76e80b962d665dcaf72da15754e8792e99709c4 | 325 | py | Python | erri/python/lesson_48/one_square.py | TGITS/programming-workouts | 799e805ccf3fd0936ec8ac2417f7193b8e9bcb55 | [
"MIT"
] | null | null | null | erri/python/lesson_48/one_square.py | TGITS/programming-workouts | 799e805ccf3fd0936ec8ac2417f7193b8e9bcb55 | [
"MIT"
] | 16 | 2020-05-30T12:38:13.000Z | 2022-02-19T09:23:31.000Z | erri/python/lesson_48/one_square.py | TGITS/programming-workouts | 799e805ccf3fd0936ec8ac2417f7193b8e9bcb55 | [
"MIT"
] | null | null | null | import turtle
# initialisation
turtle.mode("standard")
turtle.home()
turtle.showturtle()
turtle.speed(1)
turtle.pencolor("red")
turtle.pensize(2)
turtle.pendown()
# dessin du carré
side = 100
angle = 90
for i in range(4):
    turtle.forward(side)
    turtle.right(angle)
# finalisation
turtle.hideturtle()
turtle.done()
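# Generalisation sketch (not part of the lesson): any regular polygon can be
# drawn the same way by turning through the exterior angle 360/n after each
# side, e.g. draw_polygon(6, 60) for a hexagon (call it before turtle.done()).
def draw_polygon(n_sides, side_length):
    for _ in range(n_sides):
        turtle.forward(side_length)
        turtle.right(360 / n_sides)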
| 13.541667 | 24 | 0.726154 | 44 | 325 | 5.363636 | 0.704545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02847 | 0.135385 | 325 | 23 | 25 | 14.130435 | 0.811388 | 0.132308 | 0 | 0 | 0 | 0 | 0.039568 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7706d520415611b189c31e1fe0151c5f9431a62 | 1,462 | py | Python | diff.py | namtium-oxide/launcher-diff | 70cd5fcc573d725f8dfedc5b53464402a74b7c98 | [
"MIT"
] | null | null | null | diff.py | namtium-oxide/launcher-diff | 70cd5fcc573d725f8dfedc5b53464402a74b7c98 | [
"MIT"
] | null | null | null | diff.py | namtium-oxide/launcher-diff | 70cd5fcc573d725f8dfedc5b53464402a74b7c98 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import json
import os
from glob import glob
key_versions = {}
def walk(keys, node):
version = int(node["minimumLauncherVersion"])
_walk(keys, node, "", version)
def _walk(keys, node, cur, version):
if isinstance(node, dict):
for k, v in node.items():
next = "{}/{}".format(cur, k)
_walk(keys, v, next, version)
elif isinstance(node, list):
next = cur + "/*"
for v in node:
_walk(keys, v, next, version)
else:
cur = "{}({})".format(cur, type(node).__name__)
if cur not in keys:
keys[cur] = set()
keys[cur].add(version)
files = glob("json/*.json")
for filename in files:
with open(filename, 'r') as fd:
launch_info = json.load(fd)
walk(key_versions, launch_info)
print("SUMMARY:")
for k in sorted(key_versions.keys()):
print(k)
print("\nDIFFERENCES:")
version_keys = {}
for k, v in key_versions.items():
for version in v:
if version not in version_keys:
version_keys[version] = set()
version_keys[version].add(k)
prev_keys = set()
for version in sorted(version_keys.keys()):
print("launcher version:", version)
cur_keys = version_keys[version]
added = cur_keys - prev_keys
missing = prev_keys - cur_keys
for k in sorted(added):
print("+", k)
for k in sorted(missing):
print("-", k)
print()
prev_keys = cur_keys
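# Worked example (hypothetical manifest, for illustration only): walk() maps
# each typed key path to the set of launcher versions it appears in.
demo_node = {"minimumLauncherVersion": 21,
             "arguments": {"game": ["--username", {"rules": []}]}}
demo_keys = {}
walk(demo_keys, demo_node)
print("\nEXAMPLE:", demo_keys)
# e.g. {'/minimumLauncherVersion(int)': {21}, '/arguments/game/*(str)': {21}}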
| 22.84375 | 55 | 0.591655 | 197 | 1,462 | 4.248731 | 0.274112 | 0.078853 | 0.086022 | 0.043011 | 0.04779 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000934 | 0.267442 | 1,462 | 63 | 56 | 23.206349 | 0.780579 | 0.011628 | 0 | 0.041667 | 0 | 0 | 0.060942 | 0.015235 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.0625 | 0 | 0.104167 | 0.145833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7713c895cacb5109845bbff3a86aa5e750f2fa8 | 5,137 | py | Python | python/hardware/NeckUpDown.py | Springwald/RoobertV1 | e12f9df9c526797340520eccfaa54da37010457b | [
"MIT"
] | null | null | null | python/hardware/NeckUpDown.py | Springwald/RoobertV1 | e12f9df9c526797340520eccfaa54da37010457b | [
"MIT"
] | null | null | null | python/hardware/NeckUpDown.py | Springwald/RoobertV1 | e12f9df9c526797340520eccfaa54da37010457b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Roobert - home robot project
# ________ ______ _____
# ___ __ \______________ /_______________ /_
# __ /_/ / __ \ __ \_ __ \ _ \_ ___/ __/
# _ _, _// /_/ / /_/ / /_/ / __/ / / /_
# /_/ |_| \____/\____//_.___/\___//_/ \__/
#
# Project website: http://roobert.springwald.de
#
# ########################################
# # neck left/right motor control module #
# ########################################
#
# Licensed under MIT License (MIT)
#
# Copyright (c) 2016 Daniel Springwald | daniel@springwald.de
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import time
from GroveI2CMotorDriver import GroveI2CMotorDriver
from I2cIoExpanderPcf8574 import I2cIoExpanderPcf8574
from StepperMotorControl import StepperMotorControl
class NeckUpDown(StepperMotorControl):
_motorName = "neck up/down"
_i2cIoExpanderPcf8574Motor = None # the I2cIoExpanderPcf8574 to control the 2 motors
_i2cIoExpanderPcf8574EndStop = None # the I2cIoExpanderPcf8574 the endstop is connected to
_endStopBit = 1 # the bit of the I2cIoExpanderPcf8574 to read the motor endstop
MaxSteps = 300 # how many motor steps can the motor maximum move
_isClosedCircle = False # is 0 to maxSteps a full round to the same endstop
_fastestSpeedDelay = 0.003 # how fast can the stepper motor go
_slowestSpeedDelay = _fastestSpeedDelay * 4
_actualSpeedDelay = _slowestSpeedDelay
_rampSpeedup = 1.01 # how fast is the speed of for motor ramping
_rampSafeArea = 40 # prevent to come nearer than this to the endstop
_stepData = [0b10000001, 0b01000010, 0b00100100, 0b00011000] # the stepper motor step bits (4 bits for each motor)
_stepDataOff = 0
_released = False
def __init__(self, i2cIoExpanderPcf8574Motor=None, i2cIoExpanderPcf8574EndStop=None):
super().__init__()
self._i2cIoExpanderPcf8574Motor=i2cIoExpanderPcf8574Motor
self._i2cIoExpanderPcf8574EndStop = i2cIoExpanderPcf8574EndStop
super().start()
def _endStop(self):
#print (self._i2cIoExpanderPcf8574EndStop.getBit(self._endStopBit))
return self._i2cIoExpanderPcf8574EndStop.getBit(self._endStopBit)
def _updateMotorSteps(self):
if (super()._releasedMotor == True):
return
lastStepDataPos = self.lastStepDataPos
actualStepDataPos = self.actualStepDataPos
#print("actualStepDataPos = " + str(actualStepDataPos) + " of " + str(len(self._stepData)))
if (lastStepDataPos != actualStepDataPos): # stepper has to move
#print("actualStepDataPos " + self._motorName + ":" + str(actualStepDataPos))
if (actualStepDataPos > len(self._stepData)-1):
actualStepDataPos = len(self._stepData)-1
print("actualStepDataPos >= "+ str(len(self._stepData)))
else:
if (actualStepDataPos < 0):
actualStepDataPos = 0
print("actualStepDataPos < 0")
self._i2cIoExpanderPcf8574Motor.setByte(self._stepData[actualStepDataPos])
self.lastStepDataPos = actualStepDataPos
self.lastStepDataPosChange = time.time()
def Release(self):
if (self._released == False):
super().ReleaseStepperMotor()
self._released = True
self._i2cIoExpanderPcf8574Motor.setByte(self._stepDataOff)
def __del__(self):
self.Release()
if __name__ == "__main__":
endStop = I2cIoExpanderPcf8574(0x38, useAsInputs=True)
motor = I2cIoExpanderPcf8574(0x3e, useAsInputs=False)
controller = NeckUpDown(motor, endStop)
for i in range(1, 2):
controller.targetPos = 0
while controller.targetReached == False:
#print("wait for target "+ str(controller._targetPos))
#controller.ManualUpdate()
time.sleep(0.1)
controller.targetPos = controller.MaxSteps
while controller.targetReached == False:
#print("wait for target "+ str(controller._targetPos))
#controller.ManualUpdate()
time.sleep(0.1)
controller.Release()
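# Side note (illustration only): each entry of _stepData drives both motors in
# one byte -- per the class comment, 4 bits per motor -- so the low and high
# nibbles can be read as the two coil patterns. Which nibble belongs to which
# physical motor is an assumption here.
for pattern in [0b10000001, 0b01000010, 0b00100100, 0b00011000]:
    print("low nibble: {:04b}  high nibble: {:04b}".format(pattern & 0x0F, pattern >> 4))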
| 39.515385 | 121 | 0.686393 | 523 | 5,137 | 6.441683 | 0.414914 | 0.026121 | 0.017809 | 0.02434 | 0.11873 | 0.068863 | 0.068863 | 0.068863 | 0.068863 | 0.068863 | 0 | 0.042786 | 0.217442 | 5,137 | 129 | 122 | 39.821705 | 0.795274 | 0.475764 | 0 | 0.065574 | 0 | 0 | 0.024247 | 0 | 0 | 0 | 0.003129 | 0 | 0 | 1 | 0.081967 | false | 0 | 0.065574 | 0.016393 | 0.42623 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a77317efd840e253016125f39203bf19c2d1ca11 | 5,273 | py | Python | brainreg/backend/niftyreg/parameters.py | stephenlenzi/brainreg | e08a3902bdd2fb0c7b225c985383cbda5d354faf | [
"BSD-3-Clause"
] | null | null | null | brainreg/backend/niftyreg/parameters.py | stephenlenzi/brainreg | e08a3902bdd2fb0c7b225c985383cbda5d354faf | [
"BSD-3-Clause"
] | null | null | null | brainreg/backend/niftyreg/parameters.py | stephenlenzi/brainreg | e08a3902bdd2fb0c7b225c985383cbda5d354faf | [
"BSD-3-Clause"
] | null | null | null | from brainreg.backend.niftyreg.niftyreg_binaries import (
get_niftyreg_binaries,
get_binary,
)
class RegistrationParams:
"""
A class to store and access the variables required for the registration
including the paths of the different binaries and atlases.
Options are typically stored as a tuple of (option_string, option_value)
"""
def __init__(
self,
affine_n_steps=6,
affine_use_n_steps=5,
freeform_n_steps=6,
freeform_use_n_steps=4,
bending_energy_weight=0.95,
grid_spacing=-10,
smoothing_sigma_reference=-1.0,
smoothing_sigma_floating=-1.0,
histogram_n_bins_floating=128,
histogram_n_bins_reference=128,
):
self.transform_program_path = self.__get_binary("transform")
self.affine_reg_program_path = self.__get_binary("affine")
self.freeform_reg_program_path = self.__get_binary("freeform")
self.segmentation_program_path = self.__get_binary("segmentation")
# affine (reg_aladin)
self.affine_reg_pyramid_steps = ("-ln", affine_n_steps)
self.affine_reg_used_pyramid_steps = ("-lp", affine_use_n_steps)
# freeform (ref_f3d)
self.freeform_reg_pyramid_steps = ("-ln", freeform_n_steps)
self.freeform_reg_used_pyramid_steps = ("-lp", freeform_use_n_steps)
self.freeform_reg_grid_spacing = ("-sx", grid_spacing)
self.bending_energy_penalty_weight = ("-be", bending_energy_weight)
self.reference_image_smoothing_sigma = (
"-smooR",
smoothing_sigma_reference,
)
self.floating_image_smoothing_sigma = (
"-smooF",
smoothing_sigma_floating,
)
self.reference_image_histo_n_bins = (
"--rbn",
histogram_n_bins_reference,
)
self.floating_image_histo_n_bins = ("--fbn", histogram_n_bins_floating)
# segmentation (reg_resample)
self.segmentation_interpolation_order = ("-inter", 0)
def get_affine_reg_params(self):
"""
Get the parameters (options) required for the affine registration step
:return: The affine registration options.
:rtype: list
"""
affine_params = [
self.affine_reg_pyramid_steps,
self.affine_reg_used_pyramid_steps,
]
return affine_params
def get_freeform_reg_params(self):
"""
Get the parameters (options) required for the freeform (elastic)
registration step
:return: The freeform registration options.
:rtype: list
"""
freeform_params = [
self.freeform_reg_pyramid_steps,
self.freeform_reg_used_pyramid_steps,
self.freeform_reg_grid_spacing,
self.bending_energy_penalty_weight,
self.reference_image_smoothing_sigma,
self.floating_image_smoothing_sigma,
self.reference_image_histo_n_bins,
self.floating_image_histo_n_bins,
]
return freeform_params
def get_segmentation_params(self):
"""
Get the parameters (options) required for the segmentation step
(propagation of transformation)
:return: The affine registration options.
:rtype: list
"""
return [self.segmentation_interpolation_order]
def format_param_pairs(self, params_pairs):
"""
Format the list of params pairs into a string
:param list params_pairs: A list of tuples of the form
(option_string, option_value) (e.g. (-sx, 10))
:return: The options as a formatted string
:rtype: str
"""
out = ""
for param in params_pairs:
out += "{} {} ".format(*param)
return out
def format_affine_params(self):
"""
Generate the string of formatted affine registration options
:return: The formatted string
:rtype: str
"""
return self.format_param_pairs(self.get_affine_reg_params())
def format_freeform_params(self):
"""
Generate the string of formatted freeform registration options
:return: The formatted string
:rtype: str
"""
return self.format_param_pairs(self.get_freeform_reg_params())
def format_segmentation_params(self):
"""
Generate the string of formatted segmentation options
:return: The formatted string
:rtype: str
"""
return self.format_param_pairs(self.get_segmentation_params())
def __get_binary(self, program_type):
"""
Get the path to the registration (from nifty_reg) program
based on the type
:param str program_type:
:return: The program path
:rtype: str
"""
program_names = {
"affine": "reg_aladin",
"freeform": "reg_f3d",
"segmentation": "reg_resample",
"transform": "reg_transform",
}
program_name = program_names[program_type]
nifty_reg_binaries_folder = get_niftyreg_binaries()
program_path = get_binary(nifty_reg_binaries_folder, program_name)
return program_path
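# Usage sketch (assumes the niftyreg binaries shipped with the package can be
# resolved, since the constructor looks up their paths):
if __name__ == "__main__":
    params = RegistrationParams()
    # default affine options, formatted for reg_aladin: "-ln 6 -lp 5 "
    print(params.format_affine_params())
    print(params.format_freeform_params())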
| 31.386905 | 79 | 0.635502 | 592 | 5,273 | 5.29223 | 0.194257 | 0.022343 | 0.033514 | 0.022981 | 0.414619 | 0.34759 | 0.258219 | 0.123524 | 0.123524 | 0.108522 | 0 | 0.006375 | 0.285985 | 5,273 | 167 | 80 | 31.57485 | 0.825764 | 0.255263 | 0 | 0 | 0 | 0 | 0.046315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.011905 | 0 | 0.22619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7744be113d572b7e5a0ca21aca975e6b37a6190 | 4,673 | py | Python | modules/loss_functions.py | daved01/real_time_style_transfer | 6391d006cd3d0c274b9682e23e64ab0fa43c242a | [
"Apache-2.0"
] | 1 | 2021-12-28T17:40:37.000Z | 2021-12-28T17:40:37.000Z | modules/loss_functions.py | daved01/real_time_style_transfer | 6391d006cd3d0c274b9682e23e64ab0fa43c242a | [
"Apache-2.0"
] | null | null | null | modules/loss_functions.py | daved01/real_time_style_transfer | 6391d006cd3d0c274b9682e23e64ab0fa43c242a | [
"Apache-2.0"
] | null | null | null | from tensorflow import keras
import tensorflow as tf
from tensorflow.keras.applications import vgg16
from tensorflow.keras.layers import Input
def get_loss_network():
loss_net = vgg16.VGG16(include_top=False, weights="imagenet", input_tensor=Input(shape=(256,256,3)))
loss_net_outputs = dict([(layer.name, layer.output) for layer in loss_net.layers])
loss_net_activations = keras.Model(inputs=loss_net.inputs, outputs=loss_net_outputs)
return loss_net_activations
def gram_matrix(x):
"""
Computes the gram matrix with batch dimension.
y = xT * x
Inputs:
x -- tf.tensor with batch dimension (batch_dim, x1, x2, x3)
"""
x = tf.transpose(x, (0,3,1,2))
features = tf.reshape(x, (tf.shape(x)[0], tf.shape(x)[1], -1))
gram = tf.matmul(features, tf.transpose(features, (0,2,1)))
return gram
def compute_content_loss(generated, content, dimensions):
"""
Computes the content loss from the given features.
Equation 2 in paper.
Args:
generated: Tensor feature map of the generated image.
content: Tensor feature map of the content image.
dimensions: List of layer dimensions [height, width, channels]
"""
# Check dimensions
assert generated.shape[0] == content.shape[0], "Batch dimensions of generated and content image don't match!"
height, width, channels = dimensions[0], dimensions[1], dimensions[2]
scaling_factor = (int(height/4) * int(width/4) * channels) # H, W, C
# Sum over all elements, including the batch_size to get average loss over the batch.
content_reconstruction_loss = tf.math.reduce_sum(tf.square(generated - content)) / (scaling_factor * generated.shape[0])
return content_reconstruction_loss
def compute_style_loss(generated, style, dimensions):
"""
Compute style loss for one layer.
"""
# Dimensions
height, width, channels = dimensions[0], dimensions[1], dimensions[2]
scaling_factor = (channels * height * width)**2
generated = gram_matrix(generated)
style = gram_matrix(style)
# Compute the total average loss over all elements in the batch.
res = tf.reduce_sum(tf.square(generated - style)) / (scaling_factor * generated.shape[0])
return res
def compute_perceptual_loss(generated_image, content_image, style_image, loss_net_activations, batch_size, content_layers, style_layers):
"""
Computes the loss with the loss network.
Args:
tf.tensors, scaled to [0,1] with dim (b,h,w,c), RGB.
"""
# Combine input tensors to make one pass with all in parallel.
input_tensors = tf.concat([generated_image, content_image, style_image], axis=0)
# Preprocess input_tensors for vgg16. Expects range [0, 255]
input_tensors = tf.keras.applications.vgg16.preprocess_input(input_tensors*255)
# Forward pass to get loss from loss network.
features = loss_net_activations(input_tensors, training=False)
# Initialize loss
loss = tf.zeros(shape=())
# Compute content loss
for content_layer in content_layers.keys():
layer_features = features[content_layer]
generated_features = layer_features[0:batch_size,:,:,:]
content_features = layer_features[batch_size:2*batch_size,:,:,:]
loss += compute_content_loss(generated_features, content_features, content_layers[content_layer])
# Compute style loss
for style_layer in style_layers.keys():
layer_features = features[style_layer]
generated_features = layer_features[0:batch_size,:,:,:]
style_features = layer_features[2*batch_size,:,:,:]
style_features = tf.expand_dims(style_features, 0)
loss += compute_style_loss(generated_features, style_features, style_layers[style_layer])
return loss
@tf.function
def compute_loss_and_grads(content_image, style_image, transform_network, optimizer, loss_net_activations, batch_size, content_layers, style_layers):
"""
Takes in content and style images as tf.tensors with batch dimension
and scaled to range [0,1].
"""
with tf.GradientTape() as tape:
# Forward pass
generated_image = transform_network(content_image, training=True)
# Convert to range [0,1]
generated_image = ((generated_image * 0.5) + 0.5)
# Get loss
loss = compute_perceptual_loss(generated_image, content_image, style_image, loss_net_activations, batch_size, content_layers, style_layers)
# Get gradients and upate weights
grads = tape.gradient(loss, transform_network.trainable_weights)
optimizer.apply_gradients(zip(grads, transform_network.trainable_weights))
return loss | 37.384 | 149 | 0.710678 | 625 | 4,673 | 5.1184 | 0.224 | 0.02407 | 0.033761 | 0.027509 | 0.248828 | 0.184745 | 0.152235 | 0.152235 | 0.124101 | 0.108159 | 0 | 0.01772 | 0.190884 | 4,673 | 125 | 150 | 37.384 | 0.828352 | 0.233255 | 0 | 0.113208 | 0 | 0 | 0.019733 | 0 | 0 | 0 | 0 | 0 | 0.018868 | 1 | 0.113208 | false | 0 | 0.075472 | 0 | 0.301887 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
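A usage sketch for compute_perceptual_loss above: content_layers and style_layers are dicts mapping VGG16 layer names to that layer's [height, width, channels] at the 256x256 input resolution. The particular layers below are an assumption chosen for illustration (a common Johnson-style selection), not something fixed by the module, and the gen/content/style tensors are placeholders.

content_layers = {"block2_conv2": [128, 128, 128]}
style_layers = {"block1_conv2": [256, 256, 64],
                "block2_conv2": [128, 128, 128],
                "block3_conv3": [64, 64, 256],
                "block4_conv3": [32, 32, 512]}
loss_net_activations = get_loss_network()
# gen, content, style: float tensors in [0, 1] with shape (batch, 256, 256, 3)
# loss = compute_perceptual_loss(gen, content, style, loss_net_activations,
#                                batch_size=4, content_layers=content_layers,
#                                style_layers=style_layers)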
a77534ddb5be339708cb1835224ed2ce4234c6ca | 2,409 | py | Python | titanic/api.py | pmanlukas/titanic-flask | cc46eff6f0b001118deafd0962d06c77aad15d7f | [
"MIT"
] | null | null | null | titanic/api.py | pmanlukas/titanic-flask | cc46eff6f0b001118deafd0962d06c77aad15d7f | [
"MIT"
] | null | null | null | titanic/api.py | pmanlukas/titanic-flask | cc46eff6f0b001118deafd0962d06c77aad15d7f | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
import pandas as pd
import traceback
import sys  # sys.argv is read in the __main__ block below
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def predict():
if model:
try:
json_ = request.json
print(json_)
query = preprocess_input(json_)
prediction = model_pred(query)
print("prediction: {}".format(prediction))
return jsonify({'prediction': str(prediction)})
except:
return jsonify({'trace': traceback.format_exc()})
else:
print('Train a model first')
return 'no model to use'
@app.route('/train', methods=['GET'])
def train():
    # rebind the module-level model so later /predict calls use the new fit
    global model
    try:
        model = train_model(dataframe=load_process_data())
        print("trained model")
        return jsonify({'success': "trained model!"})
    except:
        return jsonify({'trace': traceback.format_exc()})
@app.route('/healthz', methods=['GET'])
def healt_check():
return "api is running"
def load_process_data():
#import dataset
url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv"
df = pd.read_csv(url)
cols = ['Age','Sex','Embarked','Survived']
df_ = df[cols]
categoricals = []
for col, col_type in df_.dtypes.iteritems():
if col_type == 'O':
categoricals.append(col)
else:
df_[col].fillna(0, inplace=True)
#one hot encode the data
df_ohe = pd.get_dummies(df_,columns=categoricals, dummy_na=True)
return df_ohe
def train_model(dataframe):
dependent_var = 'Survived'
X = dataframe[dataframe.columns.difference([dependent_var])]
Y = dataframe[dependent_var]
clf = LogisticRegression()
clf.fit(X,Y)
return clf
def preprocess_input(json):
query = pd.get_dummies(pd.DataFrame(json))
query = query.reindex(columns=model_columns, fill_value=0)
return query
def model_pred(query):
prediction = list(model.predict(query))
return prediction
if __name__ == "__main__":
try:
port = int(sys.argv[1])
except:
port = 5000
global model
model = joblib.load('model.pkl')
print("Model loaded!")
model_columns = joblib.load('model_cols.pkl')
print("Model columns loaded!")
app.run(host='0.0.0.0', port=port, debug=True) | 26.184783 | 79 | 0.629307 | 294 | 2,409 | 4.993197 | 0.411565 | 0.035422 | 0.025886 | 0.032698 | 0.057221 | 0.057221 | 0.057221 | 0 | 0 | 0 | 0 | 0.006557 | 0.240349 | 2,409 | 92 | 80 | 26.184783 | 0.795628 | 0.015359 | 0 | 0.142857 | 0 | 0.014286 | 0.134121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.071429 | 0.014286 | 0.314286 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
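A request sketch for the /predict route above (the field values are made up for illustration; the route expects a JSON list of records with the raw Age/Sex/Embarked fields, one-hot encodes them, and reindexes to the stored model columns):

import requests  # assumes the API is running locally on port 5000

payload = [{"Age": 22, "Sex": "male", "Embarked": "S"},
           {"Age": 38, "Sex": "female", "Embarked": "C"}]
response = requests.post("http://127.0.0.1:5000/predict", json=payload)
print(response.json())  # e.g. {"prediction": "[0, 1]"}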
a77bbe82bf35d7977e569a471786269904092a9a | 2,470 | py | Python | tests/test_hydra_cli_errors.py | sara-nl/hydra | 8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7 | [
"MIT"
] | 5,847 | 2019-10-03T04:20:44.000Z | 2022-03-31T17:07:46.000Z | tests/test_hydra_cli_errors.py | sara-nl/hydra | 8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7 | [
"MIT"
] | 1,393 | 2019-10-04T01:03:38.000Z | 2022-03-31T20:29:35.000Z | tests/test_hydra_cli_errors.py | sara-nl/hydra | 8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7 | [
"MIT"
] | 505 | 2019-10-03T19:41:42.000Z | 2022-03-31T11:40:16.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import re
from pathlib import Path
from typing import Any
from pytest import mark, param
from hydra.test_utils.test_utils import (
chdir_hydra_root,
normalize_newlines,
run_with_error,
)
chdir_hydra_root()
@mark.parametrize(
"override,expected",
[
param(
"+key=int(",
"no viable alternative at input 'int('",
id="parse_error_in_function",
),
param(
"+key=sort()",
"""Error parsing override '+key=sort()'
ValueError while evaluating 'sort()': empty sort input""",
id="empty_sort",
),
param(
"key=sort(interval(1,10))",
"""Error parsing override 'key=sort(interval(1,10))'
TypeError while evaluating 'sort(interval(1,10))': mismatch type argument args[0]""",
id="sort_interval",
),
param(
"+key=choice()",
"""Error parsing override '+key=choice()'
ValueError while evaluating 'choice()': empty choice is not legal""",
id="empty choice",
),
param(
["+key=choice(choice(a,b))", "-m"],
"""Error parsing override '+key=choice(choice(a,b))'
ValueError while evaluating 'choice(choice(a,b))': nesting choices is not supported
See https://hydra.cc/docs/next/advanced/override_grammar/basic for details
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
""",
id="empty choice",
),
param(
"--config-dir=/dir/not/found",
f"""Additional config directory '{Path('/dir/not/found').absolute()}' not found
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
""",
id="config_dir_not_found",
),
],
)
def test_cli_error(tmpdir: Any, monkeypatch: Any, override: Any, expected: str) -> None:
monkeypatch.chdir("tests/test_apps/app_without_config/")
if isinstance(override, str):
override = [override]
cmd = ["my_app.py", "hydra.sweep.dir=" + str(tmpdir)] + override
ret = normalize_newlines(run_with_error(cmd))
assert (
re.search("^" + re.escape(normalize_newlines(expected.strip())), ret)
is not None
), (
f"Result:"
f"\n---"
f"\n{ret}"
f"\n---"
f"\nDid not match expected:"
f"\n---"
f"\n{expected}"
f"\n---"
)
| 30.121951 | 91 | 0.585425 | 296 | 2,470 | 4.777027 | 0.402027 | 0.008487 | 0.056577 | 0.065064 | 0.248939 | 0.090523 | 0.090523 | 0.090523 | 0.090523 | 0.090523 | 0 | 0.006648 | 0.269231 | 2,470 | 81 | 92 | 30.493827 | 0.776731 | 0.02753 | 0 | 0.272727 | 0 | 0 | 0.305556 | 0.096372 | 0 | 0 | 0 | 0 | 0.015152 | 1 | 0.015152 | false | 0 | 0.075758 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a77d9310bacb731cb2a8e779ec5f5c6937a6ccd6 | 1,013 | py | Python | main.py | victorfariassb/tarefa_automacao_insper | e06c874176313f807d802d3241b0b915ee4a64c5 | [
"MIT"
] | null | null | null | main.py | victorfariassb/tarefa_automacao_insper | e06c874176313f807d802d3241b0b915ee4a64c5 | [
"MIT"
] | null | null | null | main.py | victorfariassb/tarefa_automacao_insper | e06c874176313f807d802d3241b0b915ee4a64c5 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor
import loggerConfig
from crawlDou import crawlDou
from writeResult import writeResult
import os.path
# create a crawler process with the specified settings
runner = CrawlerRunner(
{
'LOG_STDOUT': False,
'LOG_ENABLED': True,
'ROBOTSTXT_OBEY' : True,
'RANDOMIZE_DOWNLOAD_DELAY': True,
'CONCURRENT_REQUESTS': 5,
'RETRY_TIMES' : 5,
'AUTOTHROTTLE_ENABLED' : True,
'HTTPCACHE_ENABLED': True, # for development
'FEEDS':{
'items.jl': {
'format': 'jsonlines',
'encoding': 'utf8'
}
},
}
)
crawlDou(runner, "09-12-2021", "dou1")
reactor.run() # the script will block here until the last crawl call is finished
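# Side note: this assumes crawlDou() both schedules the spider on the runner
# and stops the reactor when the crawl finishes; with a bare CrawlerRunner the
# usual pattern from the Scrapy docs to unblock reactor.run() would be:
# d = runner.join()
# d.addBoth(lambda _: reactor.stop())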
if (os.path.exists("items.jl")):
writeResult("result.json", "items.jl")
else:
raise FileNotFoundError("Required files not found. Try again later")
| 26.657895 | 81 | 0.627838 | 111 | 1,013 | 5.648649 | 0.702703 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017356 | 0.260612 | 1,013 | 37 | 82 | 27.378378 | 0.81976 | 0.154985 | 0 | 0 | 0 | 0 | 0.291422 | 0.028202 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a77f76a2d239f4e208706be5e533beb7cb4999a3 | 9,264 | py | Python | solardatatools/algorithms/time_shifts.py | catzzz/solar-data-tools | dc173c1036bc2e3116b302f3fd442b1cb030e0b0 | [
"BSD-2-Clause"
] | 3 | 2019-02-26T18:06:12.000Z | 2019-04-16T19:49:27.000Z | solardatatools/algorithms/time_shifts.py | catzzz/solar-data-tools | dc173c1036bc2e3116b302f3fd442b1cb030e0b0 | [
"BSD-2-Clause"
] | 1 | 2019-03-28T19:02:37.000Z | 2019-03-28T19:02:37.000Z | solardatatools/algorithms/time_shifts.py | catzzz/solar-data-tools | dc173c1036bc2e3116b302f3fd442b1cb030e0b0 | [
"BSD-2-Clause"
] | 1 | 2019-03-06T17:52:27.000Z | 2019-03-06T17:52:27.000Z | """ Time Shift Algorithm Module
This module contains the algorithm for detecting time shifts in an unlabeled PV
power production data sets. These occur because of the local clock on the data
logging system being changed or by incorrect handling of daylight savings.
The algorithm works as follows:
- Estimate solar noon on each day from the data
- Fit a signal demixing model, assuming a seasonal component and a piecewise
constant component
- Polish the L1 heuristic used to estimate piecewise constant component
using iterative reweighting
- Use piecewise constance component to detect shift points in time and
correction amounts
"""
import numpy as np
from scipy.stats import mode
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from solardatatools.solar_noon import energy_com, avg_sunrise_sunset
from solardatatools.signal_decompositions import l2_l1d1_l2d2p365
class TimeShift:
def __init__(self):
self.metric = None
self.s1 = None
self.s2 = None
self.index_set = None
self.corrected_data = None
self.roll_by_index = None
self.normalized_holdout_error = None
self.normalized_train_error = None
self.tv_metric = None
self.jumps_per_year = None
self.best_c1 = None
self.best_ix = None
self.__recursion_depth = 0
def run(
self,
data,
use_ixs=None,
c1=None,
c2=200.0,
solar_noon_estimator="com",
threshold=0.1,
periodic_detector=False,
solver=None,
):
if solar_noon_estimator == "com":
metric = energy_com(data)
elif solar_noon_estimator == "srss":
metric = avg_sunrise_sunset(data, threshold=threshold)
self.metric = metric
if use_ixs is None:
use_ixs = ~np.isnan(metric)
else:
use_ixs = np.logical_and(use_ixs, ~np.isnan(metric))
self.use_ixs = use_ixs
# Optimize c1
if c1 is None:
c1s = np.logspace(-1, 2, 11)
hn, rn, tv_metric, jpy, best_ix = self.optimize_c1(
metric, c1s, use_ixs, c2, periodic_detector, solver=solver
)
if tv_metric[best_ix] >= 0.009:
# rerun the optimizer with a new random data selection
hn, rn, tv_metric, jpy, best_ix = self.optimize_c1(
metric, c1s, use_ixs, c2, periodic_detector, solver=solver
)
# if np.isclose(hn[best_ix], hn[-1]):
# best_ix = np.argmax(hn * rn)
best_c1 = c1s[best_ix]
else:
best_c1 = c1
hn = None
rn = None
tv_metric = None
jpy = None
c1s = None
best_ix = None
s1, s2 = self.estimate_components(
metric, best_c1, c2, use_ixs, periodic_detector, solver=solver
)
# find indices of transition points
index_set = np.arange(len(s1) - 1)[np.round(np.diff(s1, n=1), 3) != 0]
# print(len(index_set), len(index_set) / (len(metric) / 365))
s1, s2 = self.estimate_components(
metric,
best_c1,
c2,
use_ixs,
periodic_detector,
transition_locs=index_set,
solver=solver,
)
jumps_per_year = len(index_set) / (len(metric) / 365)
cond1 = np.isclose(np.max(s2), 0.5)
cond2 = c1 is None
cond3 = self.__recursion_depth < 2
if cond1 and cond2 and cond3:
# Unlikely that constraint should be active or that there are more
# than 5 time shifts per year. Try a different random sampling
self.__recursion_depth += 1
self.run(
data,
use_ixs=use_ixs,
c1=c1,
c2=c2,
solar_noon_estimator=solar_noon_estimator,
threshold=threshold,
periodic_detector=periodic_detector,
solver=solver,
)
return
# Apply corrections
roll_by_index = np.round(
(mode(np.round(s1, 3)).mode[0] - s1) * data.shape[0] / 24, 0
)
correction_metric = np.average(np.abs(roll_by_index))
if correction_metric < 0.01:
roll_by_index[:] = 0
self.roll_by_index = roll_by_index
index_set = np.arange(len(roll_by_index) - 1)[
np.round(np.diff(roll_by_index, n=1), 3) != 0
]
Dout = self.apply_corrections(data)
# save results
self.normalized_holdout_error = hn
self.normalized_train_error = rn
self.tv_metric = tv_metric
self.jumps_per_year = jpy
self.c1_vals = c1s
self.best_c1 = best_c1
self.best_ix = best_ix
self.s1 = s1
self.s2 = s2
self.index_set = index_set
self.corrected_data = Dout
self.__recursion_depth = 0
def optimize_c1(self, metric, c1s, use_ixs, c2, periodic_detector, solver=None):
# set up train/test split with sklearn
ixs = np.arange(len(metric))
ixs = ixs[use_ixs]
train_ixs, test_ixs = train_test_split(ixs, test_size=0.75)
train = np.zeros(len(metric), dtype=bool)
test = np.zeros(len(metric), dtype=bool)
train[train_ixs] = True
test[test_ixs] = True
# initialize results objects
train_r = np.zeros_like(c1s)
test_r = np.zeros_like(c1s)
tv_metric = np.zeros_like(c1s)
jpy = np.zeros_like(c1s)
# iterate over possible values of c1 parameter
for i, v in enumerate(c1s):
s1, s2 = self.estimate_components(
metric, v, c2, train, periodic_detector, n_iter=5, solver=solver
)
y = metric
# collect results
train_r[i] = np.average(np.power((y - s1 - s2)[train], 2))
test_r[i] = np.average(np.power((y - s1 - s2)[test], 2))
tv_metric[i] = np.average(np.abs(np.diff(s1, n=1)))
count_jumps = np.sum(~np.isclose(np.diff(s1), 0, atol=1e-4))
jumps_per_year = count_jumps / (len(metric) / 365)
jpy[i] = jumps_per_year
def zero_one_scale(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
hn = zero_one_scale(test_r) # holdout error metrix
rn = zero_one_scale(train_r)
ixs = np.arange(len(c1s))
# Detecting more than 5 time shifts per year is extremely uncommon,
# and is considered non-physical
slct = jpy <= 5
best_ix = ixs[slct][np.argmin(hn[slct])]
return hn, rn, tv_metric, jpy, best_ix
def estimate_components(
self,
metric,
c1,
c2,
use_ixs,
periodic_detector,
transition_locs=None,
n_iter=5,
solver=None,
):
# Iterative reweighted L1 heuristic
w = np.ones(len(metric) - 1)
eps = 0.1
for i in range(n_iter):
s1, s2 = l2_l1d1_l2d2p365(
metric,
c1=c1,
c2=c2,
tv_weights=w,
use_ixs=use_ixs,
yearly_periodic=periodic_detector,
transition_locs=transition_locs,
seas_max=0.5,
solver=solver,
)
w = 1 / (eps + np.abs(np.diff(s1, n=1)))
return s1, s2
def plot_optimization(self, figsize=None):
if self.best_ix is not None:
c1s = self.c1_vals
hn = self.normalized_holdout_error
rn = self.normalized_train_error
best_c1 = self.best_c1
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=figsize)
ax[0].plot(c1s, hn, marker=".")
ax[0].axvline(best_c1, ls="--", color="red")
ax[0].set_title("holdout validation")
ax[1].plot(c1s, self.jumps_per_year, marker=".")
ax[1].axvline(best_c1, ls="--", color="red")
ax[1].set_title("jumps per year")
ax[2].plot(c1s, rn, marker=".")
ax[2].axvline(best_c1, ls="--", color="red")
ax[2].set_title("training residuals")
ax[3].plot(c1s, self.tv_metric, marker=".")
ax[3].axvline(best_c1, ls="--", color="red")
ax[3].set_xscale("log")
ax[3].set_title("Total variation metric")
plt.tight_layout()
return fig
def apply_corrections(self, data):
roll_by_index = self.roll_by_index
Dout = np.copy(data)
for roll in np.unique(roll_by_index):
if roll != 0:
ixs = roll_by_index == roll
Dout[:, ixs] = np.roll(data, int(roll), axis=0)[:, ixs]
return Dout
def invert_corrections(self, data):
roll_by_index = self.roll_by_index
Dout = np.copy(data)
for roll in np.unique(roll_by_index):
if roll != 0:
ixs = roll_by_index == roll
Dout[:, ixs] = np.roll(data, -int(roll), axis=0)[:, ixs]
return Dout
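# Usage sketch (illustration only): `data` is the 2-D power matrix produced by
# solardatatools (rows = intra-day time steps, columns = days), which is what
# run() and apply_corrections() assume.
# ts = TimeShift()
# ts.run(data, solar_noon_estimator="srss", threshold=0.1)
# corrected = ts.corrected_data   # shifted days rolled back into alignment
# ts.plot_optimization()          # only informative when c1 was swept (c1=None)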
| 36.046693 | 84 | 0.563795 | 1,214 | 9,264 | 4.112026 | 0.224876 | 0.021635 | 0.035256 | 0.012019 | 0.260817 | 0.213542 | 0.187901 | 0.147236 | 0.128606 | 0.119391 | 0 | 0.032348 | 0.33927 | 9,264 | 256 | 85 | 36.1875 | 0.783205 | 0.143135 | 0 | 0.242857 | 0 | 0 | 0.013777 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038095 | false | 0 | 0.033333 | 0.004762 | 0.109524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a77fc41890e58af45741ab29df69bd92e4369aac | 8,014 | py | Python | tricircleclient/v1/jobs_cli.py | electrocucaracha/python-tricircleclient | 27bfddea9d3d13670b4aae40698b88751633132f | [
"Apache-2.0"
] | null | null | null | tricircleclient/v1/jobs_cli.py | electrocucaracha/python-tricircleclient | 27bfddea9d3d13670b4aae40698b88751633132f | [
"Apache-2.0"
] | null | null | null | tricircleclient/v1/jobs_cli.py | electrocucaracha/python-tricircleclient | 27bfddea9d3d13670b4aae40698b88751633132f | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from osc_lib.command import command
from oslo_log import log as logging
from six.moves.urllib import parse
from tricircleclient import constants
from tricircleclient import utils
def _job_from_args(parsed_args):
# necessary parameters
data = {'type': parsed_args.type,
'project_id': parsed_args.project_id,
}
# optional parameters vary with job type
resources = {}
for id in constants.job_resource_map[data['type']]:
resources[id] = getattr(parsed_args, id, None)
data['resource'] = resources
return {'job': data}
def _add_pagination_argument(parser):
parser.add_argument(
'--limit',
dest='limit', metavar="<num-jobs>", type=int,
help="Maximum number of jobs to return",
default=None)
def _add_marker_argument(parser):
parser.add_argument(
'--marker',
dest='marker', metavar="<job>", type=str,
help="ID of last job in previous page, jobs after marker will be "
"returned. Display all jobs if not specified.",
default=None)
def _add_filtering_arguments(parser):
# available filtering fields: project ID, type, status
parser.add_argument(
'--project-id',
dest='project_id', metavar="<project-id>", type=str,
help="ID of a project object in Keystone",
default=None)
parser.add_argument(
'--type',
dest='type', metavar="<type>", type=str,
choices=constants.job_resource_map.keys(),
help="Job type",
default=None)
parser.add_argument(
'--status',
dest='status', metavar="<status>", type=lambda str: str.lower(),
choices=['new', 'running', 'success', 'fail'],
help="Execution status of the job. It's case-insensitive",
default=None)
def _add_search_options(parsed_args):
search_opts = {}
for key in ('limit', 'marker', 'project_id', 'type', 'status'):
value = getattr(parsed_args, key, None)
if value is not None:
search_opts[key] = value
return search_opts
def _prepare_query_string(params):
"""Convert dict params to query string"""
params = sorted(params.items(), key=lambda x: x[0])
return '?%s' % parse.urlencode(params) if params else ''
def expand_job_resource(job):
# because job['resource'] is a dict value, so we should
# expand its values and let them show as other fields in the
# same level.
for id in constants.job_resource_map[job['type']]:
job[id] = job['resource'][id]
job.pop('resource')
return job
class ListJobs(command.Lister):
"""List Jobs"""
log = logging.getLogger(__name__ + ".ListJobs")
path = '/jobs'
def get_parser(self, prog_name):
parser = super(ListJobs, self).get_parser(prog_name)
_add_pagination_argument(parser)
_add_marker_argument(parser)
_add_filtering_arguments(parser)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
client = self.app.client_manager.multiregion_networking
# add pagination/marker/filter to list operation
search_opts = _add_search_options(parsed_args)
self.path += _prepare_query_string(search_opts)
data = client.job.list(self.path)
column_headers = utils.prepare_column_headers(constants.COLUMNS,
constants.COLUMNS_REMAP)
return utils.list2cols(constants.COLUMNS, data['jobs'], column_headers)
class CreateJob(command.ShowOne):
"""Create a Job"""
log = logging.getLogger(__name__ + ".CreateJob")
def get_parser(self, prog_name):
parser = super(CreateJob, self).get_parser(prog_name)
# as resource is a compound attribute, so we expand its fields
# and list them as optional parameters. If new resources are
# provisioned, they should be added here.
parser.add_argument(
'--type',
metavar="<type>",
required=True,
help="Job type",
)
parser.add_argument(
'--project_id',
metavar="<project-id>",
required=True,
help="ID of a project object in Keystone",
)
parser.add_argument(
'--router_id',
metavar="<router-id>",
help="ID of a router",
)
parser.add_argument(
'--network_id',
metavar="<network-id>",
help="ID of a network",
)
parser.add_argument(
'--pod_id',
metavar="<pod-id>",
help="ID of a pod",
)
parser.add_argument(
'--port_id',
metavar="<port-id>",
help="ID of a port",
)
parser.add_argument(
'--trunk_id',
metavar="<trunk-id>",
help="ID of a trunk",
)
parser.add_argument(
'--subnet_id',
metavar="<subnet-id>",
help="ID of a subnet",
)
parser.add_argument(
'--portchain_id',
metavar="<portchain-id>",
help="ID of a port chain",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
client = self.app.client_manager.multiregion_networking
data = client.job.create(_job_from_args(parsed_args))
if 'job' in data.keys():
return self.dict2columns(expand_job_resource(data['job']))
class ShowJob(command.ShowOne):
"""Display Job details."""
log = logging.getLogger(__name__ + ".ShowJob")
def get_parser(self, prog_name):
parser = super(ShowJob, self).get_parser(prog_name)
parser.add_argument(
"job",
metavar="<job>",
help="ID of the job to display",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
client = self.app.client_manager.multiregion_networking
data = client.job.get(parsed_args.job)
if 'job' in data.keys():
return self.dict2columns(expand_job_resource(data['job']))
class DeleteJob(command.Command):
"""Delete a Job."""
log = logging.getLogger(__name__ + ".DeleteJob")
def get_parser(self, prog_name):
parser = super(DeleteJob, self).get_parser(prog_name)
parser.add_argument(
"job",
metavar="<job>",
nargs="+",
help="ID(s) of the job(s) to delete",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
client = self.app.client_manager.multiregion_networking
for job_id in parsed_args.job:
client.job.delete(job_id)
class RedoJob(command.Command):
"""Redo a Job."""
log = logging.getLogger(__name__ + ".RedoJob")
def get_parser(self, prog_name):
parser = super(RedoJob, self).get_parser(prog_name)
parser.add_argument(
'job',
metavar="<job>",
help="ID of the job to redo",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
client = self.app.client_manager.multiregion_networking
client.job.update(parsed_args.job)
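# Worked example (illustration only): list filters are appended to the request
# path as a sorted, URL-encoded query string by _prepare_query_string(), e.g.
# _prepare_query_string({"type": "router", "limit": 5}) -> "?limit=5&type=router"
# _prepare_query_string({}) -> ""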
| 30.704981 | 79 | 0.608685 | 975 | 8,014 | 4.826667 | 0.228718 | 0.044624 | 0.061411 | 0.017212 | 0.376541 | 0.286443 | 0.262856 | 0.250106 | 0.19932 | 0.19932 | 0 | 0.001378 | 0.275393 | 8,014 | 260 | 80 | 30.823077 | 0.809024 | 0.133766 | 0 | 0.351648 | 0 | 0 | 0.146444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093407 | false | 0 | 0.027473 | 0 | 0.247253 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a77fd86c2a035ebbaf57543cd05a280bfb3e358d | 40,091 | py | Python | LeleNet_trn.py | binary-bisam/LeleNet | e1b546da575502d299f428ce2f5c914115282037 | [
"Unlicense"
] | 1 | 2021-06-05T10:20:41.000Z | 2021-06-05T10:20:41.000Z | LeleNet_trn.py | binary-bisam/LeleNet | e1b546da575502d299f428ce2f5c914115282037 | [
"Unlicense"
] | 1 | 2021-05-26T09:09:31.000Z | 2021-05-28T17:45:39.000Z | LeleNet_trn.py | binary-bisam/LeleNet | e1b546da575502d299f428ce2f5c914115282037 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 17:56:34 2021
@author: Manuel
Full implementation
run from terminal: python ~/LeleNet/py3/LeleNet_trn.py "U-Net" 10 40
"""
__author__ = "Manuel R. Popp"
#### parse arguments-----------------------------------------------------------
# Import arguments
import argparse, pickle
def parseArguments():
parser = argparse.ArgumentParser()
# Positional mandatory arguments
parser.add_argument("model", help = "Model; one in U-Net, FCDenseNet)",\
type = str)
parser.add_argument("bs", help = "Batchsize (int)",\
type = int)
parser.add_argument("ep", help = "Training epochs (int)",\
type = int)
# Optional arguments
parser.add_argument("-lr", "--lr",\
help = "Initial learning rate (float).",\
type = float, default = 1e-4)
parser.add_argument("-lrd", "--lrd",\
help = "Learning rate decay factor (float).",\
type = float, default = 0.95)
parser.add_argument("-lrs", "--lrs",\
help = "Learning rate decay step size (int).",\
type = int, default = 2)
parser.add_argument("-esp", "--esp",\
help = "Early stopping patience (int).",\
type = int, default = None)
parser.add_argument("-op", "--op",\
help = "Optimizer. 'Adam', 'rms', or 'sgd'.",\
type = str, default = "rms")
parser.add_argument("-ki", "--ki",\
help = "Kernel initialiser.",\
type = str, default = None)
parser.add_argument("-do", "--do",\
help = "Dropout rate.",\
type = float, default = 0.1)
parser.add_argument("-xf", "--xf",\
help = "Image format; either png, jpg, or tif.",\
type = str, default = "png")
parser.add_argument("-yf", "--yf",\
help = "Image format; either png, jpg, or tif.",\
type = str, default = "png")
parser.add_argument("-imgr", "--imgr",\
help = "Image x resolution (rows).", type = int,\
default = None)
parser.add_argument("-imgc", "--imgc",\
help = "Image y resolution (columns).", type = int,\
default = None)
parser.add_argument("-imgd", "--imgd",\
help = "Image dimensions (rows = columns).",\
type = int, default = None)
parser.add_argument("-imgdim", "--imgdim",\
help = "X image dimensions (colours).", type = int,\
default = 3)
parser.add_argument("-nc", "--nc",\
help = "Number of classes.", type = int,\
default = None)
parser.add_argument("-ww", "--ww",\
help = ("Weights scaling factor. Inverse weights =" +\
"1/(weights**ww) or 1/math.log(weights, ww)"), \
type = float, default = 0.0)
parser.add_argument("-ws", "--ws",\
help = ("Weight scaling (either 'exp' or 'log'."), \
type = str, default = "exp")
parser.add_argument("-wd", "--wd",\
help = "Alternative working directory.", type = str,\
default = "")
parser.add_argument("-yr", "--yr",\
help = ("Sampling date of the data" +\
"as MM_YYYY. Default: '03_2021'"),\
type = str, default = "03_2021")
parser.add_argument("-r", "--r",\
help = ("Resume from checkpoint. Either 'f' (False;" +\
" default), 't' (True), or date of a specif" +\
"ic training event (folder name)."),\
type = str, default = "f")
parser.add_argument("-save_settings", "--sv",\
help = "Save training settings.", type = bool,\
default = True)
# Parse arguments
args = parser.parse_args()
return args
if __name__ == "__main__":
# Parse the arguments
args = parseArguments()
# debug mode
if False:
import pickle
saved_args = "C:\\Users\\Manuel\\Nextcloud\\Masterarbeit\\py3\\vrs\\train_settings.pkl"
with open(saved_args, "rb") as f:
args = pickle.load(f)
args.wd = "home"
mdl = args.model
bs = args.bs
epochz = args.ep
init_lr = args.lr
decay_lr = args.lrd
step_lr = args.lrs
es_patience = args.esp if args.esp is not None else epochz
optmer = args.op
kernel_init = args.ki
drop = args.do
xf = args.xf
yf = args.yf
imgdim = args.imgdim
ww = args.ww
ws = args.ws
wd = args.wd
year = args.yr
resume_training = args.r
# case insensitive arguments
mdl, optmer, xf, yf, wd, resume_training = mdl.casefold(), optmer.casefold(),\
xf.casefold(), yf.casefold(), wd.casefold(), resume_training.casefold()
#### basic settings------------------------------------------------------------
import platform, sys, datetime, pathlib, os
OS = platform.system()
OS_version = platform.release()
py_version = sys.version
t_start = datetime.datetime.utcnow()
import tensorflow as tf
print("Running on " + OS + " " + OS_version + ".\nPython version: " +
py_version + "\nTensorflow version: " + tf.__version__ +
"\nUTC time (start): " + str(t_start) +
"\nLocal time (start): " + str(datetime.datetime.now()))
# Model (one of "mod_UNet", "mod_FCD")
if mdl in ["u-net", "unet", "mod_unet", "mod_u-net", "u_net"]:
mod = "mod_UNet"
elif mdl in ["fcd", "fcdensenet", "fc-densenet", "fc-dense-net"]:
mod = "mod_FCD"
else:
raise ValueError("Unexpected input for argument 'model': " + str(mdl))
### general directory functions------------------------------------------------
import numpy as np
if wd == "home":
if OS == "Linux":
if platform.release() == "4.18.0-193.60.2.el8_2.x86_64":
wd = "/home/kit/ifgg/mp3890/LeleNet"
else:
wd = "/home/manuel/Nextcloud/Masterarbeit"
elif OS == "Windows":
wd = os.path.join("C:\\", "Users", "Manuel",\
"Nextcloud", "Masterarbeit")
else:
raise Exception("OS not detected.")
elif wd == "":
pydir = os.path.dirname(os.path.realpath(__file__))
wd = os.path.dirname(pydir)
else:
wd = args.wd
def dir_fig(fig_id = None):
if fig_id == None:
return os.path.join(wd, "fig")
else:
return os.path.join(wd, "fig", fig_id)
def dir_dat(dat_id = None):
if dat_id == None:
return os.path.join(wd, "dat")
else:
dat_id = dat_id.split(",")
return os.path.join(wd, "dat", *dat_id)
def dir_out(*out_id):
if len(out_id) < 1:
return os.path.join(wd, "out")
else:
out_lst = list(out_id)
out_ids = os.path.sep.join(out_lst)
return os.path.join(wd, "out", out_ids)
def dir_var(pkl_name = None):
if pkl_name == None:
return os.path.join(wd, "py3", "vrs")
else:
return os.path.join(wd, "py3", "vrs", pkl_name + ".pkl")
def save_var(variables, name):
os.makedirs(dir_var(), exist_ok = True)
with open(dir_var(pkl_name = name), "wb") as f:
pickle.dump(variables, f)
def get_var(name):
with open(dir_var(pkl_name = name), "rb") as f:
return pickle.load(f)
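# Example (illustrative paths): comma-separated dat_id parts become nested
# directories, so dir_dat("xls,SpeciesList.xlsx") resolves to
# <wd>/dat/xls/SpeciesList.xlsx and dir_out("mod", "ckpt") to <wd>/out/mod/ckpt.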
os.chdir(wd)
with open(dir_out("System_info.txt"), "w") as f:
f.write("Most recent run on " + OS + " " + OS_version +
".\nPython version: " +
py_version + "\nTensorflow version: " + tf.__version__ +
"\nUTC time (start): " + str(t_start) +
"\nLocal time (start): " + str(datetime.datetime.now()))
if args.sv:
save_var(args, "train_settings")
print("Saved training settings.")
#### data preparation directory functions--------------------------------------
def dir_omk(plot_id = None, myear = None, type_ext = ""):
# returns list!
if plot_id == None:
if myear == None:
return dir_dat("omk")
else:
return os.path.join(dir_dat("omk"), myear)
else:
if myear == None:
return list(pathlib.Path(dir_dat("omk")) \
.glob("**/*" + plot_id + type_ext + ".tif"))
else:
return list(pathlib.Path(os.path.join(dir_dat("omk"), myear)) \
.glob("**/*" + plot_id + type_ext + ".tif"))
def dir_tls(myear = None, dset = None, plot_id = None):
if plot_id == None:
if myear == None:
if dset == None:
return dir_dat("tls")
else:
return dir_dat("tls")
raise Exception("Missing year. Returning tile directory.")
else:
if dset == None:
return os.path.join(dir_dat("tls"), myear)
else:
return os.path.join(dir_dat("tls"), myear, dset, "0")
else:
if myear == None:
return dir_dat("tls")
raise Exception("Missing year. Returning tile directory.")
else:
if dset == None:
return os.path.join(dir_dat("tls"), myear)
raise Exception("Missing dset (X or y)." +\
"Returning tile directory.")
else:
return os.path.join(dir_dat("tls"), myear, dset, "0", plot_id)
def save_dataset_info(variables, year = year, name = "dset_info"):
tile_dir = dir_tls(myear = year)
os.makedirs(tile_dir, exist_ok = True)
with open(tile_dir + os.path.sep + name + ".pkl", "wb") as f:
pickle.dump(variables, f)
def get_dataset_info(year = year, name = "dset_info"):
tile_dir = dir_tls(myear = year)
with open(tile_dir + os.path.sep + name + ".pkl", "rb") as f:
return pickle.load(f)
def toINT(filename):
imgINT = filename.astype("uint8")
return imgINT
# get tile dimensions if not specified-----------------------------------------
from PIL import Image
if (args.imgr is None or args.imgc is None) and args.imgd is None:
imgs = list(pathlib.Path(os.path.dirname(dir_tls(myear = year,\
dset = "y")))\
.glob("**/*." + yf))
im = Image.open(imgs[0])
w, h = im.size
im.close()
# image dimensions
if args.imgr != args.imgc:
print("Warning: Arguments imgr and imgc do not match.")
if args.imgr is not None:
imgr = args.imgr
else:
imgr = h
if args.imgc is not None:
imgc = args.imgc
else:
imgc = w
if args.imgd is not None:
print("Argument imgd set. imgd overwrites imgr and imgc.")
imgr = args.imgd
imgc = args.imgd
# Data preparation-------------------------------------------------------------
### Run file DataPreparation.py
### read dictionary to group species to classes, if need be
import pandas as pd
specdict = pd.read_excel(dir_dat("xls,SpeciesList.xlsx"),
sheet_name = "Dictionary", header = 0)
# exec(open("A1_DataPreparation.py").read())
## load information generated during data preparation--------------------------
classes, classes_decoded, NoDataValue, no_data_class, abc = get_dataset_info()
N_CLASSES = len(classes) if no_data_class or abc else len(classes) + 1
if args.nc is not None:
N_CLASSES = args.nc
# Setup for training-----------------------------------------------------------
os.chdir(os.path.join(wd, "py3"))
os.chdir(wd)
# import modules---------------------------------------------------------------
#already done in A0_LeleNet.py: import tensorflow as tf
#import tensorflow_io as tfio
from tensorflow import keras as ks
AUTOTUNE = tf.data.experimental.AUTOTUNE
tf.__version__
ks.__version__
## make GPU available----------------------------------------------------------
phys_devs = tf.config.experimental.list_physical_devices("GPU")
print("N GPUs available: ", len(phys_devs))
#if len(phys_devs) >= 1 and False:
# tf.config.experimental.set_memory_growth(phys_devs[0], True)
#else:
# #os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
# my_devices = tf.config.experimental.list_physical_devices(device_type = "CPU")
# tf.config.experimental.set_visible_devices(devices = my_devices, device_type = "CPU")
# print("No GPUs used.")
## general options/info--------------------------------------------------------
N_img = len(list(pathlib.Path(dir_tls(myear = year, dset = "X")) \
.glob("**/*." + xf)))
N_val = len(list(pathlib.Path(dir_tls(myear = year, dset = "X_val")) \
.glob("**/*." + xf)))
zeed = 42
## build data loader-----------------------------------------------------------
def parse_image(img_path: str) -> dict:
# read image
image = tf.io.read_file(img_path)
if xf == "png":
image = tf.image.decode_png(image, channels = 3)
elif xf == "jpg":
image = tf.image.decode_jpeg(image, channels = 3)
elif xf == "tif":
import tensorflow_io as tfio
image = tfio.experimental.image.decode_tiff(image)
else:
print("Invalid X data format. Allowed formats: png, jpg, tif")
# read mask
mask_path = tf.strings.regex_replace(img_path, "X", "y")
mask_path = tf.strings.regex_replace(mask_path, "X." + xf, "y." + yf)
mask_path = tf.strings.regex_replace(mask_path, "image", "mask")
mask = tf.io.read_file(mask_path)
if yf == "png":
mask = tf.image.decode_png(mask, channels = 1)
elif yf == "tif":
import tensorflow_io as tfio
mask = tfio.experimental.image.decode_tiff(mask)
else:
print("Invalid y data format. Allowed formats: png, tif")
mask = tf.where(mask == 255, np.dtype("uint8").type(NoDataValue), mask)
return {"image": image, "segmentation_mask": mask}
train_dataset = tf.data.Dataset.list_files(
dir_tls(myear = year, dset = "X") + os.path.sep + "*." + xf, seed = zeed)
train_dataset = train_dataset.map(parse_image)
val_dataset = tf.data.Dataset.list_files(
dir_tls(myear = year, dset = "X_val") + os.path.sep + "*." + xf, seed = zeed)
val_dataset = val_dataset.map(parse_image)
## data transformations--------------------------------------------------------
@tf.function
def normalise(input_image: tf.Tensor, input_mask: tf.Tensor) -> tuple:
input_image = tf.cast(input_image, tf.float32) / 255.0
input_mask = tf.round(input_mask)
input_mask = tf.cast(input_mask, tf.uint8)
return input_image, input_mask
@tf.function
def load_image_train(datapoint: dict) -> tuple:
input_image = tf.image.resize(datapoint["image"], (imgr, imgc))
input_mask = tf.image.resize(datapoint["segmentation_mask"], (imgr, imgc))
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
# more experimental data augmentation
'''
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_up_down(input_image)
input_mask = tf.image.flip_up_down(input_mask)
input_image = tf.image.random_brightness(input_image, max_delta = 0.2)
input_image = tf.image.random_contrast(input_image, lower = 0.0, \
upper = 0.05)
input_image = tf.image.random_saturation(input_image, lower = 0.0, \
upper = 0.05)
'''
input_image, input_mask = normalise(input_image, input_mask)
return input_image, input_mask
@tf.function
def load_image_test(datapoint: dict) -> tuple:
input_image = tf.image.resize(datapoint["image"], (imgr, imgc))
input_mask = tf.image.resize(datapoint["segmentation_mask"], (imgr, imgc))
input_image, input_mask = normalise(input_image, input_mask)
return input_image, input_mask
## create datasets-------------------------------------------------------------
buff_size = 1000
dataset = {"train": train_dataset, "val": val_dataset}
# train dataset
dataset["train"] = dataset["train"]\
.map(load_image_train, num_parallel_calls = tf.data.experimental.AUTOTUNE)
dataset["train"] = dataset["train"].shuffle(buffer_size = buff_size,
seed = zeed)
dataset["train"] = dataset["train"].repeat()
dataset["train"] = dataset["train"].batch(bs)
dataset["train"] = dataset["train"].prefetch(buffer_size = AUTOTUNE)
# validation dataset
dataset["val"] = dataset["val"].map(load_image_test)
dataset["val"] = dataset["val"].repeat()
dataset["val"] = dataset["val"].batch(bs)
dataset["val"] = dataset["val"].prefetch(buffer_size = AUTOTUNE)
print(dataset["train"])
print(dataset["val"])
# define weights for categorical crossentropy loss function--------------------
def calculate_weights(directory, n_classes):
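# incremental (running) mean of per-class pixel counts over all mask tiles;
# 'gravity' counts how many masks have been folded into the average so far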
imgs = list(pathlib.Path(directory).glob("**/*." + yf))
weights = np.array([0] * n_classes)
gravity = 0
for img in imgs:
im = Image.open(img)
vals = np.array(im.getdata(), dtype = np.uint8)
unique, counts = np.unique(vals, return_counts = True)
classweights = np.array([0] * n_classes)
classweights[unique.astype(int)] = counts
weights = ((weights * gravity) + classweights) / (gravity + 1)
gravity += 1
im.close()
return weights
def estimate_weights(directory, n_classes, N = 500):
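# same running mean as calculate_weights, but estimated from N randomly drawn masks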
import random
imgs = list(pathlib.Path(directory).glob("**/*." + yf))
weights = np.array([0] * n_classes)
gravity = 1
for i in range(N):
x = random.randint(0, (len(imgs) - 1))
im = Image.open(imgs[x])
vals = np.array(im.getdata(), dtype = np.uint8)
unique, counts = np.unique(vals, return_counts = True)
classweights = np.array([0] * n_classes)
classweights[unique.astype(int)] = counts
weights = ((weights * gravity) + classweights) / (gravity + 1)
gravity += 1
im.close()
return weights
import glob, time
if ww != 0:
if os.path.isfile(dir_var("weights")):
print("Loading class weights...")
WEIGHTS, weights_timestamp = get_dataset_info("weights")
print("Checking class weights timestamp...")
latest_mod = max(glob.glob(dir_tls(myear = year, dset = "y") + \
os.path.sep + "*"), key = os.path.getctime)
img_mod_timestamp = os.path.getmtime(latest_mod)
img_mod_timestamp = datetime.datetime.fromtimestamp(img_mod_timestamp)
if weights_timestamp < img_mod_timestamp:
print("Weights out of date. Calculating new class weights...")
WEIGHTS = calculate_weights(
os.path.dirname(dir_tls(myear = year, dset = "y")), N_CLASSES)
weights_timestamp = datetime.datetime.now()
save_dataset_info(variables = [WEIGHTS, weights_timestamp],
name = "weights")
else:
print("Calculating class weights...")
WEIGHTS = calculate_weights(os.path.dirname( \
dir_tls(myear = year, dset = "y")), N_CLASSES)
weights_timestamp = datetime.datetime.now()
save_dataset_info(variables = [WEIGHTS, weights_timestamp],
name = "weights")
NORMWEIGHTS = WEIGHTS / max(WEIGHTS)
### inverse frequency as weights
#inv_weights = tf.constant((1 / (WEIGHTS + 0.01)), dtype = tf.float32,
# shape = [1, 1, 1, N_CLASSES])
import math
inv_weights = (1 / (NORMWEIGHTS + 0.01)**(ww)) if ws == "exp" else \
[1 / math.log(nw, ww) for nw in NORMWEIGHTS]
inv_weights = np.asarray(inv_weights) / max(inv_weights) # asarray: the log branch above yields a plain list
print("Calculated the following weights:", inv_weights)
## add weights-----------------------------------------------------------------
def add_sample_weights(image, segmentation_mask):
class_weights = tf.constant(inv_weights, dtype = tf.float32)
class_weights = class_weights/tf.reduce_sum(class_weights)
sample_weights = tf.gather(class_weights,
indices = tf.cast(segmentation_mask, tf.int32))
return image, segmentation_mask, sample_weights
if ww != 0:
dataset["train"].map(add_sample_weights).element_spec
# Get model--------------------------------------------------------------------
os.chdir(os.path.join(wd, "py3"))
if kernel_init is not None:
k_initializers = { \
"he_normal" : "he_normal", \
"he_uniform" : "he_uniform", \
"random_uniform" : ks.initializers.RandomUniform(minval=0., maxval=1.), \
"truncated_normal" : ks.initializers.TruncatedNormal(mean=0.0, \
stddev=0.05) \
}
initializer = k_initializers[kernel_init.casefold()]
if mod == "mod_UNet":
if kernel_init is None:
initializer = "he_normal"
def UNet(n_classes, input_shape = (imgr, imgc, imgdim), dropout = drop, \
filters = 64, \
ops = {"activation" : "relu",
"padding" : "same",
"kernel_initializer" : initializer
}):
# input layer
inputz = ks.layers.Input(shape = input_shape)
# encoder part
## 1st convolution
c1 = ks.layers.Conv2D(filters, (3, 3), **ops)(inputz)
c1 = ks.layers.Conv2D(filters, (3, 3), **ops)(c1)
## 1st max pooling
p1 = ks.layers.MaxPooling2D(pool_size = (2, 2))(c1)
## 2nd convolution
c2 = ks.layers.Conv2D(filters*2, (3, 3), **ops)(p1)
c2 = ks.layers.Conv2D(filters*2, (3, 3), **ops)(c2)
## 2nd max pooling
p2 = ks.layers.MaxPooling2D(pool_size = (2, 2))(c2)
## 3rd convolution
c3 = ks.layers.Conv2D(filters*4, (3, 3), **ops)(p2)
c3 = ks.layers.Conv2D(filters*4, (3, 3), **ops)(c3)
## 3rd max pooling
p3 = ks.layers.MaxPooling2D(pool_size = (2, 2))(c3)
## 4th convolution
c4 = ks.layers.Conv2D(filters*8, (3, 3), **ops)(p3)
c4 = ks.layers.Conv2D(filters*8, (3, 3), **ops)(c4)
## Drop
d4 = ks.layers.Dropout(dropout)(c4)
## 4th max pooling
p4 = ks.layers.MaxPooling2D(pool_size = (2, 2))(d4)
## 5th convolution
c5 = ks.layers.Conv2D(filters*16, (3, 3), **ops)(p4)
c5 = ks.layers.Conv2D(filters*16, (3, 3), **ops)(c5)
## Drop
d5 = ks.layers.Dropout(dropout)(c5)
# decoder part
## 1st up convolution
us6 = ks.layers.UpSampling2D(size = (2, 2))(d5)
up6 = ks.layers.Conv2D(filters*8, (2, 2), **ops)(us6)
## merge
ct6 = ks.layers.concatenate([d4, up6], axis = 3)
uc6 = ks.layers.Conv2D(filters*8, (3, 3), **ops)(ct6)
uc6 = ks.layers.Conv2D(filters*8, (3, 3), **ops)(uc6)
## 2nd up convolution
us7 = ks.layers.UpSampling2D(size = (2, 2))(uc6)
up7 = ks.layers.Conv2D(filters*4, (2, 2), **ops)(us7)
## merge
ct7 = ks.layers.concatenate([c3, up7], axis = 3)
uc7 = ks.layers.Conv2D(filters*4, (3, 3), **ops)(ct7)
uc7 = ks.layers.Conv2D(filters*4, (2, 2), **ops)(uc7)
## 3rd up convolution
us8 = ks.layers.UpSampling2D(size = (2, 2))(uc7)
up8 = ks.layers.Conv2D(filters*2, (2, 2), **ops)(us8)
## merge
ct8 = ks.layers.concatenate([c2, up8], axis = 3)
uc8 = ks.layers.Conv2D(filters*2, (3, 3), **ops)(ct8)
uc8 = ks.layers.Conv2D(filters*2, (3, 3), **ops)(uc8)
## 4th up convolution
us9 = ks.layers.UpSampling2D(size = (2, 2))(uc8)
up9 = ks.layers.Conv2D(filters, (2, 2), **ops)(us9)
## merge
ct9 = ks.layers.concatenate([c1, up9], axis = 3)
uc9 = ks.layers.Conv2D(filters, (3, 3), **ops)(ct9)
uc9 = ks.layers.Conv2D(filters, (3, 3), **ops)(uc9)
uc9 = ks.layers.Conv2D(2, (3, 3), **ops)(uc9)
# output layer
if n_classes > 2:
outputz = ks.layers.Conv2D(n_classes, (1, 1), \
activation = "softmax")(uc9)
else:
outputz = ks.layers.Conv2D(1, (1, 1), activation = "sigmoid")(uc9)
model = ks.Model(inputs = [inputz], outputs = [outputz])
print(model.summary())
print(f'Total number of layers: {len(model.layers)}')
return model
# get model
model = UNet(n_classes = N_CLASSES)
# directory to save model
os.makedirs(dir_out("mod_UNet"), exist_ok = True)
elif mod == "mod_FCD":
if kernel_init is None:
initializer = "he_uniform"
def BN_ReLU_Conv(inputs, n_filters, filter_size = 3, dropout_p = drop):
l = ks.layers.BatchNormalization()(inputs)
l = ks.layers.Activation("relu")(l)
l = ks.layers.Conv2D(n_filters, filter_size, activation = None,
padding = "same",
kernel_initializer = initializer) (l)
if dropout_p != 0.0:
l = ks.layers.Dropout(dropout_p)(l)
return l
def TransitionDown(inputs, n_filters, dropout_p = drop):
l = BN_ReLU_Conv(inputs, n_filters, filter_size = 1,\
dropout_p = dropout_p)
l = ks.layers.MaxPool2D(pool_size = (2, 2))(l)
return l
def TransitionUp(skip_connection, block_to_upsample, n_filters_keep):
l = ks.layers.concatenate(block_to_upsample)
l = ks.layers.Conv2DTranspose(n_filters_keep, kernel_size = (3, 3),
strides = (2, 2), padding = "same",
kernel_initializer = initializer)(l)
l = ks.layers.concatenate([l, skip_connection])
return l
def FCDense(n_classes, input_shape = (imgr, imgc, imgdim),
n_filters_first_conv = 48, n_pool = 4, growth_rate = 12,
n_layers_per_block = 5, dropout_p = drop):
"""
Original note from the authors of the FC-DenseNet:
The network consist of a downsampling path, where dense blocks and
transition down are applied, followed
by an upsampling path where transition up and dense blocks are applied.
Skip connections are used between the downsampling path and the
upsampling path
Each layer is a composite function of BN - ReLU - Conv and the last
layer is a softmax layer.
:param input_shape: shape of the input batch. Only the first dimension
(n_channels) is needed
:param n_classes: number of classes
:param n_filters_first_conv: number of filters for the first
convolution applied
:param n_pool: number of pooling layers = number of transition down =
number of transition up
:param growth_rate: number of new feature maps created by each layer
in a dense block
:param n_layers_per_block: number of layers per block. Can be an int
or a list of size 2 * n_pool + 1
:param dropout_p: dropout rate applied after each convolution
(0. for not using)
"""
# check n_layers_per_block setting
if type(n_layers_per_block) == list:
assert(len(n_layers_per_block) == 2*n_pool + 1)
elif type(n_layers_per_block) == int:
n_layers_per_block = [n_layers_per_block]*(2*n_pool + 1)
else:
raise ValueError
# Input layer, m = 3
inputz = tf.keras.layers.Input(shape = input_shape)
# first convolution; store feature maps in the Tiramisu
# 3 x 3 convolution, m = 48
Tiramisu = ks.layers.Conv2D(filters = n_filters_first_conv,
kernel_size = (3, 3), strides = (1, 1),
padding = "same", dilation_rate = (1, 1),
activation = "relu",
kernel_initializer = initializer
)(inputz)
n_filters = n_filters_first_conv
# downsampling path, n*(dense block + transition down)
skip_connection_list = []
for i in range(n_pool):
## dense block
for j in range(n_layers_per_block[i]):
### Compute new feature maps
l = BN_ReLU_Conv(Tiramisu, growth_rate, dropout_p = dropout_p)
### and stack it---the Tiramisu is growing
Tiramisu = ks.layers.concatenate([Tiramisu, l])
n_filters += growth_rate
## store Tiramisu in skip_connections list
skip_connection_list.append(Tiramisu)
## transition Down
Tiramisu = TransitionDown(Tiramisu, n_filters, dropout_p)
skip_connection_list = skip_connection_list[::-1]
# bottleneck
## store output of subsequent dense block; upsample only these new features
block_to_upsample = []
# dense Block
for j in range(n_layers_per_block[n_pool]):
l = BN_ReLU_Conv(Tiramisu, growth_rate, dropout_p = dropout_p)
block_to_upsample.append(l)
Tiramisu = ks.layers.concatenate([Tiramisu, l])
# upsampling path
for i in range(n_pool):
## Transition Up ( Upsampling + concatenation with the skip connection)
n_filters_keep = growth_rate * n_layers_per_block[n_pool + i]
Tiramisu = TransitionUp(skip_connection_list[i], block_to_upsample,
n_filters_keep)
## dense Block
block_to_upsample = []
for j in range(n_layers_per_block[n_pool + i + 1]):
l = BN_ReLU_Conv(Tiramisu, growth_rate, dropout_p = dropout_p)
block_to_upsample.append(l)
Tiramisu = ks.layers.concatenate([Tiramisu, l])
# output layer; 1x1 convolution, m = number of classes
if n_classes > 2:
outputz = ks.layers.Conv2D(n_classes, (1, 1), \
activation = "softmax")(Tiramisu)
else:
outputz = ks.layers.Conv2D(1, (1, 1), \
activation = "sigmoid")(Tiramisu)
model = tf.keras.Model(inputs = [inputz], outputs = [outputz])
print(model.summary())
print(f'Total number of layers: {len(model.layers)}')
return model
# get model
model = FCDense(n_classes = N_CLASSES)
# directory to save model
os.makedirs(dir_out("mod_FCD"), exist_ok = True)
### logs and callbacks---------------------------------------------------------
# define callbacks
from tensorflow.keras.callbacks import LearningRateScheduler
'''
Simple custom LR decay which would only require the epoch index as an argument:
'''
def step_decay_schedule(initial_lr = init_lr,
decay_factor = decay_lr, step_size = step_lr):
def schedule(epoch):
return initial_lr * (decay_factor ** np.floor(epoch/step_size))
return LearningRateScheduler(schedule)
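# hedged note: if this custom schedule were used instead of the built-in decay
# below, the returned callback would also need to be appended to the callback
# list 'cllbs' defined further down, e.g. cllbs.append(step_decay_schedule())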
#lr_sched = step_decay_schedule(initial_lr = init_lr,
# decay_factor = decay_lr, step_size = step_lr)
'''
Using some simple built-in learning rate decay:
'''
if init_lr is not None:
lr_sched = ks.optimizers.schedules.ExponentialDecay(
initial_learning_rate = init_lr,
# decay after n steps
decay_steps = np.floor(N_img/bs),
decay_rate = decay_lr)
optimizers = {
"adam" : ks.optimizers.Adam(learning_rate = lr_sched, \
clipnorm = 1), \
"sgd" : ks.optimizers.SGD(learning_rate = init_lr, \
clipnorm = 1), \
"rms" : ks.optimizers.RMSprop(learning_rate = lr_sched, \
clipnorm = 1)
}
else:
optimizers = {
"adam" : ks.optimizers.Adam(),
"sgd" : ks.optimizers.SGD(),
"rms" : ks.optimizers.RMSprop()
}
try:
optimizer = optimizers[optmer]
except KeyError:
print("Failed to assign optimizer: " + optmer + \
". Use 'Adam', 'rms', or 'sgd'.")
# list callbacks
now = datetime.datetime.now()
logdir = os.path.join(dir_out("logs"), now.strftime("%y-%m-%d-%H-%M-%S"))
cptdir = os.path.join(dir_out("cpts"), now.strftime("%y-%m-%d-%H-%M-%S"))
cllbs = [
#ks.callbacks.ReduceLROnPlateau(monitor = "val_loss", factor = 0.2,
# patience = 5, min_lr = 0.001),
ks.callbacks.EarlyStopping(patience = es_patience),
ks.callbacks.ModelCheckpoint(os.path.join(cptdir, \
"Epoch.{epoch:02d}.hdf5"),
save_best_only = True),
ks.callbacks.TensorBoard(log_dir = logdir, histogram_freq = 5)
]
# compile model----------------------------------------------------------------
## loss functions
### define IoU loss (only binary)
#### https://www.youtube.com/watch?v=NqDBvUPD9jg&ab_channel=DigitalSreeni
#def IoU_coe(y_true, y_pred):
# T = ks.flatten(y_true)
# P = ks.flatten(y_pred)
# intersect = ks.sum(T * P)
# IoU = (intersect + 1.0) / (ks.sum(T) + ks.sum(P) - intersect + 1.0)
# return IoU
#def IoU_loss(y_true, y_pred):
# return 1 - IoU_coe(y_true, y_pred)
### define dice coefficient
### https://github.com/tensorlayer/tensorlayer/blob/master/tensorlayer/cost.py#L216
def dice_coe(target, output, loss_type = "jaccard",
axis = (1, 2, 3), smooth = 1):# orig. val. smooth = 1e-5
inse = tf.reduce_sum(output * target, axis = axis)
if loss_type == "jaccard":
l = tf.reduce_sum(output * output, axis = axis)
r = tf.reduce_sum(target * target, axis = axis)
elif loss_type == "sorensen":
l = tf.reduce_sum(output, axis = axis)
r = tf.reduce_sum(target, axis = axis)
else:
raise Exception("Unknow loss_type: " + loss_type)
dice = (2. * inse + smooth) / (l + r + smooth)
dice = tf.reduce_mean(dice)
return dice
def dice_loss(y_true, y_pred):
return 1 - dice_coe(y_true, y_pred)
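# hedged usage note: dice_loss is defined here but not wired into model.compile()
# below; to use it with the integer masks produced by parse_image, the masks would
# first need to be one-hot encoded to match the softmax output shape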
### define focal loss
# pip3 install focal-loss
## metrics
### get intersect. over union (original function gives error -> updated accor-
### ding to https://stackoverflow.com/a/61826074/11611246)
# mIoU = ks.metrics.MeanIoU(num_classes = N_CLASSES)
class UpdatedMeanIoU(tf.keras.metrics.MeanIoU):
def __init__(self,
y_true = None,
y_pred = None,
num_classes = None,
name = None,
dtype = None):
super(UpdatedMeanIoU, self).__init__(num_classes = num_classes,
name = name, dtype = dtype)
def update_state(self, y_true, y_pred, sample_weight = None):
y_pred = tf.math.argmax(y_pred, axis = -1)
return super().update_state(y_true, y_pred, sample_weight)
mIoU = UpdatedMeanIoU(num_classes = N_CLASSES)
### get sparse categorical/binary cross entropy
lozz = ks.losses.SparseCategoricalCrossentropy() if N_CLASSES > 2 else\
ks.losses.BinaryCrossentropy()
#run_opts = tf.compat.v1.RunOptions(report_tensor_allocations_upon_oom = True)
metrix = [mIoU, "sparse_categorical_accuracy"] if N_CLASSES > 2 else \
[mIoU, "accuracy"]
# resume training or compile new model-----------------------------------------
if resume_training == "f":
os.makedirs(logdir, exist_ok = True)
os.makedirs(cptdir, exist_ok = True)
os.chdir(logdir)
model.compile(optimizer = optimizer, loss = lozz,
metrics = metrix)#, options = run_opts)
model.summary()
elif resume_training == "t":
cpt_folders = [f for f in os.listdir(dir_out("cpts")) \
if not f.startswith(".")]
cpt_dates = [datetime.datetime.strptime(d, "%y-%m-%d-%H-%M-%S"\
) for d in cpt_folders]
cpt_folder = max(cpt_dates).strftime("%y-%m-%d-%H-%M-%S")
else:
cpt_folder = resume_training
if resume_training != "f":
list_of_files = glob.glob(dir_out("cpts", cpt_folder) + os.path.sep + \
"*" + ".hdf5")
checkpoint = max(list_of_files, key = os.path.getctime)
try:
model = ks.models.load_model(checkpoint, \
custom_objects = {"UpdatedMeanIoU": mIoU})
except Exception:
print("Failed to load model from", checkpoint)
all_logs = [dir_out("logs", p) for p in os.listdir(dir_out("logs"))]
logdir = max(all_logs, key = os.path.getctime)
os.chdir(logdir)
model.compile(optimizer = optimizer, loss = lozz,
metrics = metrix)
# report to tensorboard--------------------------------------------------------
import subprocess
PARAMETERS = "'Batch size: " + str(bs) + " Init. lr: " + str(init_lr) + \
" Img dim: " + str(imgc) + " Weights: " + str(ww) + " Optimizer: " + \
optmer + " Dataset: " + year +"'"
subprocess.Popen(["tensorboard", "dev", "upload", "--logdir", logdir, \
"--name", "LeleNet_" + mod, "--description", \
PARAMETERS], shell = False, \
stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
# fit model--------------------------------------------------------------------
args_fit = {"epochs" : epochz,
"steps_per_epoch" : np.ceil(N_img/bs),
"validation_steps" : np.ceil(N_val/bs),
"callbacks" : cllbs}
if resume_training != "f":
try:
s = checkpoint.find("Epoch.") + len("Epoch.")
e = checkpoint.find("Epoch.") + len("Epoch.") + 2
args_fit["initial_epoch"] = int(checkpoint[s : e])
except ValueError:
print("Error when trying to retrieve the epoch number from filename", \
"'" + checkpoint + "': Unable to find integer at position", \
str(checkpoint.find("Epoch.") + len("Epoch.")), "to", \
str(len(checkpoint)-5))
if "train_generator" in locals() or "train_generator" in globals():
model.fit(train_generator,
validation_data = val_generator,
**args_fit)
else:
if ww != 0:
model.fit(dataset["train"].map(add_sample_weights),
validation_data = dataset["val"],
**args_fit)
else:
model.fit(dataset["train"],
validation_data = dataset["val"],
**args_fit)
os.chdir(dir_out())
# save model-------------------------------------------------------------------
os.makedirs(dir_out(mod), exist_ok = True)
model.save(dir_out(mod), save_format = "tf", save_traces = True)
print("Model saved to disc.")
#trained_model = ks.models.load_model(dir_out(mod),\
# custom_objects = {"UpdatedMeanIoU": mIoU})
| 42.024109 | 92 | 0.544661 | 4,743 | 40,091 | 4.45077 | 0.161501 | 0.020464 | 0.019233 | 0.02288 | 0.357461 | 0.309901 | 0.259261 | 0.218475 | 0.191805 | 0.167598 | 0 | 0.01727 | 0.298047 | 40,091 | 953 | 93 | 42.068206 | 0.732855 | 0.154972 | 0 | 0.254731 | 0 | 0 | 0.113031 | 0.006678 | 0 | 0 | 0 | 0 | 0.001456 | 1 | 0.043668 | false | 0 | 0.021834 | 0.002911 | 0.125182 | 0.032023 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a780e26e4f23e4f9fb8ef5db39cf3ff77817c9ac | 1,179 | py | Python | storyboard/api/middleware/user_id_hook.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | ["Apache-2.0"] | null | null | null | storyboard/api/middleware/user_id_hook.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | ["Apache-2.0"] | null | null | null | storyboard/api/middleware/user_id_hook.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | ["Apache-2.0"] | null | null | null | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import hooks
import storyboard.common.hook_priorities as priority
from storyboard.db.api import access_tokens as token_api
class UserIdHook(hooks.PecanHook):
priority = priority.AUTH
def before(self, state):
request = state.request
if request.authorization and len(request.authorization) == 2:
access_token = request.authorization[1]
token = token_api.access_token_get_by_token(access_token)
if token:
request.current_user_id = token.user_id
return
request.current_user_id = None
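# Illustrative registration only (hedged; the actual wiring lives in the
# application factory, not in this module):
# app = pecan.make_app(config.app.root, hooks=[UserIdHook()])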
| 31.026316 | 69 | 0.720102 | 164 | 1,179 | 5.085366 | 0.591463 | 0.071942 | 0.031175 | 0.038369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010787 | 0.21374 | 1,179 | 37 | 70 | 31.864865 | 0.888889 | 0.466497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a782b94e2caf912181ecaec88c431b17b602f33d | 1,952 | py | Python | tests/recurrence/test_quadrature_creation.py | utsekaj42/chaospy | 0fb23cbb58eb987c3ca912e2a20b83ebab0514d0 | ["MIT"] | 333 | 2016-10-25T12:00:48.000Z | 2022-03-30T07:50:33.000Z | tests/recurrence/test_quadrature_creation.py | utsekaj42/chaospy | 0fb23cbb58eb987c3ca912e2a20b83ebab0514d0 | ["MIT"] | 327 | 2016-09-25T16:29:41.000Z | 2022-03-30T03:26:27.000Z | tests/recurrence/test_quadrature_creation.py | utsekaj42/chaospy | 0fb23cbb58eb987c3ca912e2a20b83ebab0514d0 | ["MIT"] | 74 | 2016-10-17T11:14:13.000Z | 2021-12-09T10:55:59.000Z | """
Check the creation of quadrature nodes.
Create Gaussian quadrature nodes using various distributions and algorithms and
check if the nodes correctly can be used to estimate raw statistical nodes up
to 2N-1. Check for both 1 and 3 dimensions.
"""
import pytest
import numpy
import chaospy
def test_1d_quadrature_creation(
analytical_distribution, recurrence_algorithm):
"""Check 1-D quadrature rule."""
abscissas, weights = chaospy.quadrature.gaussian(
order=8,
dist=analytical_distribution,
recurrence_algorithm=recurrence_algorithm,
)
assert abscissas.shape == (1, 9)
assert weights.shape == (9,)
assert numpy.allclose(numpy.sum(abscissas*weights, -1),
analytical_distribution.mom(1))
assert numpy.allclose(numpy.sum(abscissas**2*weights, -1),
analytical_distribution.mom(2))
# lanczos not working as well as the others for heavy tails:
rtol = 1e-3 if recurrence_algorithm == "lanczos" else 1e-5
assert numpy.allclose(numpy.sum(abscissas**15*weights, -1),
analytical_distribution.mom(15), rtol=rtol)
def test_3d_quadrature_creation(
analytical_distribution, recurrence_algorithm):
"""Check 3-D quadrature rule."""
distribution = chaospy.Iid(analytical_distribution, 3)
abscissas, weights = chaospy.quadrature.gaussian(
order=3,
dist=distribution,
recurrence_algorithm=recurrence_algorithm,
)
assert abscissas.shape == (3, 4**3)
assert weights.shape == (4**3,)
kloc = numpy.eye(3, dtype=int)
assert numpy.allclose(numpy.sum(abscissas*weights, -1),
distribution.mom(kloc))
assert numpy.allclose(numpy.sum(abscissas**2*weights, -1),
distribution.mom(2*kloc))
assert numpy.allclose(numpy.sum(abscissas**5*weights, -1),
distribution.mom(5*kloc))
| 37.538462 | 79 | 0.665471 | 231 | 1,952 | 5.536797 | 0.320346 | 0.120407 | 0.089132 | 0.112588 | 0.548084 | 0.48319 | 0.383112 | 0.248632 | 0.070367 | 0 | 0 | 0.027425 | 0.234119 | 1,952 | 51 | 80 | 38.27451 | 0.828094 | 0.182377 | 0 | 0.277778 | 0 | 0 | 0.004436 | 0 | 0 | 0 | 0 | 0 | 0.277778 | 1 | 0.055556 | false | 0 | 0.083333 | 0 | 0.138889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78347ae989fd1092f5624d6dc6bc874f4a39764 | 6,399 | py | Python | pycon/finaid/tests/test_review.py | pyconjp/pyconjp-website | c14b1412b70ad04d6c6e837cb0feaec17fd5cd36 | ["BSD-3-Clause"] | 6 | 2016-04-03T18:22:45.000Z | 2018-03-15T11:20:39.000Z | pycon/finaid/tests/test_review.py | alex/pycon | d1437a9f2ac1ec4f4fd5ad41ef3a7fe06958b52b | ["BSD-3-Clause"] | 60 | 2016-04-14T12:16:06.000Z | 2017-08-15T06:15:50.000Z | pycon/finaid/tests/test_review.py | alex/pycon | d1437a9f2ac1ec4f4fd5ad41ef3a7fe06958b52b | ["BSD-3-Clause"] | 7 | 2016-04-23T02:29:35.000Z | 2017-10-05T07:37:46.000Z | import datetime
from decimal import Decimal
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from pycon.finaid.models import STATUS_REJECTED, STATUS_SUBMITTED, \
FinancialAidMessage, FinancialAidReviewData, FinancialAidApplicationPeriod
from pycon.finaid.utils import is_reviewer
from symposion.conference.models import Conference
from .utils import TestMixin, create_application, ReviewTestMixin
today = datetime.date.today()
one_day = datetime.timedelta(days=1)
class TestFinaidApplicationReview(TestCase, TestMixin, ReviewTestMixin):
def setUp(self):
self.user = self.create_user()
self.login_url = reverse('account_login')
self.review_url = reverse('finaid_review')
self.setup_reviewer_team_and_permissions()
def test_not_reviewer(self):
# Non-reviewers cannot access the review view
self.login()
rsp = self.client.get(self.review_url)
self.assertEqual(403, rsp.status_code)
def test_reviewer(self):
# reviewers can access the review view
Conference.objects.get_or_create(id=settings.CONFERENCE_ID)
self.make_reviewer(self.user)
self.login()
rsp = self.client.get(self.review_url)
self.assertEqual(200, rsp.status_code)
def test_non_reviewer_is_reviewer(self):
self.assertFalse(is_reviewer(self.user))
def test_reviewer_is_reviewer(self):
self.make_reviewer(self.user)
self.assertTrue(is_reviewer(self.user))
def test_submit_message(self):
self.make_reviewer(self.user)
self.login()
# create application
applicant = self.create_user(username="jane",
email="jane@example.com")
application = create_application(user=applicant)
application.save()
# form data
MESSAGE = "now is the time for all good parties to..."
data = {
'application': application,
'user': self.user,
'visible': False,
'message': MESSAGE,
'message_submit': 'message_submit',
}
url = reverse('finaid_review_detail', kwargs={'pk': application.pk})
rsp = self.client.post(url, data, follow=True)
self.assertEqual(200, rsp.status_code)
msg = FinancialAidMessage.objects.filter(user=self.user,
application=application)[0]
self.assertEqual(MESSAGE, msg.message)
def test_reviewer_view_messages(self):
self.make_reviewer(self.user)
self.login()
# create application
applicant = self.create_user(username="jane",
email="jane@example.com")
application = create_application(user=applicant)
application.save()
# create message that is only visible to reviewers
message = FinancialAidMessage.objects.create(
application=application,
user=self.user,
visible=False
)
url = reverse('finaid_review_detail', kwargs={'pk': application.pk})
rsp = self.client.get(url)
self.assertEqual(200, rsp.status_code)
review_messages = rsp.context['review_messages']
self.assertIn(message, review_messages)
def test_update_review_data(self):
self.make_reviewer(self.user)
self.login()
# create application
applicant = self.create_user(username="jane",
email="jane@example.com")
application = create_application(user=applicant)
application.save()
# Create review record
# Most fields are optional
data = {
'application': application,
'status': STATUS_SUBMITTED,
'hotel_amount': Decimal('6.66'),
'registration_amount': Decimal('0.00'),
'travel_amount': Decimal('0.00'),
}
review = FinancialAidReviewData(**data)
review.save()
# Now, submit the form to change the status
data['status'] = STATUS_REJECTED
data['hotel_amount'] = Decimal('7.77')
data['review_submit'] = 'review_submit'
url = reverse('finaid_review_detail', kwargs={'pk': application.pk})
rsp = self.client.post(url, data, follow=False)
self.assertEqual(302, rsp.status_code)
new_review = FinancialAidReviewData.objects.get(pk=review.pk)
self.assertEqual(STATUS_REJECTED, new_review.status)
self.assertEqual(Decimal("7.77"), new_review.hotel_amount)
class TestFinaidApplicationReviewDetail(TestCase, TestMixin, ReviewTestMixin):
def setUp(self):
self.user = self.create_user()
self.applicant = self.create_user("fred", "fred@example.com", "linus")
self.application = create_application(user=self.applicant)
self.application.save()
self.review_url = reverse('finaid_review_detail', kwargs={'pk': self.application.pk})
self.setup_reviewer_team_and_permissions()
self.period = FinancialAidApplicationPeriod.objects.create(
start=today - one_day,
end=today + one_day
)
self.conf = Conference.objects.get_or_create(id=settings.CONFERENCE_ID)
def test_not_reviewer_not_applicant(self):
# Non-reviewers cannot access the review view
self.login()
rsp = self.client.get(self.review_url)
self.assertEqual(403, rsp.status_code)
def test_not_reviewer_is_applicant(self):
# Non-reviewer applicants are redirected to finaid_edit
self.login(username="fred@example.com", password="linus")
rsp = self.client.get(self.review_url, follow=True)
self.assertRedirects(rsp, reverse('finaid_edit'))
def test_reviewer(self):
# reviewers can access the review view
self.login()
self.make_reviewer(self.user)
rsp = self.client.get(self.review_url)
self.assertEqual(200, rsp.status_code)
def test_reviewer_is_applicant(self):
# reviewers that are applicants are redirected to their edit view
self.login(username="fred@example.com", password="linus")
self.make_reviewer(self.applicant)
rsp = self.client.get(self.review_url, follow=True)
self.assertRedirects(rsp, reverse('finaid_edit'))
| 39.018293 | 93 | 0.653696 | 719 | 6,399 | 5.655077 | 0.187761 | 0.041318 | 0.028775 | 0.027546 | 0.537629 | 0.520905 | 0.469257 | 0.415642 | 0.393999 | 0.369405 | 0 | 0.007874 | 0.24582 | 6,399 | 163 | 94 | 39.257669 | 0.834646 | 0.075481 | 0 | 0.440945 | 0 | 0 | 0.08507 | 0 | 0 | 0 | 0 | 0 | 0.11811 | 1 | 0.102362 | false | 0.015748 | 0.070866 | 0 | 0.188976 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7862e3a35f9f49d07229edd0a3a4a35a967e9f5 | 3,737 | py | Python | smashgg-scraper/get_data.py | Eblemgg/smashultimateelo | c39c1014c8041724267fae7721c169724a71df24 | ["MIT"] | null | null | null | smashgg-scraper/get_data.py | Eblemgg/smashultimateelo | c39c1014c8041724267fae7721c169724a71df24 | ["MIT"] | null | null | null | smashgg-scraper/get_data.py | Eblemgg/smashultimateelo | c39c1014c8041724267fae7721c169724a71df24 | ["MIT"] | null | null | null | import requests
import csv
import time
import json
from collections import *
from api_scrape_util import *
import threading
import sys
def get_data(slug, supress_output):
tourn_info = get_tournament_info(slug)
event_phases = tourn_info['phases_per_event']
phase_groups = tourn_info['groups_per_phase']
#Separate each phase by game
events = {}
for event_id in event_phases:
r = requests.get(format_url(api_prefix, 'event/', str(event_id), api_entrant_postfix))
evnt_data = json.loads(r.text)
events[evnt_data["entities"]["event"]["id"]] = Event(event_id, evnt_data["entities"]["event"]["name"], evnt_data["entities"]["event"]["videogameId"], evnt_data["entities"]["event"]["type"])
tmp = evnt_data["entities"]["entrants"]
events[evnt_data["entities"]["event"]["id"]].add_entrants(tmp)
#At this point, we've scraped all events, phases, and entrants
#print("Retrieved events")
for event in events:
events[event].add_phases(event_phases[event])
for phase in events[event].phases:
events[event].add_groups(phase_groups[phase])
for event in events:
#Uses the skip criteria defined in skip_event to check if we care about this event.
if(skip_event(events, event)):
continue
#Update the master tournament file
master_file = "../data/" + events[event].game + "/" + events[event].format + "/tournaments.csv"
master_lock.acquire()
update_master_file(master_file,slug, tourn_info['name'], tourn_info['dates'], events[event])
master_lock.release()
#Update the sets file
filename = get_filename(events[event].game, events[event].format,slug,'-sets.csv')
if(not supress_output):
print("Working on " + filename + "...")
doubles = write_set_data(filename, events[event], supress_output)
#Update the standings file
filename = get_filename(events[event].game, events[event].format,slug,'-standings.csv')
write_placements(filename, events[event], doubles)
if(supress_output):
slug_lock.acquire()
all_slugs.pop(slug, None)
slug_lock.release()
#Declare all needed threads and locks
threads = []
all_slugs = {}
master_lock = threading.RLock()
slug_lock = threading.RLock()
def Single():
slug = input("What is the tournament slug?\n")
get_data(slug, False)
def Multi():
#Open the slugs file to read all tournaments to scrape
slug_file = "../data/slugs.csv"
f = open(slug_file,"r")
reader = csv.reader(f)
slug_list = list(reader)
iterations = len(slug_list[1::])
for i in range(1,iterations + 1):
slug = slug_list[i][1]
slug_lock.acquire()
all_slugs[slug] = slug
#print("Starting Tournament: ", slug)
slug_lock.release()
#Create a thread to grab data, surpress output
t = threading.Thread(target=get_data, args=(slug,True))
threads.append(t)
t.start()
#Print the remaining threads, and check every half second.
while(threading.activeCount() != 1):
sys.stdout.write("Threads Remaining: {0}\r".format(threading.activeCount()))
sys.stdout.flush()
time.sleep(0.5)
for thread in threads:
thread.join()
print("Error'd files: ", all_slugs)
mode = input("Single Mode (s)? Or File Mode (f)?\n")
valid = False
if(mode == "s"):
Single()
valid = True
if(mode == "f"):
Multi()
valid = True
if(not valid):
print("Please select a valid mode and rerun.")
| 31.940171 | 198 | 0.621889 | 476 | 3,737 | 4.735294 | 0.313025 | 0.063443 | 0.042591 | 0.046584 | 0.112689 | 0.09228 | 0.052351 | 0.052351 | 0.052351 | 0.052351 | 0 | 0.002858 | 0.251003 | 3,737 | 116 | 199 | 32.215517 | 0.802429 | 0.134065 | 0 | 0.102564 | 0 | 0 | 0.120656 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.102564 | 0 | 0.141026 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7874d581787a5f17d04ced6c92447541eaa61ab | 5,539 | py | Python | src/mcmc/hamiltonian.py | pjbull/data-science-is-software | 127c07a8e9598b279595400420ca3d6daac76e7b | ["MIT"] | 22 | 2016-03-18T19:34:23.000Z | 2021-01-03T14:32:38.000Z | src/mcmc/hamiltonian.py | afcarl/data-science-is-software--pjbull | 127c07a8e9598b279595400420ca3d6daac76e7b | ["MIT"] | 1 | 2016-03-18T19:48:12.000Z | 2016-03-19T20:25:11.000Z | src/mcmc/hamiltonian.py | afcarl/data-science-is-software--pjbull | 127c07a8e9598b279595400420ca3d6daac76e7b | ["MIT"] | 18 | 2016-03-18T19:34:47.000Z | 2020-08-06T07:47:24.000Z | from __future__ import print_function
import numpy as np
# this makes matplotlib be called
# happily in a virtualenv from a jupyter notebook
import matplotlib
matplotlib.use('nbagg')
import matplotlib.pyplot as plt
import prettyplotlib as pplt
blues_rev = pplt.brewer2mpl.get_map('Blues', 'Sequential', 9, reverse=True).mpl_colormap
c1, c2 = pplt.brewer2mpl.get_map('Dark2', 'Qualitative', 8).mpl_colors[3:5]
def run_diagnostics(samples, function=None, plots=True):
if plots:
xlim = (-0.5, 1.5)
ylim = (-1.5, 1.)
# plot the sample distribution
plt.hist2d(samples[:,0], samples[:,1], bins=50, cmap=blues_rev)
# overlay the true function
if function:
plot_true_function(function, xlim, ylim)
plt.show()
plot_diagnostics(samples)
gelman_rubin(samples)
# gewecke
#geweke_val = pymc.diagnostics.geweke(samples, intervals=1)[0][0][1]
Geweke(samples)
def gelman_rubin(samples):
# g-r conventionally uses 10 chains
# we'll assume an appropriate burnin
# so we can divide our chains into 10
# separate ones
m_chains = 10
length, dims = samples.shape
n_draws = length//m_chains
# split the chain into 10 subchains
total_length = n_draws * m_chains
chain_draws = samples[:total_length,:].reshape(n_draws, m_chains, dims)
# calculate within chain variance for each dimension
var_j = np.var(chain_draws, axis=1)
var_wc = np.mean(var_j, axis=0)
# calculate between chain variance for each dimension
mu_j = np.mean(chain_draws, axis=1)
var_bc = np.var(mu_j, axis=0) * n_draws
# calculate the estimated variance per dimension
var = (1 - (1/n_draws))*var_wc + (1/n_draws)*var_bc
# calculate potential scale reduction factor
R = np.sqrt(var/var_wc)
print("The Gelman-Rubin potential scale reduction factor is: ", R, " (< 1.1 indicates good mixing)")
def Geweke(trace, intervals=1, length=200, first=0.1):
first = int(first * len(trace))
# take two parts of the chain.
# subsample length
nsl=length
z =np.empty(intervals)
for k in np.arange(0, intervals):
# beg of each sub samples
bega=first+k*length
begb = len(trace) // 2 + k*length
sub_trace_a = trace[bega:bega+nsl]
sub_trace_b = trace[begb:begb+nsl]
theta_a = np.mean(sub_trace_a)
theta_b = np.mean(sub_trace_b)
var_a = np.var(sub_trace_a)
var_b = np.var(sub_trace_b)
z[k] = (theta_a-theta_b)/np.sqrt( var_a + var_b)
print("The Geweke Diagnostic Value is: ", np.abs(z), "(< 1.96 indicates convergence)")
def plot_diagnostics(samples):
# Samples Trace
plot_traces(samples)
# Samples Autocorrelation
plot_acorr(samples)
def plot_traces(samples):
lens, dims = samples.shape
figs, axes = plt.subplots(dims,1)
for d in range(dims):
pplt.plot(axes[d], np.arange(lens), samples[:,d])
def plot_acorr(x_vals, maxlags=200):
figs, axes = plt.subplots(1,2)
# plot x autocorrelation
axes[0].acorr(x_vals[:,0]-np.mean(x_vals[:,0]),
normed=True,
usevlines=False,
maxlags=maxlags,
color=c1,
alpha=0.1)
axes[0].set_xlim((0, maxlags))
axes[0].set_title(r"Autocorrelation of $x$")
# plot y autocorrelation
axes[1].acorr(x_vals[:,1]-np.mean(x_vals[:,1]),
normed=True,
usevlines=False,
maxlags=1000,
color=c2,
alpha=0.1)
axes[1].set_xlim((0, maxlags))
axes[1].set_title(r"Autocorrelation of $y$")
plt.show()
def plot_true_function(function, xlim, ylim, ax=None):
# get plotting object
ax = plt if not ax else ax
# plot true function
xs = np.linspace(xlim[0], xlim[1], 1000)
ys = np.linspace(ylim[0], ylim[1], 1000)
XX, YY = np.meshgrid(xs, ys)
# reshape
LS = np.vstack([XX.ravel(), YY.ravel()])
ZZ = function(LS.T).reshape(1000, 1000)
plt.contour(XX, YY, ZZ.reshape(1000, 1000),
cmap=pplt.brewer2mpl.get_map('Blues', 'Sequential', 9, reverse=False).mpl_colormap)
def hamiltonian(sample_size, U, K, grad_U, dims=2, L=5, epsilon=0.1, burn_in=10, thinning=10):
sample_size = (sample_size + burn_in)*thinning
# initial position
current_q = np.ones(dims).reshape(-1, dims)
H = np.zeros(sample_size)
qall = np.zeros((sample_size, dims))
for j in np.arange(sample_size):
q = current_q.copy()
# draw a new p
p = np.random.normal(0, 1, dims).reshape(-1, dims)
current_p = p.copy()
# Make a half step for momentum at the beginning
p = p - epsilon * grad_U(q)/2.0
# alternate full steps for position and momentum
for i in range(L):
q = q + epsilon*p
if (i != L-1):
p = p - epsilon*grad_U(q)
#make a half step at the end
p = p - epsilon*grad_U(q)/2.
# negate the momentum
p= -p
current_U = U(current_q)
current_K = K(current_p)
proposed_U = U(q)
proposed_K = K(p)
A=np.exp(current_U-proposed_U+current_K-proposed_K)
# accept/reject
if np.random.rand() < A:
current_q = q.copy()
qall[j,:] = q.copy()
else:
qall[j, :] = current_q.copy()
H[j] = U(current_q)+K(current_p)
return qall[burn_in::thinning], H[burn_in::thinning]
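# minimal usage sketch (illustrative only; U, K and grad_U are assumed to accept
# and return numpy arrays shaped (-1, dims), matching the leapfrog updates above):
# def U(q): return 0.5 * np.sum(q ** 2, axis=1) # standard-normal potential
# def grad_U(q): return q
# def K(p): return 0.5 * np.sum(p ** 2, axis=1)
# samples, H = hamiltonian(1000, U, K, grad_U, dims=2)
# run_diagnostics(samples, plots=False)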
| 27.557214 | 104 | 0.608955 | 819 | 5,539 | 3.982906 | 0.283272 | 0.003679 | 0.015635 | 0.018394 | 0.135806 | 0.060392 | 0.036174 | 0.026364 | 0 | 0 | 0 | 0.031304 | 0.267557 | 5,539 | 200 | 105 | 27.695 | 0.772738 | 0.169886 | 0 | 0.072072 | 0 | 0 | 0.052793 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072072 | false | 0 | 0.045045 | 0 | 0.126126 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78765ac1f5f46fecd9a79a7f2e193fe02ce0655 | 21,551 | py | Python | lambda/ErrorHandlingAndCleanup/index.py | JintaoH/maskopy | 32bdaf1cb52abead770e93aa6d2082c17ba06a2d | ["Apache-2.0"] | 13 | 2019-11-25T14:59:53.000Z | 2022-01-14T10:58:41.000Z | lambda/ErrorHandlingAndCleanup/index.py | JintaoH/maskopy | 32bdaf1cb52abead770e93aa6d2082c17ba06a2d | ["Apache-2.0"] | 2 | 2019-11-29T17:13:52.000Z | 2021-07-29T21:55:40.000Z | lambda/ErrorHandlingAndCleanup/index.py | JintaoH/maskopy | 32bdaf1cb52abead770e93aa6d2082c17ba06a2d | ["Apache-2.0"] | 10 | 2019-11-26T20:22:02.000Z | 2021-07-01T01:02:46.000Z | """
Copyright (c) 2019. Maskopy Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This lambda cleans up any resources generated by the step function in case of Error.
This lambda expects the following inputs:
- ApplicationName
- DestinationEnv
- RdsSnapshotIdentifier
Optional:
- AsgName
- CreatedDestinationSnapshots
- CreatedSnapshots
- DestinationRestoredDatabases
- ecs
- fargate
- InstanceId
- ObfuscateRunMode
- TaskDefinition
"""
import json
import os
import time
import boto3
from botocore.exceptions import ClientError
ASG_CLIENT = boto3.client('autoscaling')
ECS_CLIENT = boto3.client('ecs')
RDS_CLIENT = boto3.client('rds')
STS_CLIENT = boto3.client('sts')
ASSUME_ROLE_ARN = os.environ['assume_role_arn']
def lambda_handler(event, context):
"""Lambda handler for the eleventh lambda of the Maskopy process.
Args:
event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
context (Context): AWS Lambda provides runtime info and meta data.
Returns:
:obj:`list` of :obj`dict` of str:str:
List of deleted resources and message to be sent to SQS.
"""
deleted_resources = []
# Create message to be sent to SQS
json_msg = {
"ApplicationName": event['ApplicationName'],
"State": "CRITICAL",
"SDLC": event['DestinationEnv'],
"Service": "MasKopy",
"msgDetail": (f"MasKopy process for ApplicationName: {event['ApplicationName']} "
f"for snapshotID: {event['RdsSnapshotIdentifier']}. "
f"The status is: CRITICAL.")
}
deleted_resources.append({'Message' : json.dumps(json_msg)})
session = create_account_session(
STS_CLIENT, ASSUME_ROLE_ARN, context.aws_request_id)
rds_source_client = session.client('rds')
for shared_snapshot in event.get('CreatedSnapshots', []):
if isinstance(shared_snapshot, dict):
snapshot_name = shared_snapshot.get('SnapshotName')
print(f"Deleting snapshot in source account: {snapshot_name}")
if delete_snapshot(rds_source_client, snapshot_name,event["CreatedSnapshots"][0]["Engine"]):
deleted_resources.append({'SourceSnapshot' : snapshot_name})
for destination_snapshot in event.get('CreatedDestinationSnapshots', []):
if isinstance(destination_snapshot, dict):
snapshot_name = destination_snapshot.get('SnapshotName')
print(f"Deleting snapshots in destination account: {snapshot_name}")
if delete_snapshot(RDS_CLIENT, snapshot_name,event["CreatedSnapshots"][0]["Engine"]):
deleted_resources.append({'DestinationSnapshot': snapshot_name})
for database in event.get('DestinationRestoredDatabases', []):
if 'DBIdentifier' in database and database['DBIdentifier']['DBInstanceIdentifier'].startswith('maskopy'):
print(f"Deleting RDS in destination account: {database['DBIdentifier']['DBInstanceIdentifier']}")
if delete_database(RDS_CLIENT, database,event["CreatedSnapshots"][0]["Engine"]):
deleted_resources.append({"DestinationDatabase": database['DBIdentifier']})
if event.get('ObfuscateRunMode') == 'ecs':
ecs = event.get('ecs')
if ecs:
if (ecs.get('InstanceId') and ecs.get('AsgName') and
delete_asg(ASG_CLIENT, ecs['AsgName'])):
deleted_resources.append({"Instance": ecs['InstanceId']})
deleted_resources.append({"ASG": ecs['AsgName']})
if (ecs.get('TaskDefinition') and
deregister_task_definition(ECS_CLIENT, ecs['TaskDefinition'])):
deleted_resources.append({"Task Definition": ecs['TaskDefinition']})
if (ecs.get('ClusterName') and
delete_cluster(ECS_CLIENT, ecs.get('ClusterName'), ecs.get('InstanceId'))):
deleted_resources.append({"ECS Cluster": ecs['ClusterName']})
elif not event.get('ObfuscateRunMode') or event.get('ObfuscateRunMode') == 'fargate':
fargate = event.get('fargate')
if (fargate and fargate.get('TaskDefinition') and
deregister_task_definition(ECS_CLIENT, fargate.get('TaskDefinition'))):
deleted_resources.append({"Task Definition": fargate.get('TaskDefinition')})
return deleted_resources
def delete_snapshot(rds_client, snapshot_identifier, engine):
"""Function to delete snapshot.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): RDS snapshot identifer to delete
engine: The DB engine of the snapshot
Returns:
bool: True if snapshot was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
if 'aurora' in engine:
return delete_snapshot_cluster(rds_client, snapshot_identifier)
else:
return delete_snapshot_instance(rds_client, snapshot_identifier)
def delete_snapshot_cluster(rds_client, snapshot_identifier):
"""Function to delete snapshot.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): RDS snapshot identifer to delete
Returns:
bool: True if snapshot was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
rds_client.delete_db_cluster_snapshot(
DBClusterSnapshotIdentifier=snapshot_identifier)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBClusterSnapshotNotFound':
print(f'Snapshot, {snapshot_identifier}, already deleted.')
return True
# Check if error code is due to SNAPSHOT not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBClusterSnapshotState':
print(f"{snapshot_identifier}: RDS snapshot is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting snapshot: {snapshot_identifier}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting snapshot, {snapshot_identifier}: {err.response['Error']['Code']}.")
print(err)
return False
def delete_snapshot_instance(rds_client, snapshot_identifier):
"""Function to delete snapshot.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): RDS snapshot identifer to delete
Returns:
bool: True if snapshot was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
rds_client.delete_db_snapshot(
DBSnapshotIdentifier=snapshot_identifier)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBSnapshotNotFound':
print(f'Snapshot, {snapshot_identifier}, already deleted.')
return True
# Check if error code is due to SNAPSHOT not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBSnapshotState':
print(f"{snapshot_identifier}: RDS snapshot is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting snapshot: {snapshot_identifier}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting snapshot, {snapshot_identifier}: {err.response['Error']['Code']}.")
print(err)
return False
def delete_database(rds_client, db_identifier, engine):
"""Function to delete RDS instance.
Args:
rds_client (Client): AWS RDS Client object.
db_instance_identifier (str): RDS instance to delete
engine: The DB engine of the snapshot
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
if 'aurora' in engine:
return delete_database_cluster(rds_client, db_identifier['DBIdentifier'])
else:
return delete_database_instance(rds_client, db_identifier['DBIdentifier'])
def delete_database_cluster(rds_client, db_identifier):
"""Function to delete RDS instance.
Args:
rds_client (Client): AWS RDS Client object.
db_instance_identifier (str): RDS instance to delete
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
db_cluster_identifier=db_identifier['DBClusterIdentifier']
db_instance_identifier=db_identifier['DBInstanceIdentifier']
if db_cluster_identifier.startswith('Maskopy'):
print(f"Deleting RDS cluster in destination account: {db_cluster_identifier}")
try:
rds_client.delete_db_instance(
DBInstanceIdentifier=db_instance_identifier,
SkipFinalSnapshot=True)
rds_client.delete_db_cluster(
DBClusterIdentifier=db_cluster_identifier,
SkipFinalSnapshot=True)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBClusterNotFound':
print(f'RDS cluster, {db_cluster_identifier}, already deleted.')
return True
# Check if error code is due to RDS not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBClusterState':
print(f"{db_cluster_identifier}: RDS cluster is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting database: {db_cluster_identifier}.")
raise MaskopyThrottlingException(err)
if err.response['Error']['Code'] == 'DBInstanceNotFound':
print(f'RDS instance, {db_instance_identifier}, already deleted.')
return True
# Check if error code is due to RDS not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBInstanceState':
print(f"{db_instance_identifier}: RDS instance is not in available state.")
raise MaskopyResourceException(err)
print(f"Error deleting database cluster, {db_cluster_identifier}: {err.response['Error']['Code']}")
print(err)
return False
def delete_database_instance(rds_client, db_identifier):
"""Function to delete RDS instance.
Args:
rds_client (Client): AWS RDS Client object.
db_instance_identifier (str): RDS instance to delete
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
db_instance_identifier=db_identifier['DBInstanceIdentifier']
try:
rds_client.delete_db_instance(
DBInstanceIdentifier= db_instance_identifier,
SkipFinalSnapshot=True)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBInstanceNotFound':
print(f'RDS instance, { db_instance_identifier}, already deleted.')
return True
# Check if error code is due to RDS not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBInstanceState':
print(f"{db_instance_identifier}: RDS instance is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting database: { db_instance_identifier }.")
raise MaskopyThrottlingException(err)
print(f"Error deleting database, {db_instance_identifier}: {err.response['Error']['Code']}")
print(err)
return False
def delete_asg(asg_client, asg_name):
"""Function to delete ASG.
Args:
asg_client (Client): AWS ASG Client object.
asg_name (str): ASG and launch configuration name to delete
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
# Check if ASG exists and then delete it
asg_response = asg_client.describe_auto_scaling_groups(
AutoScalingGroupNames=[asg_name])
if asg_response['AutoScalingGroups']:
print(f'Deleting ASG: {asg_name}')
asg_client.delete_auto_scaling_group(
AutoScalingGroupName=asg_name, ForceDelete=True)
time.sleep(40)
# Check if launch configuration exists and then delete it
launch_configuration_response = asg_client.describe_launch_configurations(
LaunchConfigurationNames=[asg_name])
if launch_configuration_response['LaunchConfigurations']:
print(f'Deleting launch configuration: {asg_name}.')
asg_client.delete_launch_configuration(
LaunchConfigurationName=asg_name)
return True
except ClientError as err:
# Check if error code is ResourceContention.
if err.response['Error']['Code'] == 'ResourceContention':
print(f"ASG or launch configuration has a pending update already: {asg_name}.")
raise MaskopyResourceException(err)
# Check if error code is ResourceInUse.
if err.response['Error']['Code'] == 'ResourceInUse':
print(f"Launch configuration is still in use: {asg_name}.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting ASG: {asg_name}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting ASG, {asg_name}: {err.response['Error']['Code']}")
print(err)
return False
def deregister_task_definition(ecs_client, task_definition):
"""Function to deregister task definition.
Args:
ecs_client (Client): AWS ECS Client object.
task_definition (str): Task definition to delete
Returns:
bool: True if task definition was deregistered successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
print(f'Deregistering task definition: {task_definition}')
ecs_client.deregister_task_definition(
taskDefinition=task_definition)
return True
except ClientError as err:
# Check if error code is ClientException.
if (err.response['Error']['Code'] == 'ClientException' and
err.response['Error']['Message'] ==
'The specified task definition does not exist.'):
print(f'Task definition revision, {task_definition}, does not exist.')
return True
print(f"Error deregistering task definition, {task_definition}: "
f"{err.response['Error']['Code']}")
print(err)
return False
def delete_cluster(ecs_client, cluster_name, instance_identifier=None):
"""Function to delete ECS or fargate cluster.
Args:
ecs_client (Client): AWS ECS Client object.
cluster_name (str): Cluster to delete
instance_identifier (str, optional): Instance identifier to deregister.
Classic ECS clusters require an EC2 instance to be registered.
Forcing a deregister of the instance allows the ECS cluster to be
deleted.
Returns:
bool: True if cluster was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
cluster = ecs_client.describe_clusters(
clusters=[cluster_name])
if instance_identifier:
ecs_client.deregister_container_instance(
cluster=cluster_name,
containerInstance=instance_identifier,
force=True)
print(f'Deleting ECS cluster: {cluster_name}')
ecs_client.delete_cluster(cluster=cluster_name)
return True
except ClientError as err:
# Check if error code is ClusterNotFoundException.
if err.response['Error']['Code'] == 'ClusterNotFoundException':
print(f'ECS cluster, {cluster_name}, already deleted.')
return True
# Check if error code is ClusterContainsContainerInstancesException.
if err.response['Error']['Code'] == 'ClusterContainsContainerInstancesException':
print(f'ECS cluster, {cluster_name}, still contains instances.')
raise MaskopyResourceException(err)
# Check if error code is ClusterContainsTasksException.
if err.response['Error']['Code'] == 'ClusterContainsTasksException':
print(f'ECS cluster, {cluster_name}, still contains tasks.')
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting ECS cluster: {cluster}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting ECS, {cluster_name}: {err.response['Error']['Code']}")
print(err)
return False
def create_account_session(sts_client, role_arn, request_id):
"""Function to create and assume account role.
Args:
sts_client (Client): AWS STS Client object.
role_arn (str): The arn of the role to assume a session.
request_id (str): UUID for session to uniquely identify session name.
Returns:
:obj:`boto3.session.Session`:
A session of the role to be used.
"""
sts_response = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName=request_id
)
return boto3.session.Session(
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken']
)
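
# Illustrative usage (an assumption, not part of the original code): the session returned
# above can create clients in the assumed account, e.g.
#
#     sts_client = boto3.client('sts')
#     session = create_account_session(sts_client, 'arn:aws:iam::123456789012:role/SomeRole', request_id)
#     rds_client = session.client('rds')
#
# The role ARN and client names here are placeholders.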
class MaskopyResourceException(Exception):
"""Exception raised when IAM role or user is not able to access the
resource.
"""
class MaskopyThrottlingException(Exception):
"""Exception raised when AWS request returns a Throttling exception.
"""
| 46.049145 | 113 | 0.671152 | 2,401 | 21,551 | 5.909205 | 0.124948 | 0.031717 | 0.033831 | 0.04088 | 0.60523 | 0.564421 | 0.531224 | 0.498238 | 0.471948 | 0.468988 | 0 | 0.001289 | 0.244026 | 21,551 | 467 | 114 | 46.147752 | 0.869568 | 0.370424 | 0 | 0.377049 | 0 | 0 | 0.301046 | 0.08091 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045082 | false | 0 | 0.020492 | 0 | 0.184426 | 0.180328 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a788e3c6231694b687c16aedcab52650ce95f29b | 899 | py | Python | tools/fewshot_exp/datasets/voc_create_standard.py | JunhoPark0314/DCNet | 4b8b11701ae05903ecae779cb72949d320f134a7 | [
"MIT"
] | 93 | 2021-03-20T13:48:47.000Z | 2022-03-31T09:35:00.000Z | tools/fewshot_exp/datasets/voc_create_standard.py | JunhoPark0314/DCNet | 4b8b11701ae05903ecae779cb72949d320f134a7 | [
"MIT"
] | 19 | 2021-05-19T06:19:52.000Z | 2022-03-26T07:56:24.000Z | tools/fewshot_exp/datasets/voc_create_standard.py | JunhoPark0314/DCNet | 4b8b11701ae05903ecae779cb72949d320f134a7 | [
"MIT"
] | 19 | 2021-05-29T09:36:56.000Z | 2022-03-31T09:35:02.000Z | import os
from maskrcnn_benchmark.data.datasets.voc import PascalVOCDataset
import sys
seed=int(sys.argv[1])
cls = PascalVOCDataset.CLASSES[1:]
#yolodir = '../Fewshot_Detection'
for shot in [10, 5, 3, 2, 1]:
ids = []
for c in cls:
with open('/workspace/data/pascal_voc/voclist%d/box_%dshot_%s_train.txt'%(seed,shot, c)) as f:
content = f.readlines()
content = [i.strip().split('/')[-1][:-4] for i in content]
ids += content
ids = list(set(ids))
with open('datasets/voc/VOC2007/ImageSets/Main/trainval_%dshot_novel_standard_seed%d.txt'%(shot,seed), 'w+') as f:
for i in ids:
if '_' not in i:
f.write(i + '\n')
with open('datasets/voc/VOC2012/ImageSets/Main/trainval_%dshot_novel_standard_seed%d.txt'%(shot,seed), 'w+') as f:
for i in ids:
if '_' in i:
f.write(i + '\n')
| 35.96 | 118 | 0.600667 | 135 | 899 | 3.881481 | 0.451852 | 0.062977 | 0.034351 | 0.072519 | 0.30916 | 0.30916 | 0.267176 | 0.267176 | 0.267176 | 0.267176 | 0 | 0.026277 | 0.238042 | 899 | 24 | 119 | 37.458333 | 0.738686 | 0.035595 | 0 | 0.190476 | 0 | 0 | 0.260116 | 0.247399 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78972f5eef1111c04c4fb3d001168a6f72666ea | 19,001 | py | Python | tests/memex/storage_test.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | null | null | null | tests/memex/storage_test.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | null | null | null | tests/memex/storage_test.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import pytest
import mock
from pyramid import security
from memex import groups
from memex import storage
from memex import schemas
from memex.models.annotation import Annotation
from memex.models.document import Document, DocumentURI, DocumentMeta
class FakeGroup(object):
def __acl__(self):
return []
class TestFetchAnnotation(object):
def test_it_fetches_and_returns_the_annotation(self, db_session, factories):
annotation = factories.Annotation()
actual = storage.fetch_annotation(db_session, annotation.id)
assert annotation == actual
def test_it_does_not_crash_if_id_is_invalid(self, db_session):
assert storage.fetch_annotation(db_session, 'foo') is None
class TestFetchOrderedAnnotations(object):
def test_it_returns_annotations_for_ids_in_the_same_order(self, db_session, factories):
ann_1 = factories.Annotation(userid='luke')
ann_2 = factories.Annotation(userid='luke')
assert [ann_2, ann_1] == storage.fetch_ordered_annotations(db_session,
[ann_2.id, ann_1.id])
assert [ann_1, ann_2] == storage.fetch_ordered_annotations(db_session,
[ann_1.id, ann_2.id])
def test_it_allows_to_change_the_query(self, db_session, factories):
ann_1 = factories.Annotation(userid='luke')
ann_2 = factories.Annotation(userid='maria')
def only_maria(query):
return query.filter(Annotation.userid == 'maria')
assert [ann_2] == storage.fetch_ordered_annotations(db_session,
[ann_2.id, ann_1.id],
query_processor=only_maria)
class TestExpandURI(object):
def test_expand_uri_no_document(self, db_session):
actual = storage.expand_uri(db_session, 'http://example.com/')
assert actual == ['http://example.com/']
def test_expand_uri_document_doesnt_expand_canonical_uris(self, db_session):
document = Document(document_uris=[
DocumentURI(uri='http://foo.com/', claimant='http://example.com'),
DocumentURI(uri='http://bar.com/', claimant='http://example.com'),
DocumentURI(uri='http://example.com/', type='rel-canonical',
claimant='http://example.com'),
])
db_session.add(document)
db_session.flush()
assert storage.expand_uri(db_session, "http://example.com/") == [
"http://example.com/"]
def test_expand_uri_document_uris(self, db_session):
document = Document(document_uris=[
DocumentURI(uri='http://foo.com/', claimant='http://bar.com'),
DocumentURI(uri='http://bar.com/', claimant='http://bar.com'),
])
db_session.add(document)
db_session.flush()
assert storage.expand_uri(db_session, 'http://foo.com/') == [
'http://foo.com/',
'http://bar.com/'
]
@pytest.mark.usefixtures('models', 'group_service')
class TestCreateAnnotation(object):
def test_it_fetches_parent_annotation_for_replies(self,
fetch_annotation,
pyramid_config,
pyramid_request,
group_service):
# Make the annotation's parent belong to 'test-group'.
fetch_annotation.return_value.groupid = 'test-group'
# The request will need permission to write to 'test-group'.
pyramid_config.testing_securitypolicy('acct:foo@example.com',
groupids=['group:test-group'])
data = self.annotation_data()
# The annotation is a reply.
data['references'] = ['parent_annotation_id']
storage.create_annotation(pyramid_request, data, group_service)
fetch_annotation.assert_called_once_with(pyramid_request.db,
'parent_annotation_id')
def test_it_sets_group_for_replies(self,
fetch_annotation,
models,
pyramid_config,
pyramid_request,
group_service):
# Make the annotation's parent belong to 'test-group'.
fetch_annotation.return_value.groupid = 'test-group'
# The request will need permission to write to 'test-group'.
pyramid_config.testing_securitypolicy('acct:foo@example.com',
groupids=['group:test-group'])
data = self.annotation_data()
assert data['groupid'] != 'test-group'
# The annotation is a reply.
data['references'] = ['parent_annotation_id']
storage.create_annotation(pyramid_request, data, group_service)
assert models.Annotation.call_args[1]['groupid'] == 'test-group'
def test_it_raises_if_parent_annotation_does_not_exist(self,
fetch_annotation,
pyramid_request,
group_service):
fetch_annotation.return_value = None
data = self.annotation_data()
# The annotation is a reply.
data['references'] = ['parent_annotation_id']
with pytest.raises(schemas.ValidationError) as exc:
storage.create_annotation(pyramid_request, data, group_service)
assert str(exc.value).startswith('references.0: ')
def test_it_finds_the_group(self, pyramid_request, pyramid_config, group_service):
data = self.annotation_data()
data['groupid'] = 'foo-group'
storage.create_annotation(pyramid_request, data, group_service)
group_service.find.assert_called_once_with('foo-group')
def test_it_allows_when_user_has_write_permission(self, pyramid_request, pyramid_config, models, group_service):
pyramid_config.testing_securitypolicy('userid', permissive=True)
group_service.find.return_value = FakeGroup()
data = self.annotation_data()
data['groupid'] = 'foo-group'
# this should not raise
result = storage.create_annotation(pyramid_request, data, group_service)
assert result == models.Annotation.return_value
def test_it_raises_when_user_is_missing_write_permission(self, pyramid_request, pyramid_config, group_service):
pyramid_config.testing_securitypolicy('userid', permissive=False)
group_service.find.return_value = FakeGroup()
data = self.annotation_data()
data['groupid'] = 'foo-group'
with pytest.raises(schemas.ValidationError) as exc:
storage.create_annotation(pyramid_request, data, group_service)
assert str(exc.value).startswith('group: ')
def test_it_raises_when_group_could_not_be_found(self, pyramid_request, pyramid_config, group_service):
pyramid_config.testing_securitypolicy('userid', permissive=True)
group_service.find.return_value = None
data = self.annotation_data()
data['groupid'] = 'missing-group'
with pytest.raises(schemas.ValidationError) as exc:
storage.create_annotation(pyramid_request, data, group_service)
assert str(exc.value).startswith('group: ')
def test_it_inits_an_Annotation_model(self, models, pyramid_request, group_service):
data = self.annotation_data()
storage.create_annotation(pyramid_request, copy.deepcopy(data), group_service)
del data['document']
models.Annotation.assert_called_once_with(**data)
def test_it_adds_the_annotation_to_the_database(self, models, pyramid_request, group_service):
storage.create_annotation(pyramid_request, self.annotation_data(), group_service)
assert models.Annotation.return_value in pyramid_request.db.added
def test_it_updates_the_document_metadata_from_the_annotation(self,
models,
pyramid_request,
datetime,
group_service):
annotation_data = self.annotation_data()
annotation_data['document']['document_meta_dicts'] = (
mock.sentinel.document_meta_dicts)
annotation_data['document']['document_uri_dicts'] = (
mock.sentinel.document_uri_dicts)
storage.create_annotation(pyramid_request, annotation_data, group_service)
models.update_document_metadata.assert_called_once_with(
pyramid_request.db,
models.Annotation.return_value.target_uri,
mock.sentinel.document_meta_dicts,
mock.sentinel.document_uri_dicts,
created=datetime.utcnow(),
updated=datetime.utcnow(),
)
def test_it_sets_the_annotations_document_id(self,
models,
pyramid_request,
group_service):
annotation_data = self.annotation_data()
document = mock.Mock()
models.update_document_metadata.return_value = document
ann = storage.create_annotation(pyramid_request, annotation_data, group_service)
assert ann.document == document
def test_it_returns_the_annotation(self, models, pyramid_request, group_service):
annotation = storage.create_annotation(pyramid_request,
self.annotation_data(),
group_service)
assert annotation == models.Annotation.return_value
def test_it_does_not_crash_if_target_selectors_is_empty(self, pyramid_request, group_service):
# Page notes have [] for target_selectors.
data = self.annotation_data()
data['target_selectors'] = []
storage.create_annotation(pyramid_request, data, group_service)
def test_it_does_not_crash_if_no_text_or_tags(self, pyramid_request, group_service):
# Highlights have no text or tags.
data = self.annotation_data()
data['text'] = data['tags'] = ''
storage.create_annotation(pyramid_request, data, group_service)
@pytest.fixture
def group_service(self, pyramid_config):
group_service = mock.Mock(spec_set=['find'])
pyramid_config.register_service(group_service, iface='memex.interfaces.IGroupService')
return group_service
def annotation_data(self):
return {
'userid': 'acct:test@localhost',
'text': 'text',
'tags': ['one', 'two'],
'shared': False,
'target_uri': 'http://www.example.com/example.html',
'groupid': '__world__',
'references': [],
'target_selectors': ['selector_one', 'selector_two'],
'document': {
'document_uri_dicts': [],
'document_meta_dicts': [],
}
}
@pytest.mark.usefixtures('models')
class TestUpdateAnnotation(object):
def test_it_gets_the_annotation_model(self,
annotation_data,
models,
session):
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
session.query.assert_called_once_with(models.Annotation)
session.query.return_value.get.assert_called_once_with(
'test_annotation_id')
def test_it_adds_new_extras(self, annotation_data, session):
annotation = session.query.return_value.get.return_value
annotation.extra = {}
annotation_data['extra'] = {'foo': 'bar'}
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
assert annotation.extra == {'foo': 'bar'}
def test_it_overwrites_existing_extras(self,
annotation_data,
session):
annotation = session.query.return_value.get.return_value
annotation.extra = {'foo': 'original_value'}
annotation_data['extra'] = {'foo': 'new_value'}
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
assert annotation.extra == {'foo': 'new_value'}
def test_it_does_not_change_extras_that_are_not_sent(self,
annotation_data,
session):
annotation = session.query.return_value.get.return_value
annotation.extra = {
'one': 1,
'two': 2,
}
annotation_data['extra'] = {'two': 22}
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
assert annotation.extra['one'] == 1
def test_it_does_not_change_extras_if_none_are_sent(self,
annotation_data,
session):
annotation = session.query.return_value.get.return_value
annotation.extra = {'one': 1, 'two': 2}
assert not annotation_data.get('extra')
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
assert annotation.extra == {'one': 1, 'two': 2}
def test_it_changes_the_updated_timestamp(self, annotation_data, session, datetime):
annotation = storage.update_annotation(session,
'test_annotation_id',
annotation_data)
assert annotation.updated == datetime.utcnow()
def test_it_updates_the_annotation(self, annotation_data, session):
annotation = session.query.return_value.get.return_value
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
for key, value in annotation_data.items():
assert getattr(annotation, key) == value
def test_it_updates_the_document_metadata_from_the_annotation(
self,
annotation_data,
session,
models,
datetime):
annotation = session.query.return_value.get.return_value
annotation_data['document']['document_meta_dicts'] = (
mock.sentinel.document_meta_dicts)
annotation_data['document']['document_uri_dicts'] = (
mock.sentinel.document_uri_dicts)
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
models.update_document_metadata.assert_called_once_with(
session,
annotation.target_uri,
mock.sentinel.document_meta_dicts,
mock.sentinel.document_uri_dicts,
updated=datetime.utcnow()
)
def test_it_updates_the_annotations_document_id(self,
annotation_data,
session,
models):
annotation = session.query.return_value.get.return_value
document = mock.Mock()
models.update_document_metadata.return_value = document
storage.update_annotation(session,
'test_annotation_id',
annotation_data)
assert annotation.document == document
def test_it_returns_the_annotation(self, annotation_data, session):
annotation = storage.update_annotation(session,
'test_annotation_id',
annotation_data)
assert annotation == session.query.return_value.get.return_value
def test_it_does_not_crash_if_no_document_in_data(self,
session):
storage.update_annotation(session, 'test_annotation_id', {})
def test_it_does_not_call_update_document_meta_if_no_document_in_data(
self,
session,
models):
storage.update_annotation(session, 'test_annotation_id', {})
assert not models.update_document_metadata.called
@pytest.fixture
def annotation_data(self):
return {
'userid': 'acct:test@localhost',
'text': 'text',
'tags': ['one', 'two'],
'shared': False,
'target_uri': 'http://www.example.com/example.html',
'groupid': '__world__',
'references': [],
'target_selectors': ['selector_one', 'selector_two'],
'document': {
'document_uri_dicts': [],
'document_meta_dicts': [],
},
'extra': {},
}
class TestDeleteAnnotation(object):
def test_it_marks_the_annotation_as_deleted(self, db_session, factories):
ann = factories.Annotation()
storage.delete_annotation(db_session, ann.id)
assert ann.deleted
def test_it_touches_the_updated_field(self, db_session, factories, datetime):
ann = factories.Annotation()
storage.delete_annotation(db_session, ann.id)
assert ann.updated == datetime.utcnow()
@pytest.fixture
def fetch_annotation(patch):
return patch('memex.storage.fetch_annotation')
@pytest.fixture
def models(patch):
models = patch('memex.storage.models', autospec=False)
models.Annotation.return_value.is_reply = False
return models
@pytest.fixture
def pyramid_request(fake_db_session, pyramid_request):
pyramid_request.db = fake_db_session
return pyramid_request
@pytest.fixture
def session(db_session):
session = mock.Mock(spec=db_session)
session.query.return_value.get.return_value.extra = {}
return session
@pytest.fixture
def datetime(patch):
return patch('memex.storage.datetime')
| 38.078156 | 116 | 0.586601 | 1,878 | 19,001 | 5.600107 | 0.114483 | 0.065228 | 0.027384 | 0.039935 | 0.719217 | 0.663402 | 0.639536 | 0.573833 | 0.500238 | 0.446991 | 0 | 0.002108 | 0.325878 | 19,001 | 498 | 117 | 38.154618 | 0.818955 | 0.022209 | 0 | 0.51831 | 0 | 0 | 0.091237 | 0.004416 | 0 | 0 | 0 | 0 | 0.101408 | 1 | 0.126761 | false | 0 | 0.028169 | 0.016901 | 0.202817 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78a208190f71ecd90d016a21319eb546528043a | 1,037 | py | Python | dynts/lib/fallback/dates.py | quantmind/dynts | 21ac57c648bfec402fa6b1fe569496cf098fb5e8 | [
"BSD-3-Clause"
] | 57 | 2015-02-10T13:42:06.000Z | 2022-03-28T14:48:36.000Z | dynts/lib/fallback/dates.py | quantmind/dynts | 21ac57c648bfec402fa6b1fe569496cf098fb5e8 | [
"BSD-3-Clause"
] | 1 | 2016-11-01T07:43:05.000Z | 2016-11-01T07:43:05.000Z | dynts/lib/fallback/dates.py | quantmind/dynts | 21ac57c648bfec402fa6b1fe569496cf098fb5e8 | [
"BSD-3-Clause"
] | 17 | 2015-05-08T04:09:19.000Z | 2021-08-02T19:24:52.000Z | from datetime import date, datetime
_EPOCH_ORD = 719163
def jstimestamp_slow(dte):
'''Convert a date or datetime object into a javsacript timestamp'''
year, month, day, hour, minute, second = dte.timetuple()[:6]
days = date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
if isinstance(dte,datetime):
return 1000*seconds + 0.001*dte.microsecond
else:
return 1000*seconds
# 30% faster than jstimestamp_slow (no call to timetuple)
def jstimestamp(dte):
'''Convert a date or datetime object into a javsacript timestamp.'''
days = date(dte.year, dte.month, 1).toordinal() - _EPOCH_ORD + dte.day - 1
hours = days*24
if isinstance(dte,datetime):
hours += dte.hour
minutes = hours*60 + dte.minute
seconds = minutes*60 + dte.second
return 1000*seconds + int(0.001*dte.microsecond)
else:
return 3600000*hours
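
# Quick worked check of the conversion above (the values follow from the formula itself):
#
#     >>> jstimestamp(date(1970, 1, 2))
#     86400000
#     >>> jstimestamp(datetime(1970, 1, 1, 0, 0, 1))
#     1000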
| 30.5 | 79 | 0.632594 | 135 | 1,037 | 4.8 | 0.37037 | 0.037037 | 0.078704 | 0.046296 | 0.373457 | 0.256173 | 0.169753 | 0.169753 | 0.169753 | 0.169753 | 0 | 0.068063 | 0.263259 | 1,037 | 33 | 80 | 31.424242 | 0.780105 | 0.174542 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.045455 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78a5edf79bd066fb2e88ce4f85069641c972ed1 | 483 | py | Python | apps/job/utils.py | matheuslins/cuzjobs | 0f46402d534fefaef394ccd09b454fe361bb36f2 | [
"MIT"
] | 1 | 2018-07-10T20:30:52.000Z | 2018-07-10T20:30:52.000Z | apps/job/utils.py | matheuslins/cuscuzjobs | 0f46402d534fefaef394ccd09b454fe361bb36f2 | [
"MIT"
] | 10 | 2019-04-25T00:01:29.000Z | 2021-04-08T18:52:52.000Z | apps/job/utils.py | matheuslins/cuzjobs | 0f46402d534fefaef394ccd09b454fe361bb36f2 | [
"MIT"
] | null | null | null | from apps.company.models import Company

def create_object_from_field(data, fields):
    new_data = []
    for dt in data:
        payload = {'name': dt.get(fields[0]) or 'Not Informed'}
        list_update = [{field: dt.get(field, "")} for field in fields[1]]
        for dict_field in list_update:
            payload.update(dict_field)
        obj, created = Company.objects.get_or_create(**payload)
        dt[fields[0]] = obj.id
        new_data.append(dt)
    return new_data
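
# Minimal sketch of the expected shapes (the field names below are illustrative
# assumptions): each row's value for fields[0] is replaced by the id of a Company that is
# fetched or created from that value plus the extra fields listed in fields[1].
#
#     data = [{'company': 'Acme', 'site': 'acme.io', 'city': 'Recife'}]
#     rows = create_object_from_field(data, ('company', ['site', 'city']))
#     # rows[0]['company'] now holds the Company primary key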
| 32.2 | 73 | 0.63354 | 69 | 483 | 4.26087 | 0.463768 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008242 | 0.246377 | 483 | 14 | 74 | 34.5 | 0.799451 | 0 | 0 | 0 | 0 | 0 | 0.033126 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78ab7e530f7127d0e0d922bc8429c267b129881 | 2,025 | py | Python | medium/116-Populating Next Right Pointers in Each Node.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | 2 | 2020-05-08T02:17:17.000Z | 2020-05-17T04:55:56.000Z | medium/116-Populating Next Right Pointers in Each Node.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | medium/116-Populating Next Right Pointers in Each Node.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | """
https://leetcode.com/problems/populating-next-right-pointers-in-each-node/
You are given a perfect binary tree where all leaves are on the same level, and every parent has two children. The binary tree has the following definition:
struct Node {
int val;
Node *left;
Node *right;
Node *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Follow up:
You may only use constant extra space.
Recursive approach is fine, you may assume implicit stack space does not count as extra space for this problem.
Example 1:
Input: root = [1,2,3,4,5,6,7]
Output: [1,#,2,3,#,4,5,6,7,#]
Explanation: Given the above perfect binary tree (Figure A), your function should populate each next pointer to point to its next right node, just like in Figure B. The serialized output is in level order as connected by the next pointers, with '#' signifying the end of each level.
Constraints:
The number of nodes in the given tree is less than 4096.
-1000 <= node.val <= 1000
"""
# time complexity: O(n), space complexity: O(1)
# this is inspired by @yavinci in the discussion area.
# The main idea is to use the current node to set up the next pointer of its left and right children, and it utilized an implication that root is a single node which can be seen as a completed layer
"""
# Definition for a Node.
class Node:
    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
        self.val = val
        self.left = left
        self.right = right
        self.next = next
"""


class Solution:
    def connect(self, root: 'Node') -> 'Node':
        head = root
        while root and root.left:
            cur = root
            while cur:
                cur.left.next = cur.right
                cur.right.next = cur.next.left if cur.next else None
                cur = cur.next
            root = root.left
        return head
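
# Small self-contained check (a sketch; it re-declares the Node class from the docstring
# above, which LeetCode normally provides): builds the perfect tree [1, 2, 3] and
# verifies that the next pointers are wired left-to-right.
if __name__ == '__main__':
    class Node:
        def __init__(self, val=0, left=None, right=None, next=None):
            self.val, self.left, self.right, self.next = val, left, right, next

    root = Node(1, Node(2), Node(3))
    Solution().connect(root)
    assert root.left.next is root.right
    assert root.right.next is None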
| 31.153846 | 282 | 0.674074 | 328 | 2,025 | 4.14939 | 0.429878 | 0.033064 | 0.028655 | 0.033799 | 0.080823 | 0.080823 | 0.080823 | 0.070536 | 0.070536 | 0.070536 | 0 | 0.01893 | 0.243457 | 2,025 | 64 | 283 | 31.640625 | 0.869452 | 0.693827 | 0 | 0 | 0 | 0 | 0.022099 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78cd268834586644473d4286b3917dda66e61e0 | 1,484 | py | Python | setup.py | covid19datahub/Python | 421ae5e1c27f8c0b2c6ca88843b321a741cf057b | [
"MIT"
] | 10 | 2020-05-21T14:24:18.000Z | 2022-02-04T00:57:37.000Z | setup.py | covid19datahub/Python | 421ae5e1c27f8c0b2c6ca88843b321a741cf057b | [
"MIT"
] | 4 | 2020-07-29T14:55:42.000Z | 2021-05-26T13:04:32.000Z | setup.py | covid19datahub/Python | 421ae5e1c27f8c0b2c6ca88843b321a741cf057b | [
"MIT"
] | 3 | 2020-07-14T12:50:47.000Z | 2021-11-01T13:43:30.000Z | #!/usr/bin/python
# requirements
try:
    with open('requirements.txt') as f:
        reqs = f.read().splitlines()
except:
    reqs = []

import setuptools

with open("README.md", "r", encoding="UTF-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name='covid19dh',
    version='2.3.0',
    author='Martin Beneš',
    author_email='martinbenes1996@gmail.com',
    description='Unified data hub for a better understanding of COVID-19 https://covid19datahub.io',
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    url='https://www.covid19datahub.io',
    download_url='https://github.com/covid19datahub/Python/archive/2.3.0.tar.gz',
    keywords=['2019-nCov', 'coronavirus', 'covid-19', 'covid-data', 'covid19-data'],
    install_requires=reqs,
    package_dir={'': '.'},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'Topic :: Database',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 32.977778 | 100 | 0.670485 | 167 | 1,484 | 5.892216 | 0.616766 | 0.060976 | 0.101626 | 0.105691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029935 | 0.167116 | 1,484 | 44 | 101 | 33.727273 | 0.766181 | 0.019542 | 0 | 0 | 0 | 0.025641 | 0.557467 | 0.047488 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025641 | 0 | 0.025641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78d307d581ee3321f30a3d626480981a33d8574 | 13,961 | py | Python | leopy/src/leopy/dataio/generate_nav2dsim_time_varying_dataset.py | rpl-cmu/leo | 4ed27b169172795930a9103598144eb3ca70a405 | [
"MIT"
] | 15 | 2021-11-15T23:04:19.000Z | 2022-03-16T05:09:48.000Z | leopy/src/leopy/dataio/generate_nav2dsim_time_varying_dataset.py | psodhi/logo | 4ed27b169172795930a9103598144eb3ca70a405 | [
"MIT"
] | null | null | null | leopy/src/leopy/dataio/generate_nav2dsim_time_varying_dataset.py | psodhi/logo | 4ed27b169172795930a9103598144eb3ca70a405 | [
"MIT"
] | 1 | 2021-08-11T02:53:29.000Z | 2021-08-11T02:53:29.000Z | #!/usr/bin/env python
import sys
sys.path.append("/usr/local/cython/")
import numpy as np
import math
import os
import hydra
import json
import csv
from attrdict import AttrDict
from datetime import datetime
import gtsam
from leopy.utils import tf_utils, dir_utils
from leopy.eval import quant_metrics
from scipy import interpolate
import matplotlib.pyplot as plt
BASE_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), "../../../.."))
CONFIG_PATH = os.path.join(BASE_PATH, "python/config/dataio/nav2d.yaml")
def wrap_logger_angles_to_pi(logger, field_names):
for field in field_names:
field_arr = np.asarray(logger[field])
field_arr[:, -1] = quant_metrics.wrap_to_pi(field_arr[:, -1]) # x, y, theta format
logger[field] = field_arr.tolist()
return logger
def get_waypoints_gui(params, poses=None):
class MouseEvents:
def __init__(self, fig, line):
self.path_start = False # if true, capture data
self.fig = fig
self.line = line
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
self.orientation = []
def connect(self):
self.a = self.fig.canvas.mpl_connect(
'button_press_event', self.on_press)
self.b = self.fig.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def on_press(self, event):
print('Pressed', event.button, event.xdata, event.ydata)
self.path_start = not self.path_start
def on_motion(self, event):
if self.path_start is True:
if len(self.orientation) == 0:
self.orientation.append(0)
else:
self.orientation.append(
np.pi/2 + np.arctan2((self.ys[-1] - event.ydata), (self.xs[-1] - event.xdata)))
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
plt.ioff()
plt.close('all')
fig = plt.figure(figsize=(12, 8))
plt.title(
"Generate waypoints for nav2d/{0:04d}/{1:04d}.json dataset: \n Click and move pointer to draw trajectory. Close window once finished.".format(params.dataio.ds_idx, params.dataio.seq_idx))
if poses is not None:
plt.plot(poses[:, 0], poses[:, 1], 'o--', c='m')
plt.xlim(params.env.area.xmin, params.env.area.xmax)
plt.ylim(params.env.area.ymin, params.env.area.ymax)
line, = plt.plot([], [])
mouse = MouseEvents(fig, line)
mouse.connect()
plt.show()
return np.hstack((np.array(mouse.xs)[:, None], np.array(mouse.ys)[:, None], np.array(mouse.orientation)[:, None]))[1:]
def plot_data(params, logger, plot_ori=False):
plt.ion()
plt.close('all')
fig = plt.figure(figsize=(12, 8))
poses_gt = np.asarray(logger.poses_gt)
meas_odom = np.asarray(logger.meas_odom)
meas_gps = np.asarray(logger.meas_gps)
num_steps = params.num_steps
poses_odom = np.zeros((num_steps, 3))
poses_gps = np.zeros((num_steps, 3))
poses_odom[0, :] = poses_gt[0, :]
# compute poses
for tstep in range(0, num_steps):
if (tstep > 0):
poses_odom[tstep] = tf_utils.pose2_to_vec3(tf_utils.vec3_to_pose2(
poses_odom[tstep-1, :]).compose(tf_utils.vec3_to_pose2(meas_odom[tstep-1, :])))
poses_gps[tstep, :] = meas_gps[tstep, :]
# plot poses
for tstep in range(num_steps-1, num_steps):
plt.cla()
plt.xlim(params.env.area.xmin, params.env.area.xmax)
plt.ylim(params.env.area.ymin, params.env.area.ymax)
plt.scatter([0], [0], marker='*', c='k', s=20,
alpha=1.0, zorder=3, edgecolor='k')
plt.scatter(poses_gt[tstep, 0], poses_gt[tstep, 1], marker=(3, 0, poses_gt[tstep, 2]/np.pi*180),
color='dimgray', s=300, alpha=0.25, zorder=3, edgecolor='dimgray')
plt.plot(poses_gt[0:tstep, 0], poses_gt[0:tstep, 1], color=params.plot.colors[0], linewidth=2, label="groundtruth")
plt.plot(poses_odom[0:tstep, 0], poses_odom[0:tstep, 1], color=params.plot.colors[1], linewidth=2, label="odom")
plt.plot(poses_gps[0:tstep, 0], poses_gps[0:tstep, 1], color=params.plot.colors[2], linewidth=2, label="gps")
# if plot_ori:
# ori = poses_gt[:, 2]
# sz_arw = 0.03
# (dx, dy) = (sz_arw * np.cos(ori), sz_arw * np.sin(ori))
# for i in range(0, num_steps):
# plt.arrow(poses_gt[i, 0], poses_gt[i, 1], dx[i], dy[i], linewidth=4,
# head_width=0.01, color='black', head_length=0.1, fc='black', ec='black')
plt.title("Logged dataset nav2d/{0:04d}/{1:04d}.json".format(params.dataio.ds_idx, params.dataio.seq_idx))
plt.legend(loc='upper right')
plt.show()
plt.pause(1)
# def covariance_type(tfrac):
# cov_type = None
# if tfrac <= 0.25:
# cov_type = 0
# elif (tfrac > 0.25) & (tfrac <= 0.5):
# cov_type = 1
# elif (tfrac > 0.5) & (tfrac <= 0.75):
# cov_type = 0
# elif (tfrac > 0.75):
# cov_type = 1
# return cov_type
def covariance_type(tfrac):
cov_type = None
if tfrac <= 0.25:
cov_type = 0
elif (tfrac > 0.25) & (tfrac <= 0.75):
cov_type = 1
elif (tfrac >= 0.75):
cov_type = 0
return cov_type
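
# Worked example of the schedule above: the middle half of the trajectory gets the second
# covariance set, the first and last quarters get the first one.
#
#     covariance_type(0.10)  # -> 0
#     covariance_type(0.50)  # -> 1
#     covariance_type(0.90)  # -> 0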
def create_measurements(params, poses, covariances):
# noise models
odom_noise0 = gtsam.noiseModel_Diagonal.Sigmas(covariances.odom0)
odom_noise1 = gtsam.noiseModel_Diagonal.Sigmas(covariances.odom1)
gps_noise0 = gtsam.noiseModel_Diagonal.Sigmas(covariances.gps0)
gps_noise1 = gtsam.noiseModel_Diagonal.Sigmas(covariances.gps1)
# samplers
sampler_odom_noise0 = gtsam.Sampler(odom_noise0, 0)
sampler_odom_noise1 = gtsam.Sampler(odom_noise1, 0)
sampler_gps_noise0 = gtsam.Sampler(gps_noise0, 0)
sampler_gps_noise1 = gtsam.Sampler(gps_noise1, 0)
# init measurements
measurements = AttrDict()
num_steps = params.num_steps
measurements.odom = np.zeros((num_steps-1, 3))
measurements.gps = np.zeros((num_steps, 3))
measurements.cov_type = np.zeros((num_steps, 1))
# add measurements
for tstep in range(0, num_steps):
cov_type = covariance_type(tstep / float(num_steps))
sampler_odom_noise = sampler_odom_noise0 if (cov_type == 0) else sampler_odom_noise1
sampler_gps_noise = sampler_gps_noise0 if (cov_type == 0) else sampler_gps_noise1
measurements.cov_type[tstep] = cov_type
# binary odom
if (tstep > 0):
prev_pose = tf_utils.vec3_to_pose2(poses[tstep-1])
curr_pose = tf_utils.vec3_to_pose2(poses[tstep])
delta_pose = prev_pose.between(curr_pose)
delta_pose_noisy = tf_utils.add_gaussian_noise(delta_pose, sampler_odom_noise.sample())
measurements.odom[tstep-1, :] = tf_utils.pose2_to_vec3(delta_pose_noisy)
# unary gps
curr_pose = tf_utils.vec3_to_pose2(poses[tstep])
curr_pose_noisy = tf_utils.add_gaussian_noise(curr_pose, sampler_gps_noise.sample())
measurements.gps[tstep, :] = tf_utils.pose2_to_vec3(curr_pose_noisy)
return measurements
def log_data(params, poses, measurements, save_file=False):
# get data for logger
sigma_mat_odom0 = np.diag(list(params.measurements.noise_models.odom0))
sigma_mat_odom0 = (np.reshape(sigma_mat_odom0, (sigma_mat_odom0.shape[0]*sigma_mat_odom0.shape[1]))).tolist()
sigma_mat_odom1 = np.diag(list(params.measurements.noise_models.odom1))
sigma_mat_odom1 = (np.reshape(sigma_mat_odom1, (sigma_mat_odom1.shape[0]*sigma_mat_odom1.shape[1]))).tolist()
sigma_mat_gps0 = np.diag(list(params.measurements.noise_models.gps0))
sigma_mat_gps0 = (np.reshape(sigma_mat_gps0, (sigma_mat_gps0.shape[0]*sigma_mat_gps0.shape[1]))).tolist()
sigma_mat_gps1 = np.diag(list(params.measurements.noise_models.gps1))
sigma_mat_gps1 = (np.reshape(sigma_mat_gps1, (sigma_mat_gps1.shape[0]*sigma_mat_gps1.shape[1]))).tolist()
factor_names, factor_keysyms, factor_keyids, factor_covs, factor_meas = ([] for i in range(5))
num_steps = params.num_steps
meas_odom, meas_gps = [], []
for tstep in range(0, num_steps):
# odom
if (tstep > 0):
factor_names.append('odom')
factor_keysyms.append(['x', 'x'])
factor_keyids.append([tstep-1, tstep])
factor_meas.append(measurements.odom[tstep-1].tolist() + measurements.cov_type[tstep-1].tolist())
sigma_mat_odom = sigma_mat_odom0 if (measurements.cov_type[tstep-1] == 0) else sigma_mat_odom1
factor_covs.append(sigma_mat_odom)
# gps
factor_names.append('gps')
factor_keysyms.append(['x'])
factor_keyids.append([tstep])
factor_meas.append(measurements.gps[tstep].tolist() + measurements.cov_type[tstep].tolist())
sigma_mat_gps = sigma_mat_gps0 if (measurements.cov_type[tstep-1] == 0) else sigma_mat_gps1
factor_covs.append(sigma_mat_gps)
# store measurement separately
meas_odom.append(measurements.odom[tstep-1].tolist())
meas_gps.append(measurements.gps[tstep].tolist())
# save to logger object
logger = AttrDict()
logger.poses_gt = poses[0:num_steps, :].tolist()
logger.factor_names = factor_names
logger.factor_keysyms = factor_keysyms
logger.factor_keyids = factor_keyids
logger.factor_covs = factor_covs
logger.factor_meas = factor_meas
logger.meas_odom = meas_odom
logger.meas_gps = meas_gps
logger = wrap_logger_angles_to_pi(logger, field_names=['poses_gt', 'meas_odom', 'meas_gps'])
logger.logname = "{0}_{1}".format(
params.dataio.dataset_name, datetime.now().strftime("%m-%d-%Y-%H-%M-%S"))
if save_file:
seq_idx = params.dataio.seq_idx
dataset_mode = "train" if (seq_idx < params.dataio.n_data_train) else "test"
filename = "{0}/{1}/{2:04d}.json".format(params.dataio.dstdir_logger, dataset_mode, seq_idx)
dir_utils.write_file_json(filename=filename, data=logger)
return logger
def load_poses_file(params):
filename = "{0}/{1}/{2}/poses/{3:04d}.json".format(
BASE_PATH, params.dataio.dstdir_dataset, params.dataio.dataset_name, params.dataio.seq_idx)
dataset = dir_utils.read_file_json(filename, verbose=False)
poses = np.asarray(dataset['poses'])
return poses
def save_poses_file(params, poses):
filename = "{0}/{1}/{2}/poses/{3:04d}.json".format(
BASE_PATH, params.dataio.dstdir_dataset, params.dataio.dataset_name, params.dataio.seq_idx)
logger = AttrDict()
logger.poses = poses.tolist()
dir_utils.write_file_json(filename, data=logger)
def random_cov_sigmas(min_val=0., max_val=1., dim=3):
sigmas = np.random.rand(dim) * (max_val - min_val) + min_val
return sigmas
def get_covariances(params):
covariances = AttrDict()
if (params.measurements.noise_models == "random"):
covariances.odom0 = random_cov_sigmas(min_val=1e-2, max_val=1e-1, dim=3)
covariances.gps0 = random_cov_sigmas(min_val=1e-1, max_val=1, dim=3)
covariances.odom1 = random_cov_sigmas(min_val=1e-1, max_val=1e-1, dim=3)
covariances.gps1 = random_cov_sigmas(min_val=1e-1, max_val=1, dim=3)
return covariances
covariances.odom0 = np.array(params.measurements.noise_models.odom0)
covariances.gps0 = np.array(params.measurements.noise_models.gps0)
covariances.odom1 = np.array(params.measurements.noise_models.odom1)
covariances.gps1 = np.array(params.measurements.noise_models.gps1)
return covariances
def interpolate_poses(poses, dim=2):
n_poses = poses.shape[0]
y = quant_metrics.wrap_to_pi(poses[:, dim])
x = np.arange(0, n_poses)
idx = np.nonzero(y)
interp = interpolate.interp1d(x[idx], y[idx], fill_value="extrapolate")
x = np.arange(0, n_poses)
y_interp = interp(x)
poses[:, dim] = y_interp
return poses
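
# Sketch of what interpolate_poses does to the heading column (dim=2): zero entries, i.e.
# waypoints without a recorded orientation, are filled by linear interpolation over the
# nonzero ones. The numbers below are illustrative assumptions.
#
#     poses = np.array([[0., 0., 0.2], [1., 0., 0.], [2., 0., 0.6]])
#     interpolate_poses(poses)[:, 2]  # -> array([0.2, 0.4, 0.6])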
@hydra.main(config_path=CONFIG_PATH)
def main(cfg):
if cfg.options.random_seed is not None:
np.random.seed(cfg.options.random_seed)
# create logger dstdir
cfg.dataio.dstdir_logger = "{0}/{1}/{2}/dataset_{3:04d}".format(
BASE_PATH, cfg.dataio.dstdir_dataset, cfg.dataio.dataset_name, cfg.dataio.start_ds_idx)
dir_utils.make_dir(cfg.dataio.dstdir_logger+"/train", clear=True)
dir_utils.make_dir(cfg.dataio.dstdir_logger+"/test", clear=True)
for ds_idx in range(cfg.dataio.start_ds_idx, cfg.dataio.n_datasets):
cfg.dataio.ds_idx = ds_idx
for seq_idx in range(cfg.dataio.start_seq_idx, cfg.dataio.n_seqs):
cfg.dataio.seq_idx = seq_idx
covariances = get_covariances(cfg)
# load poses
if (cfg.dataio.load_poses_file):
poses = load_poses_file(cfg)
poses = interpolate_poses(poses, dim=2) # angles
else:
poses = get_waypoints_gui(cfg, poses=None)
if (cfg.dataio.save_poses_file):
save_poses_file(cfg, poses)
# create measurements
cfg.num_steps = int(np.minimum(poses.shape[0], cfg.measurements.num_steps_max))
measurements = create_measurements(cfg, poses, covariances)
cfg.dataio.dstdir_logger = "{0}/{1}/{2}/dataset_{3:04d}".format(
BASE_PATH, cfg.dataio.dstdir_dataset, cfg.dataio.dataset_name, ds_idx)
dir_utils.make_dir(cfg.dataio.dstdir_logger, clear=False)
logger = log_data(cfg, poses, measurements, save_file=True)
plot_data(cfg, logger, plot_ori=False)
if __name__ == '__main__':
main()
| 35.981959 | 195 | 0.648521 | 1,983 | 13,961 | 4.337368 | 0.147252 | 0.026043 | 0.024067 | 0.030345 | 0.389955 | 0.301593 | 0.20765 | 0.152773 | 0.136496 | 0.107429 | 0 | 0.027469 | 0.220328 | 13,961 | 387 | 196 | 36.074935 | 0.762701 | 0.06468 | 0 | 0.165323 | 0 | 0.004032 | 0.04424 | 0.015131 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.056452 | 0 | 0.165323 | 0.004032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a78f30d4f2963ecc7e75da731b558f2e681905de | 5,110 | py | Python | symbol_table.py | kavj/npmd | 742fcb271e695b24bb062cdc66d455c0f397116d | [
"Apache-2.0"
] | null | null | null | symbol_table.py | kavj/npmd | 742fcb271e695b24bb062cdc66d455c0f397116d | [
"Apache-2.0"
] | null | null | null | symbol_table.py | kavj/npmd | 742fcb271e695b24bb062cdc66d455c0f397116d | [
"Apache-2.0"
] | null | null | null | import itertools
from contextlib import contextmanager
from symtable import symtable, Function, Symbol
import ir
import type_resolution as tr
from errors import CompilerError
from utils import extract_name, wrap_input
def reduces_array_dims(ref):
if isinstance(ref, ir.NameRef):
return False
elif isinstance(ref, ir.Subscript):
return False if isinstance(ref.slice, ir.Slice) else True
else:
msg = "{ref} does not represent array view creation."
raise TypeError(msg)
def map_alias_to_qualified_names(import_nodes):
    """
    Internally, we refer to qualified names for uniqueness reasons.
    This maps any aliases of modules or names from modules to
    qualified names.

    alias: module_name or alias: module_name.imported_name

    """
    qual_names = {}
    for node in import_nodes:
        if isinstance(node, ir.NameImport):
            qual_names[node.as_name] = f"{node.module}.{node.name}"
        elif isinstance(node, ir.ModImport):
            qual_names[node.as_name] = node.module
        else:
            raise ValueError(f"Unexpected import node type: {type(node)}.")
    return qual_names

class symbol:
"""
variable name symbol class
These are meant to be interned by the symbol table and not created arbitrarily.
"""
def __init__(self, name: str, type_, is_arg, is_source_name):
self.name = name
self.type_ = type_
self.is_arg = is_arg
self.is_source_name = is_source_name
def __eq__(self, other):
assert isinstance(other, symbol)
return (self.name == other.name
and self.is_source_name == other.is_source_name)
def __ne__(self, other):
assert isinstance(other, symbol)
return (self.name != other.name
or self.is_source_name != other.is_source_name)
def __hash__(self):
return hash(self.name)
class symbol_table:
"""
Per function symbol table with type information and disambiguation of original source vs implementation names.
"""
def __init__(self, namespace, symbols):
self.namespace = namespace
self.symbols = symbols
self.name_manglers = {}
@property
def from_source(self):
for s in self.symbols.values():
if s.is_source_name:
yield s
@property
def source_locals(self):
for sym in self.symbols.values():
if sym.is_source_name and not sym.is_arg:
yield sym
@property
def arguments(self):
for sym in self.symbols.values():
if sym.is_arg:
yield sym
def declares(self, name):
name = extract_name(name)
return name in self.symbols
def lookup(self, name):
name = extract_name(name)
sym = self.symbols.get(name)
return sym
def is_source_name(self, name):
sym = self.lookup(name)
return (sym is not None
and sym.is_source_name)
def is_impl_name(self, name):
sym = self.lookup(name)
if sym is None:
return False
return not sym.is_source_name
def check_type(self, name):
name = extract_name(name)
return self.symbols[name].type_
def _get_name_mangler(self, prefix: str):
# splitting by prefix helps avoids appending
# large numbers in most cases
gen = self.name_manglers.get(prefix)
if gen is None:
gen = itertools.count()
self.name_manglers[prefix] = gen
return gen
def make_unique_name_like(self, name, type_):
"""
This is used to add a unique typed temporary variable name.
"""
prefix_ = extract_name(name)
if type_ is None:
msg = f"Failed to retrieve a type for name {prefix_}."
raise CompilerError(msg)
gen = self._get_name_mangler(prefix_)
name = f"{prefix_}_{next(gen)}"
while self.declares(name):
name = f"{prefix_}_{next(gen)}"
sym = symbol(name, type_, is_arg=False, is_source_name=False)
self.symbols[name] = sym
# The input name may require mangling for uniqueness.
# Return the name as it is registered.
return wrap_input(name)
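
# Illustrative behaviour (a sketch based on the logic above): repeated calls with the same
# prefix yield mangled names like "i_0", "i_1", ..., and each new name is registered as a
# non-source, non-argument symbol before being wrapped and returned.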
def build_module_symbol_table(src, name):
    module = module_symbol_table(name)
    top = symtable(src, name, "exec")
    # use default int == 64 for now. This could be made platform specific
    # and overridable here
    for func in top.get_children():
        func_name = func.get_name()
        if func.is_nested():
            raise ValueError(f"{func_name} in file {name} appears as a nested scope, which is unsupported.")
        elif func.has_children():
            raise ValueError(f"{func_name} in file {name} contains nested scopes, which are unsupported.")
        elif func.get_type() != "function":
            raise TypeError(f"{func_name} in file {name} refers to a class rather than a function. This is "
                            f"unsupported.")
        func_table = func_symbol_table(func, tr.Int64)
        module.register_func(func_table)
    return module
| 30.969697 | 114 | 0.629354 | 671 | 5,110 | 4.605067 | 0.265276 | 0.036246 | 0.050485 | 0.024272 | 0.222006 | 0.161812 | 0.146926 | 0.106796 | 0.08479 | 0.061489 | 0 | 0.0011 | 0.288258 | 5,110 | 164 | 115 | 31.158537 | 0.848502 | 0.142074 | 0 | 0.178571 | 0 | 0 | 0.094573 | 0.015607 | 0 | 0 | 0 | 0 | 0.017857 | 1 | 0.160714 | false | 0 | 0.098214 | 0.008929 | 0.401786 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7931d8302977376be51973a317ce5f81d24d7a6 | 1,126 | py | Python | src/other/Other_from_2020-2021/home/what_is_your_name.py | jonahmakowski/PyWrskp | 93950d5bf6173f1507560ea719a6e1ed1387c95c | [
"MIT"
] | null | null | null | src/other/Other_from_2020-2021/home/what_is_your_name.py | jonahmakowski/PyWrskp | 93950d5bf6173f1507560ea719a6e1ed1387c95c | [
"MIT"
] | null | null | null | src/other/Other_from_2020-2021/home/what_is_your_name.py | jonahmakowski/PyWrskp | 93950d5bf6173f1507560ea719a6e1ed1387c95c | [
"MIT"
] | null | null | null | import sys
import os
try:
    pyWrkspLoc = os.environ["PYWRKSP"]
except KeyError:
    pyWrkspLoc = os.environ["HOME"] + input('Since you do not have the PYWRKSP env var, '
                                            'please enter the path to the pyWrskp repo, '
                                            'not including the "home" section: ')


class Name:
    def __init__(self, name, pyWrskp):
        self.name = name
        self.pyWrskp = pyWrskp
        self.fun_stuff()

    def hello_world(self):
        print('Hello World')
        print('Your name is {}!'.format(self.name))

    def lola_is_the_best(self):
        for i in range(999):
            print('Lola is the best')

    def play_game(self):
        # renamed from "name" so it does not clash with the self.name attribute
        sys.path.append(self.pyWrskp + '/src/game')
        from game import Game
        g = Game

    def fun_stuff(self):
        option = input('What do you want to do {}?'.format(self.name))
        if option == 'hello world':
            self.hello_world()
        elif option == 'lola is the best':
            self.lola_is_the_best()
        elif option == 'game':
            self.play_game()


n = Name('Jonah', pyWrkspLoc)
| 25.590909 | 108 | 0.539964 | 141 | 1,126 | 4.212766 | 0.425532 | 0.06734 | 0.060606 | 0.087542 | 0.057239 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004093 | 0.349023 | 1,126 | 43 | 109 | 26.186047 | 0.806276 | 0 | 0 | 0 | 0 | 0 | 0.215808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0 | 0.09375 | 0 | 0.28125 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a794b20c5def7f6ced8453d979332c9f94f5d7af | 6,832 | py | Python | spark-streaming-etl/analyze_tweets.py | jrnhofman/Spark-Streaming-Kafka-Stock-Tweets-Project | e23fe4c4e271afde5ef1f6243106f7ba86fe4551 | [
"MIT"
] | null | null | null | spark-streaming-etl/analyze_tweets.py | jrnhofman/Spark-Streaming-Kafka-Stock-Tweets-Project | e23fe4c4e271afde5ef1f6243106f7ba86fe4551 | [
"MIT"
] | null | null | null | spark-streaming-etl/analyze_tweets.py | jrnhofman/Spark-Streaming-Kafka-Stock-Tweets-Project | e23fe4c4e271afde5ef1f6243106f7ba86fe4551 | [
"MIT"
] | null | null | null | from pyspark.sql import SparkSession
from pyspark.sql.functions import explode, unix_timestamp
from pyspark.sql.functions import split, expr, lit
from pyspark.sql.functions import lower, col, regexp_replace
from pyspark.sql.functions import window, concat_ws
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType, DoubleType
import requests
spark = SparkSession \
.builder \
.appName("TweetAndStockApp") \
.getOrCreate()
# Reading stock quotes provided every minute
# from Kafka topic
ticker_df = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "broker:9092") \
.option("subscribe", "STOCK_QUOTES") \
.load()
tickers = (
ticker_df
.withWatermark("timestamp", "120 seconds")
.select(
col("timestamp")
, split(col("value"), " ").getItem(0).alias("Symbol")
, split(col("value"), " ").getItem(1).alias("Price")
)
.select(
col("timestamp").alias("ticker_ts")
, lower(col("Symbol")).alias("ticker_symbol")
, col("Price").alias("price")
)
)
# Reading tweets pre-filtered on hashtags
# from Kafka topic
df = (spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", "broker:9092")
.option("subscribe", "TWEETS")
.load()
)
def map_hashtags_to_tickers(x):
mapper = {
'google' : 'goog'
, 'microsoft' : 'msft'
, 'nvidia' : 'nvda'
, 'facebook' : 'fb'
, 'adobe' : 'adbe'
, 'amazon' : 'amzn'
, 'apple' : 'aapl'
}
return mapper[x] if x in mapper.keys() else x
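
# Quick illustration of the mapping above: known company hashtags collapse onto their
# ticker symbol, anything else passes through unchanged.
#
#     map_hashtags_to_tickers('google')  # -> 'goog'
#     map_hashtags_to_tickers('tesla')   # -> 'tesla'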
# For demonstration purposes we track tweets about a few well-known companies.
# Besides the ticker symbol itself we also match each company-name alias, since
# most people refer to a company by its name rather than by its stock symbol.
map_hashtags_to_tickers_udf = udf(map_hashtags_to_tickers)
hashtags = (df
.withWatermark("timestamp", "120 seconds")
.selectExpr("timestamp", "CAST(value as string) as value")
.select(
col("timestamp")
, col("value").alias("tweet")
# splitting to extract the hashtags
, explode(split(col("value"), " ")).alias("word")
)
.filter(col("word").contains("#"))
.select(col('timestamp'), col("tweet"), lower(col('word')).alias('Symbol'))
.select(
col('timestamp').alias('ht_ts')
, col("tweet")
, regexp_replace(col('Symbol'), '#', '').alias('ht_symbol')
, lit(1).alias('cnt')
)
.select(
col('ht_ts')
, col('tweet')
, map_hashtags_to_tickers_udf(col('ht_symbol')).alias('ht_symbol')
, col('cnt')
)
)
# Joining stock quotes and hashtags
# On the stock symbols (or aliases, i.e. GOOG and Google)
# This is a left join since no tweets can be produced
# within a minute of stock quote
joined = tickers.join(
hashtags,
expr("""
ht_symbol = ticker_symbol
AND
ticker_ts <= ht_ts AND
ticker_ts + interval 60 seconds > ht_ts
"""),
"leftOuter"
)
# We want to aggregate tweet counts and stock prices over a time window, which cannot be
# done cleanly right after a stream-stream join. As a workaround we write the joined data
# to a Kafka topic and read it back for the aggregation, which is what happens below.
(joined
.fillna({ 'cnt': 0, 'tweet': ''})
.withColumn(
"value"
, concat_ws(
','
, col("ticker_ts")
, col("ticker_symbol")
, col("price")
, col("cnt")
, col("tweet")
, col("ht_ts")
)
)
.writeStream
.format("kafka")
.option("kafka.bootstrap.servers", "broker:9092")
.option("topic", "JOINED_TWEETS")
.option("checkpointLocation", "checkpoints")
.start())
# Read the topic we just wrote back
joined_df = (spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", "broker:9092")
# enable this if you don't want to start from scratch
# every time you relaunch the app
.option("startingOffsets", "earliest")
.option("subscribe", "JOINED_TWEETS")
.load()
)
result = (joined_df
# decoding and splitting back to columns
.selectExpr("CAST(value as string) as value")
.select(
split(col("value"), ",").getItem(0).alias("ticker_ts"),
split(col("value"), ",").getItem(1).alias("ticker_symbol"),
split(col("value"), ",").getItem(2).alias("price"),
split(col("value"), ",").getItem(3).alias("cnt"),
split(col("value"), ",").getItem(4).alias("tweet"),
split(col("value"), ",").getItem(5).alias("ht_ts"),
)
# casting
.select(
col("ticker_ts").cast("timestamp")
, col("ticker_symbol")
, col("ht_ts").cast("timestamp")
, col("price").cast(DoubleType())
, col("cnt").cast(IntegerType())
, col(("tweet"))
)
.filter(col("ticker_ts") > unix_timestamp(lit('2021-04-01 12:00:00')).cast('timestamp'))
.withWatermark("ticker_ts", "2 minutes")
# grouping data in 5 minute windows
.groupBy(
window(col("ticker_ts"), "5 minutes", "5 minutes").alias("ticker_window")
,col("ticker_symbol")
)
.agg({'price': 'avg', 'cnt': 'sum', 'tweet': 'collect_list'})
# final result
.select(
col('ticker_window').start.alias('ticker_ts')
, col('ticker_window')
, col('ticker_symbol')
, col('avg(price)').alias('price')
, col('sum(cnt)').alias('n_tweets')
, col('collect_list(tweet)').alias('tweets')
)
)
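
# Shape of each aggregated record produced above (values are illustrative assumptions):
# one row per (5-minute window, symbol) carrying the average quote, the number of matched
# tweets and the collected tweet texts, e.g.
#     (ticker_ts, ticker_window, 'goog', 2031.4, 3, ['tweet one', 'tweet two', 'tweet three'])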
def send_df_to_dashboard(df, epoch_id):
if df.count() > 0:
request_data = {'tickers': [], 'ticker_ts_str': [], 'n_tweets' : [], 'price': []}
df_pd = df.toPandas()
df_pd['ticker_ts_str'] = df_pd['ticker_ts'].apply(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))
for s in df_pd.ticker_symbol.unique():
request_data['tickers'].append(s)
stats = df_pd[df_pd.ticker_symbol==s]
for c in ['ticker_ts_str', 'price', 'n_tweets']:
request_data[c].append(stats[c].values.tolist())
request_data = {k:str(v) for k,v in request_data.items()}
print("DATA BEING SEND")
print(request_data)
url = 'http://dashboard:9001/updateData'
response = requests.post(url, data=request_data)
print("RESPONSE")
print(response.status_code)
# Write result to endpoint to be picked up by dashboard
result.writeStream.foreachBatch(send_df_to_dashboard).start()
# write result to console for debugging purposes
query = result \
.writeStream \
.outputMode("append") \
.format("console") \
.option("truncate", "false") \
.start()
query.awaitTermination()
| 30.914027 | 100 | 0.598214 | 834 | 6,832 | 4.788969 | 0.314149 | 0.028042 | 0.029294 | 0.04006 | 0.197296 | 0.110916 | 0.084877 | 0.069855 | 0.069855 | 0.056335 | 0 | 0.010985 | 0.240486 | 6,832 | 220 | 101 | 31.054545 | 0.75872 | 0.163495 | 0 | 0.163743 | 0 | 0 | 0.254309 | 0.01618 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011696 | false | 0 | 0.046784 | 0 | 0.064327 | 0.023392 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a795557d3b1d7e374f3379251fae85513490f841 | 7,538 | py | Python | code/preprocess.py | cltl/Guido_Ansem_Crosslingual_Aspect_Classifcation | 0f67f2dd8822dd2a5491d44c5e7eae6d5a930379 | [
"MIT"
] | null | null | null | code/preprocess.py | cltl/Guido_Ansem_Crosslingual_Aspect_Classifcation | 0f67f2dd8822dd2a5491d44c5e7eae6d5a930379 | [
"MIT"
] | null | null | null | code/preprocess.py | cltl/Guido_Ansem_Crosslingual_Aspect_Classifcation | 0f67f2dd8822dd2a5491d44c5e7eae6d5a930379 | [
"MIT"
] | null | null | null | import json
import nltk
import copy
import random
import itertools
import numpy as np
from tqdm import tqdm
from collections import Counter
from transformers import XLMRobertaTokenizerFast
def load_data(path):
with open(path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
sentences = [line.split('LABELS:')[0] for line in tqdm(lines)]
sentences = [nltk.word_tokenize(sent) for sent in tqdm(sentences)]
aspects = []
for line in tqdm(lines):
sentence_aspects = []
for word in line.split('LABELS:')[1].split():
if word.isupper() and word not in ['CATEGORY1:', 'CATEGORY2:', 'TARGET:']:
sentence_aspects.append(word)
aspects.append(sentence_aspects)
targets = []
for line in tqdm(lines):
sentence_targets = []
for piece in line.split('LABELS:')[1].split('TARGET')[1:]:
piece_targets = []
for word in piece.split():
if not word.isupper() and word not in ['restaurant', 'camera', 'None'] and len(word) > 1:
piece_targets.append(word)
if len(piece_targets) != 0:
sentence_targets.append(' '.join(piece_targets))
targets.append(sentence_targets)
return sentences, aspects, targets
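# Drop sentences whose label sequence contains any aspect label seen fewer than 20 times in the corpus.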
def remove_low_count_labels(labels, sentences):
remove = [key for key, value in Counter(itertools.chain(*labels)).items() if value < 20]
keep_labels = []
keep_sents = []
for i, label in enumerate(labels):
if not set(label).intersection(set(remove)):
keep_labels.append(label)
keep_sents.append(sentences[i])
return keep_labels, keep_sents
def resample(labels, sentences):
scaling = 0
no_aspect_labels = []
no_aspect_sentences = []
aspect_labels = []
aspect_sentences = []
for i, label in enumerate(labels):
if all([token == 'O' for token in label]):
no_aspect_labels.append(label)
no_aspect_sentences.append(sentences[i])
else:
aspect_labels.append(label)
aspect_sentences.append(sentences[i])
pairs = list(zip(no_aspect_labels, no_aspect_sentences))
random.shuffle(pairs)
no_aspect_labels = [pair[0] for pair in pairs]
no_aspect_sentences = [pair[1] for pair in pairs]
labels = aspect_labels + no_aspect_labels[:scaling * len(aspect_labels)]
sentences = aspect_sentences + no_aspect_sentences[:scaling * len(aspect_sentences)]
return labels, sentences
def get_training_label_ids(data_type):
tag2id = json.load(open(f'./{data_type}_tag2id.json', 'r', encoding='utf-8'))
id2tag = json.load(open(f'./{data_type}_id2tag.json', 'r', encoding='utf-8'))
return tag2id, id2tag
def generate_training_label_ids(unique_tags, data_type):
tag2id = {tag: id for id, tag in tqdm(enumerate(unique_tags))}
id2tag = {id: tag for tag, id in tqdm(tag2id.items())}
json.dump(tag2id, open(f'./{data_type}_tag2id.json', 'w', encoding='utf-8'))
json.dump(id2tag, open(f'./{data_type}_id2tag.json', 'w', encoding='utf-8'))
return tag2id, id2tag
def generate_labels(sentences, aspects, targets, mode='train', data_type=None):
labels = match_BIO_tags(sentences, aspects, targets)
labels, sentences = remove_low_count_labels(labels, sentences)
labels, sentences = resample(labels, sentences)
unique_tags = sorted(list(set(label for doc in tqdm(labels) for label in doc)))
if mode == 'train':
tag2id, id2tag = generate_training_label_ids(unique_tags, data_type)
elif mode == 'test':
tag2id, id2tag = get_training_label_ids(data_type)
return labels, sentences, tag2id, id2tag
def encode_data(sentences, labels, tag2id, id2tag):
tokenizer = XLMRobertaTokenizerFast.from_pretrained('xlm-roberta-base')
encodings = tokenizer(sentences, is_split_into_words=True, return_offsets_mapping=True, padding=True)
labels = encode_tags(labels, encodings, tag2id, id2tag)
return encodings, labels
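# The two helpers below assign B-/I-<ASPECT> tags to tokens that match an annotated target phrase: single_token_target handles one-word targets, multiple_token_target handles multi-word targets.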
def single_token_target(BIO_sent_tags, sents, aspects, targets, token, sent_i, token_i):
target_bools = [token.lower() == word.lower() for target in targets[sent_i] for word in target.split()]
if any(target_bools):
aspect = aspects[sent_i][[i for i, target_bool in enumerate(target_bools) if target_bool][0]]
if token_i == 0 or BIO_sent_tags[token_i - 1] == 'O':
BIO_sent_tags[token_i] = 'B-' + aspect
else:
BIO_sent_tags[token_i] = 'I-' + aspect
return BIO_sent_tags
def multiple_token_target(BIO_sent_tags, sents, aspects, targets, token, sent_i, token_i):
target_bools = []
for target in targets[sent_i]:
target_bools.append([token == word for word in target.split()])
target_bools = [any(target_bool) for target_bool in target_bools]
if any(target_bools):
aspect = aspects[sent_i][[i for i, target_bool in enumerate(target_bools) if target_bool][0]]
if token_i == 0 or BIO_sent_tags[token_i - 1] == 'O':
BIO_sent_tags[token_i] = 'B-' + aspect
else:
BIO_sent_tags[token_i] = 'I-' + aspect
return BIO_sent_tags
def match_BIO_tags(sents, aspects, targets):
aspects = [['None'] if len(aspect) == 0 else aspect for aspect in aspects]
targets = [['None'] if len(target) == 0 else target for target in targets]
BIO_tags = []
for sent_i, sent in enumerate(sents):
BIO_sent_tags = ['O'] * len(sent)
for token_i, token in enumerate(sent):
if any([len(target.split()) > 1 for target in targets[sent_i]]):
BIO_sent_tags = multiple_token_target(BIO_sent_tags, sents, aspects, targets, token, sent_i, token_i)
else:
BIO_sent_tags = single_token_target(BIO_sent_tags, sents, aspects, targets, token, sent_i, token_i)
BIO_tags.append(BIO_sent_tags)
return BIO_tags
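# fix_encodings strips tokens with input id 6 (apparently an empty-string piece, judging by the 'empty_strings' counter) and re-pads each sequence with pad id 1 / attention mask 0 so lengths stay aligned with the labels.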
def fix_encodings(encodings):
encs = copy.deepcopy(encodings)
encs['input_ids'] = []
encs['attention_mask'] = []
encs['offset_mapping'] = []
for i, encoding in tqdm(enumerate(encodings['input_ids'])):
empty_strings = 0
input_ids = []
attention_mask = []
offset_mapping = []
for j, item in enumerate(encoding):
if item != 6:
input_ids.append(encodings['input_ids'][i][j])
attention_mask.append(encodings['attention_mask'][i][j])
offset_mapping.append(encodings['offset_mapping'][i][j])
else:
empty_strings += 1
encs['input_ids'].append(input_ids + ([1] * empty_strings))
encs['attention_mask'].append(attention_mask + ([0] * empty_strings))
encs['offset_mapping'].append(offset_mapping + ([(0, 0)] * empty_strings))
return encs
def encode_tags(tags, encodings, tag2id, id2tag):
encodings = fix_encodings(encodings)
labels = [[tag2id[tag] for tag in doc] for doc in tags]
encoded_labels = []
i = 0
for doc_labels, doc_offset in tqdm(zip(labels, encodings.offset_mapping)):
# create an empty array of -100
doc_enc_labels = np.ones(len(doc_offset),dtype=int) * -100
arr_offset = np.array(doc_offset)
# set labels whose first offset position is 0 and the second is not 0
doc_enc_labels[(arr_offset[:,0] == 0) & (arr_offset[:,1] != 0)] = doc_labels
encoded_labels.append(doc_enc_labels.tolist())
i += 1
return encoded_labels | 37.133005 | 117 | 0.652428 | 1,016 | 7,538 | 4.624016 | 0.156496 | 0.02384 | 0.037463 | 0.020434 | 0.322052 | 0.290549 | 0.182631 | 0.170711 | 0.13708 | 0.13708 | 0 | 0.013049 | 0.227381 | 7,538 | 203 | 118 | 37.133005 | 0.793613 | 0.012868 | 0 | 0.148387 | 0 | 0 | 0.05041 | 0.013443 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077419 | false | 0.006452 | 0.058065 | 0 | 0.212903 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7960526c5da52edce83faa14afd8078d5d66e8c | 1,558 | py | Python | Sources/Compiler/SwiftTypeTransfer.py | Tuluobo/HTTPIDL | 0b4476fe0fe1ae8237c92ca53b1fc8be1f8c2d5d | [
"MIT"
] | null | null | null | Sources/Compiler/SwiftTypeTransfer.py | Tuluobo/HTTPIDL | 0b4476fe0fe1ae8237c92ca53b1fc8be1f8c2d5d | [
"MIT"
] | null | null | null | Sources/Compiler/SwiftTypeTransfer.py | Tuluobo/HTTPIDL | 0b4476fe0fe1ae8237c92ca53b1fc8be1f8c2d5d | [
"MIT"
] | null | null | null | idl_to_swift_type = {'UINT32': 'UInt32', 'UINT64': 'UInt64', 'INT32': 'Int32', 'INT64': 'Int64', 'BOOL': 'Bool', 'DOUBLE': 'Double', 'STRING': 'String', 'FILE': 'HTTPFile', 'BLOB': 'HTTPData'}
def swift_base_type_name_from_idl_base_type(type_name):
if type_name in idl_to_swift_type:
builtin_type_name = idl_to_swift_type[type_name]
return builtin_type_name
return type_name
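# Recursively resolve an IDL parameter type into its Swift spelling: plain base types, dict generics ([Key: Value]) and array generics ([Element]).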
def swift_type_name(idl_param_type_context):
base_type = idl_param_type_context.baseType()
if base_type is not None:
return swift_base_type_name(base_type)
else:
generic_type = idl_param_type_context.genericType()
dict_type = generic_type.dictGenericParam()
if dict_type is not None:
return swift_dict_type_name(dict_type)
else:
array_type = generic_type.arrayGenericParam()
return swift_array_type_name(array_type)
def swift_base_type_name(base_type_context):
struct_name = base_type_context.structName()
if struct_name is not None:
return struct_name.getText()
else:
return idl_to_swift_type[base_type_context.getText()]
def swift_dict_type_name(dict_param_context):
key_type = swift_base_type_name_from_idl_base_type(dict_param_context.baseType().getText())
value_type = swift_type_name(dict_param_context.paramType())
return '[' + key_type + ': ' + value_type + ']'
def swift_array_type_name(array_param_context):
element_type = swift_type_name(array_param_context.paramType())
return '[' + element_type + ']' | 38 | 192 | 0.720154 | 215 | 1,558 | 4.739535 | 0.2 | 0.133464 | 0.039254 | 0.054956 | 0.358194 | 0.154073 | 0.062807 | 0.062807 | 0 | 0 | 0 | 0.01251 | 0.179076 | 1,558 | 41 | 193 | 38 | 0.784206 | 0 | 0 | 0.096774 | 0 | 0 | 0.067992 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0 | 0 | 0.451613 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7979d633829f4a42cfe2982317ea29d0a69f942 | 13,485 | py | Python | luxtronik/calculations.py | Cees-van-Beek/python-luxtronik | 662281ebbe787d618c37e4a25b6f3b0053dc6a4f | [
"MIT"
] | 7 | 2020-06-30T11:11:55.000Z | 2021-12-19T13:12:01.000Z | luxtronik/calculations.py | Cees-van-Beek/python-luxtronik | 662281ebbe787d618c37e4a25b6f3b0053dc6a4f | [
"MIT"
] | 20 | 2020-05-02T12:28:14.000Z | 2022-03-25T20:49:40.000Z | luxtronik/calculations.py | Cees-van-Beek/python-luxtronik | 662281ebbe787d618c37e4a25b6f3b0053dc6a4f | [
"MIT"
] | 7 | 2020-06-13T14:42:52.000Z | 2022-03-04T19:53:49.000Z | """Parse luxtonik calculations."""
import logging
from luxtronik.datatypes import (
BivalenceLevel,
Bool,
Celsius,
Count,
Energy,
Errorcode,
Flow,
Frequency,
HeatpumpCode,
Icon,
IPAddress,
Kelvin,
Level,
MainMenuStatusLine1,
MainMenuStatusLine2,
MainMenuStatusLine3,
OperationMode,
Percent2,
Power,
Pressure,
Pulses,
Seconds,
SecOperationMode,
Speed,
SwitchoffFile,
Timestamp,
Unknown,
Version,
Voltage,
)
LOGGER = logging.getLogger("Luxtronik.Calculations")
class Calculations:
"""Class that holds all calculations."""
calculations = {
0: Unknown("Unknown_Calculation_0"),
1: Unknown("Unknown_Calculation_1"),
2: Unknown("Unknown_Calculation_2"),
3: Unknown("Unknown_Calculation_3"),
4: Unknown("Unknown_Calculation_4"),
5: Unknown("Unknown_Calculation_5"),
6: Unknown("Unknown_Calculation_6"),
7: Unknown("Unknown_Calculation_7"),
8: Unknown("Unknown_Calculation_8"),
9: Unknown("Unknown_Calculation_9"),
10: Celsius("ID_WEB_Temperatur_TVL"),
11: Celsius("ID_WEB_Temperatur_TRL"),
12: Celsius("ID_WEB_Sollwert_TRL_HZ"),
13: Celsius("ID_WEB_Temperatur_TRL_ext"),
14: Celsius("ID_WEB_Temperatur_THG"),
15: Celsius("ID_WEB_Temperatur_TA"),
16: Celsius("ID_WEB_Mitteltemperatur"),
17: Celsius("ID_WEB_Temperatur_TBW"),
18: Celsius("ID_WEB_Einst_BWS_akt"),
19: Celsius("ID_WEB_Temperatur_TWE"),
20: Celsius("ID_WEB_Temperatur_TWA"),
21: Celsius("ID_WEB_Temperatur_TFB1"),
22: Celsius("ID_WEB_Sollwert_TVL_MK1"),
23: Celsius("ID_WEB_Temperatur_RFV"),
24: Celsius("ID_WEB_Temperatur_TFB2"),
25: Celsius("ID_WEB_Sollwert_TVL_MK2"),
26: Celsius("ID_WEB_Temperatur_TSK"),
27: Celsius("ID_WEB_Temperatur_TSS"),
28: Celsius("ID_WEB_Temperatur_TEE"),
29: Bool("ID_WEB_ASDin"),
30: Bool("ID_WEB_BWTin"),
31: Bool("ID_WEB_EVUin"),
32: Bool("ID_WEB_HDin"),
33: Bool("ID_WEB_MOTin"),
34: Bool("ID_WEB_NDin"),
35: Bool("ID_WEB_PEXin"),
36: Bool("ID_WEB_SWTin"),
37: Bool("ID_WEB_AVout"),
38: Bool("ID_WEB_BUPout"),
39: Bool("ID_WEB_HUPout"),
40: Bool("ID_WEB_MA1out"),
41: Bool("ID_WEB_MZ1out"),
42: Bool("ID_WEB_VENout"),
43: Bool("ID_WEB_VBOout"),
44: Bool("ID_WEB_VD1out"),
45: Bool("ID_WEB_VD2out"),
46: Bool("ID_WEB_ZIPout"),
47: Bool("ID_WEB_ZUPout"),
48: Bool("ID_WEB_ZW1out"),
49: Bool("ID_WEB_ZW2SSTout"),
50: Bool("ID_WEB_ZW3SSTout"),
51: Bool("ID_WEB_FP2out"),
52: Bool("ID_WEB_SLPout"),
53: Bool("ID_WEB_SUPout"),
54: Bool("ID_WEB_MZ2out"),
55: Bool("ID_WEB_MA2out"),
56: Seconds("ID_WEB_Zaehler_BetrZeitVD1"),
57: Pulses("ID_WEB_Zaehler_BetrZeitImpVD1"),
58: Seconds("ID_WEB_Zaehler_BetrZeitVD2"),
59: Pulses("ID_WEB_Zaehler_BetrZeitImpVD2"),
60: Seconds("ID_WEB_Zaehler_BetrZeitZWE1"),
61: Seconds("ID_WEB_Zaehler_BetrZeitZWE2"),
62: Seconds("ID_WEB_Zaehler_BetrZeitZWE3"),
63: Seconds("ID_WEB_Zaehler_BetrZeitWP"),
64: Seconds("ID_WEB_Zaehler_BetrZeitHz"),
65: Seconds("ID_WEB_Zaehler_BetrZeitBW"),
66: Seconds("ID_WEB_Zaehler_BetrZeitKue"),
67: Seconds("ID_WEB_Time_WPein_akt"),
68: Seconds("ID_WEB_Time_ZWE1_akt"),
69: Seconds("ID_WEB_Time_ZWE2_akt"),
70: Seconds("ID_WEB_Timer_EinschVerz"),
71: Seconds("ID_WEB_Time_SSPAUS_akt"),
72: Seconds("ID_WEB_Time_SSPEIN_akt"),
73: Seconds("ID_WEB_Time_VDStd_akt"),
74: Seconds("ID_WEB_Time_HRM_akt"),
75: Seconds("ID_WEB_Time_HRW_akt"),
76: Seconds("ID_WEB_Time_LGS_akt"),
77: Seconds("ID_WEB_Time_SBW_akt"),
78: HeatpumpCode("ID_WEB_Code_WP_akt"),
79: BivalenceLevel("ID_WEB_BIV_Stufe_akt"),
80: OperationMode("ID_WEB_WP_BZ_akt"),
81: Version("ID_WEB_SoftStand"),
91: IPAddress("ID_WEB_AdresseIP_akt"),
92: IPAddress("ID_WEB_SubNetMask_akt"),
93: IPAddress("ID_WEB_Add_Broadcast"),
94: IPAddress("ID_WEB_Add_StdGateway"),
95: Timestamp("ID_WEB_ERROR_Time0"),
96: Timestamp("ID_WEB_ERROR_Time1"),
97: Timestamp("ID_WEB_ERROR_Time2"),
98: Timestamp("ID_WEB_ERROR_Time3"),
99: Timestamp("ID_WEB_ERROR_Time4"),
100: Errorcode("ID_WEB_ERROR_Nr0"),
101: Errorcode("ID_WEB_ERROR_Nr1"),
102: Errorcode("ID_WEB_ERROR_Nr2"),
103: Errorcode("ID_WEB_ERROR_Nr3"),
104: Errorcode("ID_WEB_ERROR_Nr4"),
105: Count("ID_WEB_AnzahlFehlerInSpeicher"),
106: SwitchoffFile("ID_WEB_Switchoff_file_Nr0"),
107: SwitchoffFile("ID_WEB_Switchoff_file_Nr1"),
108: SwitchoffFile("ID_WEB_Switchoff_file_Nr2"),
109: SwitchoffFile("ID_WEB_Switchoff_file_Nr3"),
110: SwitchoffFile("ID_WEB_Switchoff_file_Nr4"),
111: Timestamp("ID_WEB_Switchoff_file_Time0"),
112: Timestamp("ID_WEB_Switchoff_file_Time1"),
113: Timestamp("ID_WEB_Switchoff_file_Time2"),
114: Timestamp("ID_WEB_Switchoff_file_Time3"),
115: Timestamp("ID_WEB_Switchoff_file_Time4"),
116: Bool("ID_WEB_Comfort_exists"),
117: MainMenuStatusLine1("ID_WEB_HauptMenuStatus_Zeile1"),
118: MainMenuStatusLine2("ID_WEB_HauptMenuStatus_Zeile2"),
119: MainMenuStatusLine3("ID_WEB_HauptMenuStatus_Zeile3"),
120: Seconds("ID_WEB_HauptMenuStatus_Zeit"),
121: Level("ID_WEB_HauptMenuAHP_Stufe"),
122: Celsius("ID_WEB_HauptMenuAHP_Temp"),
123: Seconds("ID_WEB_HauptMenuAHP_Zeit"),
124: Bool("ID_WEB_SH_BWW"),
125: Icon("ID_WEB_SH_HZ"),
126: Icon("ID_WEB_SH_MK1"),
127: Icon("ID_WEB_SH_MK2"),
128: Unknown("ID_WEB_Einst_Kurzrpgramm"),
129: Unknown("ID_WEB_StatusSlave_1"),
130: Unknown("ID_WEB_StatusSlave_2"),
131: Unknown("ID_WEB_StatusSlave_3"),
132: Unknown("ID_WEB_StatusSlave_4"),
133: Unknown("ID_WEB_StatusSlave_5"),
134: Timestamp("ID_WEB_AktuelleTimeStamp"),
135: Icon("ID_WEB_SH_MK3"),
136: Celsius("ID_WEB_Sollwert_TVL_MK3"),
137: Celsius("ID_WEB_Temperatur_TFB3"),
138: Bool("ID_WEB_MZ3out"),
139: Bool("ID_WEB_MA3out"),
140: Bool("ID_WEB_FP3out"),
141: Seconds("ID_WEB_Time_AbtIn"),
142: Celsius("ID_WEB_Temperatur_RFV2"),
143: Celsius("ID_WEB_Temperatur_RFV3"),
144: Icon("ID_WEB_SH_SW"),
145: Unknown("ID_WEB_Zaehler_BetrZeitSW"),
146: Bool("ID_WEB_FreigabKuehl"),
147: Voltage("ID_WEB_AnalogIn"),
148: Unknown("ID_WEB_SonderZeichen"),
149: Icon("ID_WEB_SH_ZIP"),
150: Icon("ID_WEB_WebsrvProgrammWerteBeobarten"),
151: Energy("ID_WEB_WMZ_Heizung"),
152: Energy("ID_WEB_WMZ_Brauchwasser"),
153: Energy("ID_WEB_WMZ_Schwimmbad"),
154: Energy("ID_WEB_WMZ_Seit"),
155: Flow("ID_WEB_WMZ_Durchfluss"),
156: Voltage("ID_WEB_AnalogOut1"),
157: Voltage("ID_WEB_AnalogOut2"),
158: Seconds("ID_WEB_Time_Heissgas"),
159: Celsius("ID_WEB_Temp_Lueftung_Zuluft"),
160: Celsius("ID_WEB_Temp_Lueftung_Abluft"),
161: Seconds("ID_WEB_Zaehler_BetrZeitSolar"),
162: Voltage("ID_WEB_AnalogOut3"),
163: Voltage("ID_WEB_AnalogOut4"),
164: Voltage("ID_WEB_Out_VZU"),
165: Voltage("ID_WEB_Out_VAB"),
166: Bool("ID_WEB_Out_VSK"),
167: Bool("ID_WEB_Out_FRH"),
168: Voltage("ID_WEB_AnalogIn2"),
169: Voltage("ID_WEB_AnalogIn3"),
170: Bool("ID_WEB_SAXin"),
171: Bool("ID_WEB_SPLin"),
172: Bool("ID_WEB_Compact_exists"),
173: Flow("ID_WEB_Durchfluss_WQ"),
174: Bool("ID_WEB_LIN_exists"),
175: Celsius("ID_WEB_LIN_ANSAUG_VERDAMPFER"),
176: Celsius("ID_WEB_LIN_ANSAUG_VERDICHTER"),
177: Celsius("ID_WEB_LIN_VDH"),
178: Kelvin("ID_WEB_LIN_UH"),
179: Kelvin("ID_WEB_LIN_UH_Soll"),
180: Pressure("ID_WEB_LIN_HD"),
181: Pressure("ID_WEB_LIN_ND"),
182: Bool("ID_WEB_LIN_VDH_out"),
183: Percent2("ID_WEB_HZIO_PWM"),
184: Speed("ID_WEB_HZIO_VEN"),
185: Unknown("ID_WEB_HZIO_EVU2"),
186: Bool("ID_WEB_HZIO_STB"),
187: Energy("ID_WEB_SEC_Qh_Soll"),
188: Energy("ID_WEB_SEC_Qh_Ist"),
189: Celsius("ID_WEB_SEC_TVL_Soll"),
190: Unknown("ID_WEB_SEC_Software"),
191: SecOperationMode("ID_WEB_SEC_BZ"),
192: Unknown("ID_WEB_SEC_VWV"),
193: Speed("ID_WEB_SEC_VD"),
194: Celsius("ID_WEB_SEC_VerdEVI"),
195: Celsius("ID_WEB_SEC_AnsEVI"),
196: Kelvin("ID_WEB_SEC_UEH_EVI"),
197: Kelvin("ID_WEB_SEC_UEH_EVI_S"),
198: Celsius("ID_WEB_SEC_KondTemp"),
199: Celsius("ID_WEB_SEC_FlussigEx"),
200: Celsius("ID_WEB_SEC_UK_EEV"),
201: Pressure("ID_WEB_SEC_EVI_Druck"),
202: Voltage("ID_WEB_SEC_U_Inv"),
203: Celsius("ID_WEB_Temperatur_THG_2"),
204: Celsius("ID_WEB_Temperatur_TWE_2"),
205: Celsius("ID_WEB_LIN_ANSAUG_VERDAMPFER_2"),
206: Celsius("ID_WEB_LIN_ANSAUG_VERDICHTER_2"),
207: Celsius("ID_WEB_LIN_VDH_2"),
208: Kelvin("ID_WEB_LIN_UH_2"),
209: Kelvin("ID_WEB_LIN_UH_Soll_2"),
210: Pressure("ID_WEB_LIN_HD_2"),
211: Pressure("ID_WEB_LIN_ND_2"),
212: Bool("ID_WEB_HDin_2"),
213: Bool("ID_WEB_AVout_2"),
214: Bool("ID_WEB_VBOout_2"),
215: Bool("ID_WEB_VD1out_2"),
216: Bool("ID_WEB_LIN_VDH_out_2"),
217: SwitchoffFile("ID_WEB_Switchoff2_file_Nr0"),
218: SwitchoffFile("ID_WEB_Switchoff2_file_Nr1"),
219: SwitchoffFile("ID_WEB_Switchoff2_file_Nr2"),
220: SwitchoffFile("ID_WEB_Switchoff2_file_Nr3"),
221: SwitchoffFile("ID_WEB_Switchoff2_file_Nr4"),
222: Timestamp("ID_WEB_Switchoff2_file_Time0"),
223: Timestamp("ID_WEB_Switchoff2_file_Time1"),
224: Timestamp("ID_WEB_Switchoff2_file_Time2"),
225: Timestamp("ID_WEB_Switchoff2_file_Time3"),
226: Timestamp("ID_WEB_Switchoff2_file_Time4"),
227: Celsius("ID_WEB_RBE_RT_Ist"),
228: Celsius("ID_WEB_RBE_RT_Soll"),
229: Celsius("ID_WEB_Temperatur_BW_oben"),
230: HeatpumpCode("ID_WEB_Code_WP_akt_2"),
231: Frequency("ID_WEB_Freq_VD"),
232: Unknown("Unknown_Calculation_232"),
233: Unknown("Unknown_Calculation_233"),
234: Unknown("Unknown_Calculation_234"),
235: Unknown("Unknown_Calculation_235"),
236: Unknown("Unknown_Calculation_236"),
237: Unknown("Unknown_Calculation_237"),
238: Unknown("Unknown_Calculation_238"),
239: Unknown("Unknown_Calculation_239"),
240: Unknown("Unknown_Calculation_240"),
241: Percent2("Circulation_Pump"),
242: Unknown("Unknown_Calculation_242"),
243: Unknown("Unknown_Calculation_243"),
244: Unknown("Unknown_Calculation_244"),
245: Unknown("Unknown_Calculation_245"),
246: Unknown("Unknown_Calculation_246"),
247: Unknown("Unknown_Calculation_247"),
248: Unknown("Unknown_Calculation_248"),
249: Unknown("Unknown_Calculation_249"),
250: Unknown("Unknown_Calculation_250"),
251: Unknown("Unknown_Calculation_251"),
252: Unknown("Unknown_Calculation_252"),
253: Unknown("Unknown_Calculation_253"),
254: Flow("Flow_Rate_254"),
255: Unknown("Unknown_Calculation_255"),
256: Unknown("Unknown_Calculation_256"),
257: Power("Heat_Output"),
258: Unknown("Unknown_Calculation_258"),
259: Unknown("Unknown_Calculation_259"),
}
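# Note: indices 81-90 are handled specially in parse() below - the software version spans several consecutive raw values, so a slice of raw_data is passed to from_heatpump.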
def parse(self, raw_data):
"""Parse raw calculations data."""
for index, data in enumerate(raw_data):
calculation = self.calculations.get(index, False)
if calculation is not False and index not in range(81, 91):
calculation.value = calculation.from_heatpump(data)
continue
if calculation is not False and index in range(81, 91):
calculation.value = calculation.from_heatpump(raw_data[index : index + 9])
continue
if calculation is False and index not in range(81, 91):
LOGGER.warning("Calculation '%d' not in list of calculationss", index)
def _lookup(self, target):
"""Lookup calculation by either id or name."""
if isinstance(target, int):
return self.calculations.get(target, None)
if isinstance(target, str):
try:
target = int(target)
return self.calculations.get(target, None)
except ValueError:
for _, calculation in self.calculations.items():
if calculation.name == target:
return calculation
LOGGER.warning("Calculation '%s' not found", target)
return None
def get(self, target):
"""Get calculation by id or name."""
calculation = self._lookup(target)
return calculation
| 41.112805 | 90 | 0.639525 | 1,702 | 13,485 | 4.627497 | 0.313749 | 0.135221 | 0.052565 | 0.055866 | 0.205307 | 0.07034 | 0.024378 | 0.018156 | 0.012697 | 0 | 0 | 0.083988 | 0.235373 | 13,485 | 327 | 91 | 41.238532 | 0.679856 | 0.012162 | 0 | 0.019108 | 0 | 0 | 0.380293 | 0.221061 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009554 | false | 0 | 0.006369 | 0 | 0.038217 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a79f064779d1b3e2219508c7e12794f8588383a4 | 4,536 | py | Python | scripts/gripper_action_client.py | DanManN/baxter_examples | 7d2fa8ac17cf5544284f6203305457f4d5097c15 | [
"BSD-3-Clause"
] | null | null | null | scripts/gripper_action_client.py | DanManN/baxter_examples | 7d2fa8ac17cf5544284f6203305457f4d5097c15 | [
"BSD-3-Clause"
] | null | null | null | scripts/gripper_action_client.py | DanManN/baxter_examples | 7d2fa8ac17cf5544284f6203305457f4d5097c15 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Baxter RSDK Gripper Action Client Example
"""
import sys
import argparse
import rospy
import actionlib
from control_msgs.msg import (
GripperCommandAction,
GripperCommandGoal,
)
import baxter_interface
from baxter_interface import CHECK_VERSION
class GripperClient(object):
def __init__(self, gripper):
ns = 'robot/end_effector/' + gripper + '_gripper/'
self._client = actionlib.SimpleActionClient(
ns + "gripper_action",
GripperCommandAction,
)
self._goal = GripperCommandGoal()
# Wait 10 Seconds for the gripper action server to start or exit
if not self._client.wait_for_server(rospy.Duration(10.0)):
rospy.logerr("Exiting - %s Gripper Action Server Not Found" % (gripper.capitalize(), ))
rospy.signal_shutdown("Action Server not found")
sys.exit(1)
self.clear()
def command(self, position, effort):
self._goal.command.position = position
self._goal.command.max_effort = effort
self._client.send_goal(self._goal)
def stop(self):
self._client.cancel_goal()
def wait(self, timeout=5.0):
self._client.wait_for_result(timeout=rospy.Duration(timeout))
return self._client.get_result()
def clear(self):
self._goal = GripperCommandGoal()
def main():
"""RSDK Gripper Example: Action Client
Demonstrates creating a client of the Gripper Action Server,
which enables sending commands of standard action type
control_msgs/GripperCommand.
The example will command the grippers to a number of positions
while specifying moving force or vacuum sensor threshold. Be sure
to start Baxter's gripper_action_server before running this example.
"""
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt, description=main.__doc__)
parser.add_argument(
'-g',
'--gripper',
dest='gripper',
required=True,
choices=['left', 'right'],
help='which gripper to send action commands'
)
args = parser.parse_args(rospy.myargv()[1:])
gripper = args.gripper
print("Initializing node... ")
rospy.init_node("rsdk_gripper_action_client_%s" % (gripper, ))
print("Getting robot state... ")
rs = baxter_interface.RobotEnable(CHECK_VERSION)
print("Enabling robot... ")
rs.enable()
print("Running. Ctrl-c to quit")
gc = GripperClient(gripper)
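# Command a sequence of positions (0 is fully closed, 100 fully open on Baxter's electric gripper) with varying maximum effort, waiting for each result.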
gc.command(position=0.0, effort=50.0)
gc.wait()
gc.command(position=100.0, effort=50.0)
gc.wait()
gc.command(position=25.0, effort=40.0)
gc.wait()
gc.command(position=75.0, effort=20.0)
gc.wait()
gc.command(position=0.0, effort=30.0)
gc.wait()
gc.command(position=100.0, effort=40.0)
print(gc.wait())
print("Exiting - Gripper Action Test Example Complete")
if __name__ == "__main__":
main()
| 34.625954 | 99 | 0.708333 | 595 | 4,536 | 5.302521 | 0.430252 | 0.032964 | 0.03233 | 0.014263 | 0.114422 | 0.099208 | 0.075753 | 0.075753 | 0.075753 | 0.043106 | 0 | 0.015517 | 0.204365 | 4,536 | 130 | 100 | 34.892308 | 0.858687 | 0.44246 | 0 | 0.128571 | 0 | 0 | 0.138618 | 0.011789 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.1 | 0 | 0.214286 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a79fdd46a7c2ab75bd13070a8ab426754ad200bb | 11,765 | py | Python | examples/cifar10.py | victorlidong/mia | e1f66014ef09b346d7168c0e0f15f94897ed5b73 | [
"MIT"
] | null | null | null | examples/cifar10.py | victorlidong/mia | e1f66014ef09b346d7168c0e0f15f94897ed5b73 | [
"MIT"
] | null | null | null | examples/cifar10.py | victorlidong/mia | e1f66014ef09b346d7168c0e0f15f94897ed5b73 | [
"MIT"
] | null | null | null | """
Example membership inference attack against a deep net classifier on the CIFAR10 dataset
"""
import sys
sys.path.append('/media/aaa/041CDACD1CDAB93E/pyProject/mia')
from dp import dp_utils
import numpy as np
from datetime import datetime
import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential, metrics
from sklearn.model_selection import train_test_split
from mia.estimators import ShadowModelBundle, AttackModelBundle, prepare_attack_data
from sklearn.metrics import roc_curve
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--is_log', type=bool, default=False)
parser.add_argument('--log_path', type=str, default="tmp.log")
parser.add_argument('--is_dp', type=bool, default=False)
parser.add_argument('--pb', type=float, default=1e6)
parser.add_argument('--clip_bound', type=float, default=0.1)
parser.add_argument('--dp_type', type=str,default="norm1")
parser.add_argument('--target_epochs',type=int,default=12,help="Number of epochs to train target and shadow models")
parser.add_argument('--attack_epochs',type=int,default=12,help="Number of epochs to train attack models")
parser.add_argument('--num_shadows',type=int,default=3,help="num_shadows")
args = parser.parse_args()
NUM_CLASSES = 10
WIDTH = 32
HEIGHT = 32
CHANNELS = 3
SHADOW_DATASET_SIZE = 4000
ATTACK_TEST_DATASET_SIZE = 4000
# log
class Logger(object):
def __init__(self, filename='default.log', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'a')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def get_data():
"""Prepare CIFAR10 data."""
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
X_train /= 255
X_test /= 255
# train_size_num=20000
train_size_num = 5000
# shrink the training set
X_train, tmp_x_test, y_train, tmp_y_test = train_test_split(X_train, y_train, test_size=(1-train_size_num/50000.0),random_state=1)
return (X_train, y_train), (X_test, y_test)
def target_model_fn():
"""The architecture of the target (victim) model.
The attack is white-box, hence the attacker is assumed to know this architecture too."""
model = tf.keras.models.Sequential()
model.add(
layers.Conv2D(
32,
(3, 3),
activation="relu",
padding="same",
input_shape=(WIDTH, HEIGHT, CHANNELS),
)
)
model.add(layers.Conv2D(32, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(64, (3, 3), activation="relu", padding="same"))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation="relu"))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
model.compile("adam", loss="categorical_crossentropy", metrics=["accuracy"])
return model
def attack_model_fn():
"""Attack model that takes target model predictions and predicts membership.
Following the original paper, this attack model is specific to the class of the input.
AttachModelBundle creates multiple instances of this model for each class.
"""
model = tf.keras.models.Sequential()
model.add(layers.Dense(128, activation="relu", input_shape=(NUM_CLASSES,)))
model.add(layers.Dropout(0.3, noise_shape=None, seed=None))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile("adam", loss="binary_crossentropy", metrics=["accuracy"])
return model
def train_target_model(model,X,Y,epochs=12,is_dp=False,dp_type="norm1",privacy_budget=1e6,clip_bound=0.1,sample_num=500,privacy_delta=1e-6,parallelnum=1):
batch_size = 32
x_train, x_test, y_train, y_test = train_test_split(X, Y,test_size=0.1,random_state=1)
print("train size: ",x_train.shape,", val size: ",x_test.shape)
if is_dp:
print("use dp,type=", dp_type)
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
val_dataset=tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
optimizer = optimizers.Adam(learning_rate=0.0005) # Adam optimizer, learning rate = 0.0005
acc_meter = metrics.Accuracy()
val_acc_meter = metrics.Accuracy()
iteration=0
for epoch in range(1,epochs+1):
for step, (x, y) in enumerate(dataset): # train on one batch at a time
iteration+=1
with tf.GradientTape() as tape: # record operations for automatic differentiation
out = model(x)
loss = tf.square(out - y)
loss = tf.reduce_sum(loss) / batch_size # mean squared error loss
grads = tape.gradient(loss, model.trainable_variables) # compute gradients of all trainable parameters
#add noise
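# norm1: clip each gradient to an L1 bound and add Laplace noise; norm2: clip to an L2 bound and add Gaussian noise; sample_L1/sample_L2: same mechanisms, but the sensitivity appears to be estimated from a sample of gradient entries (see dp_utils).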
if is_dp:
if dp_type == "norm1":
tensor_size_all = 0
for grad in grads:
tensor_size_all += dp_utils.get_tensor_size(grad.shape.dims)
for i, grad in enumerate(grads):
grad = dp_utils.clip_func(clip_bound, dp_type, grad)
sensitivity = dp_utils.calculate_l1_sensitivity(clip_bound, tensor_size_all)
beta = dp_utils.gen_laplace_beta(batch_size, parallelnum, sensitivity, privacy_budget)
noise_tensor = tf.cast(tf.convert_to_tensor(dp_utils.laplace_function(beta, grad.shape.dims)),
dtype=tf.float32)
grads[i]+=noise_tensor
elif dp_type == "norm2":
for i, grad in enumerate(grads):
grad = dp_utils.clip_func(clip_bound, dp_type, grad)
sensitivity = dp_utils.calculate_l2_sensitivity(clip_bound)
sigma = dp_utils.gen_gaussian_sigma(batch_size, parallelnum, sensitivity, privacy_budget, privacy_delta)
noise_tensor=tf.random.normal(grad.shape.dims, stddev=sigma, dtype=tf.float32)
grads[i] += noise_tensor
elif dp_type == "sample_L1":
for i, grad in enumerate(grads):
tensor_size = dp_utils.get_tensor_size(grad.shape.dims)
sensitivity = dp_utils.calculate_l1_sensitivity_sample(grad, tensor_size, sample_num)
beta = dp_utils.gen_laplace_beta(batch_size, parallelnum, sensitivity, privacy_budget)
noise_tensor = tf.cast(tf.convert_to_tensor(dp_utils.laplace_function(beta, grad.shape.dims)),
dtype=tf.float32)
grads[i] += noise_tensor
elif dp_type == "sample_L2":
for i, grad in enumerate(grads):
tensor_size = dp_utils.get_tensor_size(grad.shape.dims)
sensitivity = dp_utils.calculate_l2_sensitivity_sample(grad, tensor_size, sample_num)
sigma = dp_utils.gen_gaussian_sigma(batch_size, parallelnum, sensitivity, privacy_budget, privacy_delta)
noise_tensor = tf.random.normal(grad.shape.dims, stddev=sigma, dtype=tf.float32)
grads[i] += noise_tensor
#add noise done
with tf.GradientTape() as tape: # gradient tape (not actually needed just to apply gradients)
optimizer.apply_gradients(zip(grads, model.trainable_variables)) # update the network parameters
acc_meter.update_state(tf.argmax(out, axis=1), tf.argmax(y, axis=1)) # compare predictions with labels and accumulate training accuracy
if iteration % 100 == 0: #
print('Epoch',epoch,'iteration', iteration, ': Loss is: ', float(loss), ' Train Accuracy: ', acc_meter.result().numpy())
acc_meter.reset_states()
# run validation once per epoch
for step, (x, y) in enumerate(val_dataset):
out = model(x)
prediction=tf.argmax(out, axis=1)
label=tf.argmax(y, axis=1)
val_acc_meter.update_state(prediction,label)
print('Epoch', epoch, 'iteration', iteration, ' Val Accuracy: ', val_acc_meter.result().numpy())
val_acc_meter.reset_states()
def demo():
(X_train, y_train), (X_test, y_test) = get_data()
# Train the target model.
print("Training the target model...")
target_model = target_model_fn()
print("target model")
print(target_model.summary())
# train the target model
train_target_model(target_model,X_train,y_train,epochs=args.target_epochs,
is_dp=args.is_dp,dp_type=args.dp_type,privacy_budget=args.pb,clip_bound=args.clip_bound)
# target_model.fit(
# X_train, y_train, epochs=12, validation_split=0.1, verbose=True
# )
print("Training the target model... done!!!")
#-----------------------------------------------------------------------------------
# Train the shadow models.
smb = ShadowModelBundle(
target_model_fn,
shadow_dataset_size=SHADOW_DATASET_SIZE,
num_models=args.num_shadows,
)
# split the test data 9:1 into shadow-model training and test sets
# We assume that attacker's data were not seen in target's training.
attacker_X_train, attacker_X_test, attacker_y_train, attacker_y_test = train_test_split(
X_test, y_test, test_size=0.1
)
print(attacker_X_train.shape, attacker_X_test.shape)
print("Training the shadow models...")
X_shadow, y_shadow = smb.fit_transform(
attacker_X_train,
attacker_y_train,
fit_kwargs=dict(
epochs=args.target_epochs,
verbose=True,
validation_data=(attacker_X_test, attacker_y_test),
),
)
# ShadowModelBundle returns data in the format suitable for the AttackModelBundle.
amb = AttackModelBundle(attack_model_fn, num_classes=NUM_CLASSES)
# Fit the attack models.
print("Training the attack models...")
amb.fit(
X_shadow, y_shadow, fit_kwargs=dict(epochs=args.attack_epochs, verbose=True)
)
# Test the success of the attack.
# Prepare examples that were in the training, and out of the training.
data_in = X_train[:ATTACK_TEST_DATASET_SIZE], y_train[:ATTACK_TEST_DATASET_SIZE]
data_out = X_test[:ATTACK_TEST_DATASET_SIZE], y_test[:ATTACK_TEST_DATASET_SIZE]
# Compile them into the expected format for the AttackModelBundle.
attack_test_data, real_membership_labels = prepare_attack_data(
target_model, data_in, data_out
)
# Compute the attack accuracy.
attack_guesses = amb.predict(attack_test_data)
attack_accuracy = np.mean(attack_guesses == real_membership_labels)
fpr, tpr, phi = roc_curve(real_membership_labels, attack_guesses, pos_label=1)
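# membership advantage: true-positive rate minus false-positive rate of the attack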
Adv_A = tpr - fpr
print("attack_accuracy=",attack_accuracy)
print("Privacy Leakage Metrics=",Adv_A)
if __name__ == "__main__":
log_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# redirect console output to the log file
if args.is_log:
sys.stdout = Logger(args.log_path + '-' + log_time + '.txt', sys.stdout)
sys.stderr = Logger(args.log_path + '-' + log_time + '.txt', sys.stderr)
demo()
| 37.587859 | 154 | 0.650744 | 1,551 | 11,765 | 4.697614 | 0.203095 | 0.019764 | 0.034587 | 0.011529 | 0.396239 | 0.336948 | 0.28891 | 0.261735 | 0.220148 | 0.2056 | 0 | 0.020007 | 0.231024 | 11,765 | 312 | 155 | 37.708333 | 0.785343 | 0.109647 | 0 | 0.189055 | 0 | 0 | 0.072657 | 0.006247 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039801 | false | 0.004975 | 0.049751 | 0 | 0.109453 | 0.064677 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7a478879b2bf5e80d51bc7f8f5ab5aad9e192ef | 443 | py | Python | api/routes/__init__.py | bcnorwood/sigma | e55f45e74695c47a262769e8bd793c7283cbdaa8 | [
"MIT"
] | null | null | null | api/routes/__init__.py | bcnorwood/sigma | e55f45e74695c47a262769e8bd793c7283cbdaa8 | [
"MIT"
] | null | null | null | api/routes/__init__.py | bcnorwood/sigma | e55f45e74695c47a262769e8bd793c7283cbdaa8 | [
"MIT"
] | null | null | null | # import routes
from . import \
images_GET, \
image_GET, \
image_DELETE, \
upload
# set up a 2D hash mapping each route/method to the appropriate handler
_routes = {}
for route in (images_GET, image_GET, image_DELETE, upload):
endpoint = _routes.setdefault(route.path, {})
endpoint[route.method] = route.handler
# export convenience method to encapsulate routing logic
def match(method, route):
return _routes[route][method]
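# usage: match(image_GET.method, image_GET.path) returns image_GET.handler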
| 26.058824 | 71 | 0.735892 | 60 | 443 | 5.283333 | 0.566667 | 0.100946 | 0.088328 | 0.107256 | 0.214511 | 0.214511 | 0.214511 | 0 | 0 | 0 | 0 | 0.002717 | 0.1693 | 443 | 16 | 72 | 27.6875 | 0.858696 | 0.311512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0.090909 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7a61ac3a204d7b2052d09d0684223a7497d3329 | 3,280 | py | Python | scripts/setup-sbuild.py | eid-project/meta-eid | 01ca4ca925c20711683272cbd884916e6be8f7b7 | [
"MIT"
] | 9 | 2018-10-25T20:32:21.000Z | 2020-07-18T00:38:56.000Z | scripts/setup-sbuild.py | eid-project/meta-eid | 01ca4ca925c20711683272cbd884916e6be8f7b7 | [
"MIT"
] | 5 | 2018-10-25T21:22:48.000Z | 2020-07-20T16:17:28.000Z | scripts/setup-sbuild.py | eid-project/meta-eid | 01ca4ca925c20711683272cbd884916e6be8f7b7 | [
"MIT"
] | 5 | 2018-10-26T10:21:06.000Z | 2021-01-20T22:37:45.000Z | #!/usr/bin/env python3
import sys
import os
# Add bitbake/lib to syspath so we can import bb modules.
# Base on poky/scripts/lib/scriptpath.py
def add_bitbake_lib_path():
basepath = os.path.abspath(os.path.dirname(__file__) + '/../..')
bitbakepath = None
if os.path.exists(basepath + '/bitbake/lib/bb'):
bitbakepath = basepath + '/bitbake'
else:
# look for bitbake/bin dir in PATH
for pth in os.environ['PATH'].split(':'):
if os.path.exists(os.path.join(pth, '../lib/bb')):
bitbakepath = os.path.abspath(os.path.join(pth, '..'))
break
if bitbakepath:
sys.path.insert(0, bitbakepath + '/lib')
return bitbakepath
# tinfoil to get bitbake variables
def tinfoil_init():
import bb.tinfoil
tinfoil = bb.tinfoil.Tinfoil()
tinfoil.prepare(True)
return tinfoil
# Display error message and exit
def die(msg):
RED = '\033[91m'
BLD_RED = '\033[1;91m'
RST = '\033[0m'
msg = "".join([BLD_RED, 'ERROR', RST, RED, ': ', msg, RST])
sys.exit(msg)
def main():
if os.geteuid() != 0:
die("Please run this script as root.")
bitbake_lib_path = add_bitbake_lib_path()
if not bitbake_lib_path:
die('Bitbake lib path not found.')
import subprocess
tinfoil = tinfoil_init()
chroot_suffix = tinfoil.config_data.getVar('CHROOT_SUFFIX')
chroot_dir = tinfoil.config_data.getVar('CHROOT_DIR')
deb_build_arch = tinfoil.config_data.getVar('DEB_BUILD_ARCH')
debian_codename = tinfoil.config_data.getVar('DEBIAN_CODENAME')
debian_repo = tinfoil.config_data.getVar('DEBIAN_REPO')
# Create chroot
# TODO: define schroot name particular to each build directory
# to avoid creating duplicated schroot in one system,
# or use --chroot-mode=unshare?
cmd = ['sbuild-createchroot']
if deb_build_arch:
cmd.append('--arch=%s' % deb_build_arch)
if chroot_suffix:
cmd.append('--chroot-suffix=%s' % chroot_suffix)
cmd.append(debian_codename)
cmd.append(chroot_dir)
cmd.append(debian_repo)
err = subprocess.call(cmd)
if err != 0:
die('Failed to create chroot.')
# automatically put HTTP proxy setting for apt into the schroot
http_proxy = os.getenv('http_proxy') or os.getenv('HTTP_PROXY')
if http_proxy:
conf_file = os.path.join(chroot_dir,'etc/apt/apt.conf.d/proxy')
conf_content = 'Acquire::http::Proxy "%s";' % http_proxy
f = open(conf_file, 'w')
f.write(conf_content)
f.close()
# TODO: any other better places where
# the script doesn't need to care the permission?
apt_repo_dir = tinfoil.config_data.getVar('APT_REPO_DIR')
subprocess.call(['mkdir', '-p', apt_repo_dir])
subprocess.call(['chmod', '777', apt_repo_dir])
# Because this script is run as root, bitbake-cookerdaemon.log and
# ./tmp directory are created as root.
# Change their owner to same as the owner of build directory.
topdir = tinfoil.config_data.getVar('TOPDIR')
tmpdir = tinfoil.config_data.getVar('TMPDIR')
subprocess.call(['chown', '-R', '--reference=%s' % topdir,
'%s/bitbake-cookerdaemon.log' % topdir, tmpdir])
if __name__ == "__main__":
main()
| 32.156863 | 71 | 0.65061 | 449 | 3,280 | 4.5902 | 0.363029 | 0.02329 | 0.065987 | 0.089277 | 0.112082 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00859 | 0.219207 | 3,280 | 101 | 72 | 32.475248 | 0.796173 | 0.206707 | 0 | 0 | 0 | 0 | 0.169374 | 0.019722 | 0 | 0 | 0 | 0.009901 | 0 | 1 | 0.060606 | false | 0 | 0.060606 | 0 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7a78d074b9ad3ef89d54d623bdfa32a9acf2ecb | 4,262 | py | Python | custom_components/tesy/water_heater.py | rudizl/TesyForHASS | f74632ed053cc16540906314701bffc3758675ea | [
"MIT"
] | null | null | null | custom_components/tesy/water_heater.py | rudizl/TesyForHASS | f74632ed053cc16540906314701bffc3758675ea | [
"MIT"
] | null | null | null | custom_components/tesy/water_heater.py | rudizl/TesyForHASS | f74632ed053cc16540906314701bffc3758675ea | [
"MIT"
] | null | null | null | """
Tesy platform for the climate component.
#For more details about this platform, please refer to the documentation
#https://home-assistant.io/components/tesy/
"""
import logging
from homeassistant.const import (
TEMP_CELSIUS,
PRECISION_WHOLE,
STATE_OFF,
STATE_ON,
TEMP_CELSIUS,
ATTR_TEMPERATURE
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.components.water_heater import (WaterHeaterDevice, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from . import (TESY_DEVICES, TESY_CONFIG) #, get_device_from_hass)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
STATES = {"OFF" : "off",
"READY" : "on",
"HEATING" : "heat"}
async def async_setup_entry(hass, _config_entry, async_add_entities):
"""Set up Tesy sensor dynamically."""
async def async_discover_sensor(dev, instance):
"""Discover and add a discovered sensor."""
async_add_entities([TesyWaterHeater(dev, instance)])
async_dispatcher_connect(
hass,
"tesy_new_water_heater",
async_discover_sensor
)
# async def async_setup_platform(hass, _config, async_add_entities,
# discovery_info=None):
# """Setup the Tesy Sensor platform."""
# dev = get_device_from_hass(hass, discovery_info)
# async_add_entities([TesyWaterHeater(dev, hass)])
class TesyWaterHeater(WaterHeaterDevice):
"""Representation of a Shelly Sensor."""
def __init__(self, dev, instance):
"""Initialize an ShellySwitch."""
self._unique_id = "tesy_" + dev.id
self.entity_id = "water_heater.tesy_" + dev.id
self._config = instance.conf
self._dev = dev
self._instance = instance
dev.on_updated.append(self._updated)
self._state = None
def _updated(self, _dev):
self.schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the sensor."""
return STATES.get( self._dev.state, "unknown" )
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_WHOLE
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_OPERATION_MODE | SUPPORT_TARGET_TEMPERATURE
@property
def current_temperature(self):
"""Return the sensor temperature."""
return self._dev.temp
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._dev.target_temp
@property
def min_temp(self):
"""Return the minimum temperature."""
return 15
@property
def max_temp(self):
"""Return the maximum temperature."""
return 75
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._dev.set_temp(temperature)
#await self._async_control_heating(force=True)
#await self.async_update_ha_state()
def set_operation_mode(self, operation_mode):
if operation_mode == "on":
self._dev.turn_on()
else:
self._dev.turn_off()
@property
def current_operation(self):
"""Return current operation ie. eco, electric, performance, ..."""
if self._dev.state == "OFF":
return "off"
return "on"
@property
def operation_list(self):
"""Return the list of available operation modes."""
return ["on", "off"]
@property
def device_info(self):
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
return {
'identifiers': {
(DOMAIN, self._dev.id)
},
'name': self._dev.id,
'manufacturer': 'Tesy',
'model': "Heater",
'sw_version': "0.1"
}
| 30.014085 | 122 | 0.605584 | 456 | 4,262 | 5.407895 | 0.300439 | 0.034063 | 0.047445 | 0.021898 | 0.07867 | 0.035685 | 0 | 0 | 0 | 0 | 0 | 0.001965 | 0.283435 | 4,262 | 141 | 123 | 30.22695 | 0.805501 | 0.228062 | 0 | 0.144444 | 0 | 0 | 0.06383 | 0.021952 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155556 | false | 0 | 0.066667 | 0 | 0.377778 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7a9b9bbd56e17bfc8234408259edd5a46dc5fdc | 3,763 | py | Python | cdips/utils/collect_lightcurve_set.py | lgbouma/cdips | 187e15e620cd44160372dbfa9da989d38722c3e5 | [
"MIT"
] | 1 | 2019-10-04T02:03:25.000Z | 2019-10-04T02:03:25.000Z | cdips/utils/collect_lightcurve_set.py | lgbouma/cdips | 187e15e620cd44160372dbfa9da989d38722c3e5 | [
"MIT"
] | 3 | 2019-08-17T20:33:23.000Z | 2021-08-18T17:55:10.000Z | cdips/utils/collect_lightcurve_set.py | lgbouma/cdips | 187e15e620cd44160372dbfa9da989d38722c3e5 | [
"MIT"
] | null | null | null | """
collects lightcurves from a given projid into "center" and "corner"
directories.
"""
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from glob import glob
import os
from shutil import copyfile
from astropy.io import fits
def collect_lightcurve_set(lcdir=None, projidstr='projid1030_cam3_ccd3'):
"""
make file with x,y,Teff, and lc paths, for selection.
"""
lcpaths = glob(os.path.join(lcdir,'*_llc.fits'))
_paths, _xcc, _ycc, _teff = [], [], [], []
for ix, lcpath in enumerate(lcpaths):
print('{}/{}'.format(ix, len(lcpaths)))
hdulist = fits.open(lcpath)
hdr = hdulist[0].header
_xcc.append(hdr['XCC'])
_ycc.append(hdr['YCC'])
_paths.append(lcpath)
_teff.append(hdr['teff_val'])
df = pd.DataFrame({'xcc':_xcc, 'ycc':_ycc, 'paths':_paths, 'teff':_teff})
outpath = '../data/{}_lcinfo.csv'.format(projidstr)
df.to_csv(outpath, index=False)
print('saved {}'.format(outpath))
def copy_lightcurves_mkdirs(lcinfopath, projidstr=None, cam=None, ccd=None):
df = pd.read_csv(lcinfopath)
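# select ~100x100 pixel boxes near the CCD corner and near the CCD centre (TESS CCDs are 2048x2048 pixels); which (xcc, ycc) values correspond to 'corner' vs 'centre' depends on the camera/CCD orientation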
if cam==3 and ccd==3:
sel_corner = (
(df['xcc'] > 250) & (df['xcc'] < 350)
&
(df['ycc'] > 250) & (df['ycc'] < 350)
)
sel_center = (
(df['xcc'] > 1650) & (df['xcc'] < 1750)
&
(df['ycc'] > 1650) & (df['ycc'] < 1750)
)
elif cam==2 and ccd==2:
# NOTE: the x,y orientations must have a better description scheme :(
sel_corner = (
(df['xcc'] > 1650) & (df['xcc'] < 1750)
&
(df['ycc'] > 250) & (df['ycc'] < 350)
)
sel_center = (
(df['xcc'] > 250) & (df['xcc'] < 350)
&
(df['ycc'] > 1650) & (df['ycc'] < 1750)
)
f,ax = plt.subplots()
ax.scatter(df['xcc'],df['ycc'],rasterized=True,s=3)
ax.set_xlabel('xcc')
ax.set_ylabel('ycc')
figpath = '../results/sanity_checks/lc_positions_{}.png'.format(projidstr)
f.savefig(figpath)
print('saved {}'.format(figpath))
##########################################
print('copying {} lcs from center'.format(len(df[sel_center])))
print('copying {} lcs from corner'.format(len(df[sel_corner])))
for inpath in df.loc[sel_center, 'paths']:
indir = os.path.dirname(inpath)
inname = os.path.basename(inpath)
outdir = '../results/{}_lcs'.format(projidstr)
if not os.path.exists(outdir):
os.mkdir(outdir)
outdir = '../results/{}_lcs/center_lcs'.format(projidstr)
if not os.path.exists(outdir):
os.mkdir(outdir)
outpath = os.path.join(outdir, inname)
copyfile(inpath, outpath)
print('{} -> {}'.format(inpath, outpath))
for inpath in df.loc[sel_corner, 'paths']:
indir = os.path.dirname(inpath)
inname = os.path.basename(inpath)
outdir = '../results/{}_lcs/corner_lcs'.format(projidstr)
if not os.path.exists(outdir):
os.mkdir(outdir)
outpath = os.path.join(outdir, inname)
copyfile(inpath, outpath)
print('{} -> {}'.format(inpath, outpath))
if __name__=="__main__":
####################
# change these numbers
projid = 1088
cam = 2
ccd = 2
projidstr = 'projid{}_cam{}_ccd{}'.format(projid,cam,ccd)
####################
lcinfopath = '../data/{}_lcinfo.csv'.format(projidstr)
if not os.path.exists(lcinfopath):
lcdir = ('/home/luke/local/tess-trex/lightcurves/projid{}'.
format(projid))
collect_lightcurve_set(lcdir=lcdir, projidstr=projidstr)
copy_lightcurves_mkdirs(lcinfopath, projidstr=projidstr, cam=cam, ccd=ccd)
| 29.865079 | 78 | 0.559394 | 452 | 3,763 | 4.533186 | 0.307522 | 0.032211 | 0.033187 | 0.039043 | 0.387506 | 0.328453 | 0.297218 | 0.281601 | 0.248414 | 0.248414 | 0 | 0.026363 | 0.254053 | 3,763 | 125 | 79 | 30.104 | 0.703598 | 0.059527 | 0 | 0.329412 | 0 | 0 | 0.131663 | 0.055054 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023529 | false | 0 | 0.058824 | 0 | 0.082353 | 0.082353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7abf8fb6cd959b6443500ca3f28387c61a7575e | 387 | py | Python | support/issue13-get_current_user/test.py | aukaio/NoseGAE | 57d040a1dac4d6792cb5809696362c7c07c681fd | [
"BSD-2-Clause"
] | 2 | 2015-10-16T02:17:20.000Z | 2016-01-10T21:42:11.000Z | examples/issue13-get_current_user/test.py | gregorynicholas/nose-gae | 2102cc337060b4bf475a253c9a9b03d111c3ae59 | [
"BSD-2-Clause"
] | null | null | null | examples/issue13-get_current_user/test.py | gregorynicholas/nose-gae | 2102cc337060b4bf475a253c9a9b03d111c3ae59 | [
"BSD-2-Clause"
] | null | null | null | from webtest import TestApp
import unittest
from helloworld import app
app = TestApp(app)
class TestUserService(unittest.TestCase):
nosegae_user = True
nosegae_user_kwargs = dict(USER_EMAIL='nosegae@example.org')
def test_index(self):
# this will call get_current_user()
response = app.get('/')
self.assertIn('nosegae@example.org', response.body)
| 24.1875 | 64 | 0.713178 | 49 | 387 | 5.489796 | 0.612245 | 0.081784 | 0.126394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18863 | 387 | 15 | 65 | 25.8 | 0.856688 | 0.085271 | 0 | 0 | 0 | 0 | 0.110795 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7ad904980d1aa504ad70c8db7c6744bf16deaa1 | 1,565 | py | Python | src/main.py | Siddhant-K-code/Coronavirus-Outbreak-Notification-Alert- | 68e344eddce059a39ba080a78c423f82c4c1289f | [
"MIT"
] | 2 | 2020-03-26T00:48:36.000Z | 2020-12-12T14:15:12.000Z | src/main.py | Siddhant-K-code/Coronavirus-Outbreak-Notification-Alert | 68e344eddce059a39ba080a78c423f82c4c1289f | [
"MIT"
] | null | null | null | src/main.py | Siddhant-K-code/Coronavirus-Outbreak-Notification-Alert | 68e344eddce059a39ba080a78c423f82c4c1289f | [
"MIT"
] | null | null | null | from plyer import notification
import requests
from bs4 import BeautifulSoup
import time
def notifyMe(title, message):
notification.notify(
title = title,
message = message,
app_icon = "C:\Coronavirus Outbreak Notification\Icon.ico", # app icon = "<path>",
timeout = 3
)
def getData(url):
r = requests.get(url)
return r.text
if __name__ == "__main__":
while True:
# notifyMe("Siddhant", " Let's Stop the spread of the virus together")
myHtmlData = getData('https://www.mohfw.gov.in/') # Official website of Ministry of Health and Family Welfare of Gov. of India
soup = BeautifulSoup(myHtmlData, 'html.parser')
# print(soup.prettify())
myDataStr = ""
for tr in soup.find_all('tbody')[7].find_all('tr'):
myDataStr += tr.get_text()
myDataStr = myDataStr[1:]
itemList = myDataStr.split("\n\n")
states = ['Madhya Pradesh'] # add any other states you want alerts for
for item in itemList [0:25]:
dataList = item.split('\n')
if dataList[1] in states:
print(dataList)
nTitle = 'Cases of COVID-19'
nText = f"State : {dataList[1]}\nIndian : {dataList[2]} & Foreign : {dataList[3]}\nCured : {dataList[4]}\nDeaths : {dataList[5]} "
notifyMe(nTitle, nText)
time.sleep(2)
time.sleep(3600) # send an alert at a regular interval; here every 3600 seconds, i.e. 1 hour
| 35.568182 | 146 | 0.584026 | 192 | 1,565 | 4.697917 | 0.588542 | 0.026608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022789 | 0.299042 | 1,565 | 43 | 147 | 36.395349 | 0.799453 | 0.205112 | 0 | 0 | 0 | 0.030303 | 0.204049 | 0.051012 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.121212 | 0 | 0.212121 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7ae733ee5bcfec4bb8da8328d76ffdd17374cc5 | 1,219 | py | Python | tests/test_clean_all_strcol.py | jaimiles23/pywrangle | 67f44e56f2b87758cd033156af83e83b2ac6e185 | [
"MIT"
] | 1 | 2020-08-16T01:16:57.000Z | 2020-08-16T01:16:57.000Z | tests/test_clean_all_strcol.py | jaimiles23/pywrangle | 67f44e56f2b87758cd033156af83e83b2ac6e185 | [
"MIT"
] | null | null | null | tests/test_clean_all_strcol.py | jaimiles23/pywrangle | 67f44e56f2b87758cd033156af83e83b2ac6e185 | [
"MIT"
] | 2 | 2020-08-29T19:16:18.000Z | 2021-04-06T23:19:03.000Z | """
Runs tests for clean_strcol() function in str_cleaning dir.
"""
##########
# Imports
##########
import pandas as pd
import numpy as np
try:
from context import (
pywrangle as pw,
create_df
)
except ModuleNotFoundError:
from .context import (
pywrangle as pw,
create_df
)
##########
# Tests
##########
def test_clean_all_strcols():
"""Tests output for clean_str_col against the 'animals' column
"""
df1, df2 = (create_df.create_str_df1() for _ in range(2))
for col in df1.columns:
df1[col] = df1[col].str.lower()
df2 = pw.clean_all_strcols(df2)
assert df1.equals(df2)
return
def test_clean_nonstrcols():
df1, df2 = (create_df.create_int_df_size(10, 10) for _ in range(2))
df2 = pw.clean_all_strcols(df2)
assert df1.equals(df2)
def test_clean_mixedcols():
"""Test that doesn't break when running in tests. Must manually check output."""
df = create_df.create_mixed_df_size(10, 10)
df = pw.clean_all_strcols(df, trim = False)
##########
# Main
##########
if __name__ == "__main__":
test_clean_all_strcols()
test_clean_nonstrcols()
test_clean_mixedcols() | 18.753846 | 84 | 0.61936 | 163 | 1,219 | 4.343558 | 0.392638 | 0.076271 | 0.105932 | 0.072034 | 0.279661 | 0.223164 | 0.223164 | 0.223164 | 0.115819 | 0.115819 | 0 | 0.028017 | 0.23872 | 1,219 | 65 | 85 | 18.753846 | 0.734914 | 0.179655 | 0 | 0.266667 | 0 | 0 | 0.008686 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.1 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7b160a5bd1986ec0f09307338c707b627abb227 | 4,923 | py | Python | nilmtk/tests/test_metergroup.py | emilholmegaard/nilmtk | d2a06dd77a6cdf9f3b4d28825d1a0ea84db1bb19 | [
"Apache-2.0"
] | null | null | null | nilmtk/tests/test_metergroup.py | emilholmegaard/nilmtk | d2a06dd77a6cdf9f3b4d28825d1a0ea84db1bb19 | [
"Apache-2.0"
] | null | null | null | nilmtk/tests/test_metergroup.py | emilholmegaard/nilmtk | d2a06dd77a6cdf9f3b4d28825d1a0ea84db1bb19 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
from __future__ import print_function, division
import unittest
from os.path import join
from nilmtk.tests.testingtools import data_dir
from nilmtk import (Appliance, MeterGroup, ElecMeter, HDFDataStore,
global_meter_group, TimeFrame, DataSet)
from nilmtk.utils import tree_root, nodes_adjacent_to_root
from nilmtk.elecmeter import ElecMeterID
from nilmtk.building import BuildingID
class TestMeterGroup(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        filename = join(data_dir(), 'energy.h5')
        cls.datastore = HDFDataStore(filename)
        ElecMeter.load_meter_devices(cls.datastore)

    def test_getitem(self):
        fridge_meter = ElecMeter()
        fridge = Appliance({'type': 'fridge', 'instance': 1})
        fridge_meter.appliances = [fridge]
        mg = MeterGroup([fridge_meter])

        # test good keys
        for key in ['fridge', ('fridge', 1), {'type': 'fridge'},
                    {'type': 'fridge', 'instance': 1}]:
            self.assertEqual(mg[key], fridge_meter)

        # test bad key values
        for key in ['foo', ('foo', 2), ('fridge', 2),
                    {'type': 'fridge', 'instance': -12}]:
            with self.assertRaises(KeyError):
                mg[key]

        # test bad key types
        for key in [True, False, ['fridge']]:
            with self.assertRaises(TypeError):
                mg[key]

    def test_select(self):
        fridge_meter = ElecMeter()
        fridge = Appliance({'type': 'fridge', 'instance': 1})
        fridge_meter.appliances = [fridge]
        mg = MeterGroup([fridge_meter])
        self.assertEqual(mg.select_using_appliances(category='cold'), mg)
        # TODO: make this test more rigorous!

    def test_wiring_graph(self):
        meter1 = ElecMeter(metadata={'site_meter': True},
                           meter_id=ElecMeterID(1, 1, 'REDD'))
        meter2 = ElecMeter(metadata={'submeter_of': 1},
                           meter_id=ElecMeterID(2, 1, 'REDD'))
        meter3 = ElecMeter(metadata={'submeter_of': 2},
                           meter_id=ElecMeterID(3, 1, 'REDD'))
        mg = MeterGroup([meter1, meter2, meter3])
        wiring_graph = mg.wiring_graph()
        self.assertIs(mg.mains(), meter1)
        self.assertEqual(mg.meters_directly_downstream_of_mains(), [meter2])
        self.assertEqual(wiring_graph.nodes(), [meter2, meter3, meter1])

    def test_proportion_of_energy_submetered(self):
        meters = []
        for i in [1, 2, 3]:
            meter_meta = self.datastore.load_metadata('building1')['elec_meters'][i]
            meter_id = ElecMeterID(i, 1, 'REDD')
            meter = ElecMeter(self.datastore, meter_meta, meter_id)
            meters.append(meter)
        mains = meters[0]
        mg = MeterGroup(meters)
        self.assertEqual(mg.proportion_of_energy_submetered(), 1.0)

    def test_dual_supply(self):
        elec_meters = {1: {'data_location': '/building1/elec/meter1',
                           'device_model': 'Energy Meter'},
                       2: {'data_location': '/building1/elec/meter1',
                           'device_model': 'Energy Meter'},
                       3: {'data_location': '/building1/elec/meter1',
                           'device_model': 'Energy Meter'}}
        appliances = [{'type': 'washer dryer', 'instance': 1, 'meters': [1, 2]},
                      {'type': 'fridge', 'instance': 1, 'meters': [3]}]
        mg = MeterGroup()
        mg.load(self.datastore, elec_meters, appliances, BuildingID(1, 'REDD'))
        self.assertEqual(mg['washer dryer'].total_energy()['active'],
                         mg['fridge'].total_energy()['active'] * 2)
        self.assertIsInstance(mg['washer dryer'], MeterGroup)
        self.assertIsInstance(mg['fridge'], ElecMeter)

    def test_from_list(self):
        meters = []
        for i in range(1, 6):
            meters.append(ElecMeter(meter_id=ElecMeterID(i, 1, None)))
        mg = global_meter_group.from_list([
            ElecMeterID(1, 1, None),
            (ElecMeterID(2, 1, None),
             (ElecMeterID(3, 1, None), ElecMeterID(4, 1, None), ElecMeterID(5, 1, None)))
        ])
        """
        Commented out for the time being:
        self.assertIs(mg.meters[0], meters[0])
        self.assertIs(mg.meters[1].meters[0], meters[1])
        self.assertEqual(len(mg.meters[1].meters[1].meters), 3)
        self.assertEqual(len(mg.meters), 2)
        """

    def test_full_results_with_no_sections_raises_runtime_error(self):
        mg = MeterGroup([ElecMeter(), ElecMeter()])
        with self.assertRaises(RuntimeError):
            mg.dropout_rate(full_results=True)

    def test_total_energy(self):
        filename = join(data_dir(), 'random.h5')
        ds = DataSet(filename)
        ds.buildings[1].elec.total_energy()


if __name__ == '__main__':
    unittest.main()
| 39.701613 | 84 | 0.588056 | 547 | 4,923 | 5.120658 | 0.268739 | 0.019993 | 0.032131 | 0.027133 | 0.178151 | 0.133881 | 0.133881 | 0.133881 | 0.133881 | 0.077115 | 0 | 0.021366 | 0.277473 | 4,923 | 123 | 85 | 40.02439 | 0.766095 | 0.021532 | 0 | 0.152174 | 0 | 0 | 0.108399 | 0.014512 | 0 | 0 | 0 | 0.00813 | 0.130435 | 1 | 0.097826 | false | 0 | 0.086957 | 0 | 0.195652 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
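Several tests in the record above build identifiers such as ElecMeterID(1, 1, 'REDD') and compare them by value. In nilmtk this identifier is, as far as I can tell (treat the field names below as an assumption rather than the library's definition), a plain namedtuple keyed by meter instance, building, and dataset, which is what makes positional construction and value equality work in those assertions:

from collections import namedtuple

# Assumed shape of the identifier used by the tests above.
ElecMeterID = namedtuple('ElecMeterID', ['instance', 'building', 'dataset'])

site_meter = ElecMeterID(instance=1, building=1, dataset='REDD')
assert site_meter == ElecMeterID(1, 1, 'REDD')  # value equality, as the tests rely on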
a7b1b3b5f3f83567554f42bd6536a65a70a9d99d | 2,324 | py | Python | youtube_dl/extractor/spike.py | pierrephilip31/download | 7a6c204fcb6ba5a1a5149ea7a3c186eab87fc7e4 | [
"Unlicense"
] | 24 | 2017-03-17T10:27:12.000Z | 2022-02-16T05:55:50.000Z | youtube_dl/extractor/spike.py | travis-south/youtube-dl | dc89f968330fe9b2f0e56b07febc8cd57005f2c0 | [
"Unlicense"
] | 7 | 2017-07-26T08:15:27.000Z | 2018-09-20T12:56:53.000Z | youtube_dl/extractor/spike.py | travis-south/youtube-dl | dc89f968330fe9b2f0e56b07febc8cd57005f2c0 | [
"Unlicense"
] | 3 | 2017-03-17T10:27:13.000Z | 2019-01-28T01:19:17.000Z | from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
class SpikeIE(MTVServicesInfoExtractor):
    _VALID_URL = r'https?://(?:[^/]+\.)?spike\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
    _TESTS = [{
        'url': 'http://www.spike.com/video-clips/lhtu8m/auction-hunters-can-allen-ride-a-hundred-year-old-motorcycle',
        'md5': '1a9265f32b0c375793d6c4ce45255256',
        'info_dict': {
            'id': 'b9c8221a-4e50-479a-b86d-3333323e38ba',
            'ext': 'mp4',
            'title': 'Auction Hunters|December 27, 2013|4|414|Can Allen Ride A Hundred Year-Old Motorcycle?',
            'description': 'md5:fbed7e82ed5fad493615b3094a9499cb',
            'timestamp': 1388120400,
            'upload_date': '20131227',
        },
    }, {
        'url': 'http://www.spike.com/full-episodes/j830qm/lip-sync-battle-joel-mchale-vs-jim-rash-season-2-ep-209',
        'md5': 'b25c6f16418aefb9ad5a6cae2559321f',
        'info_dict': {
            'id': '37ace3a8-1df6-48be-85b8-38df8229e241',
            'ext': 'mp4',
            'title': 'Lip Sync Battle|April 28, 2016|2|209|Joel McHale Vs. Jim Rash|Act 1',
            'description': 'md5:a739ca8f978a7802f67f8016d27ce114',
        },
    }, {
        'url': 'http://www.spike.com/video-clips/lhtu8m/',
        'only_matching': True,
    }, {
        'url': 'http://www.spike.com/video-clips/lhtu8m',
        'only_matching': True,
    }, {
        'url': 'http://bellator.spike.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',
        'only_matching': True,
    }, {
        'url': 'http://bellator.spike.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page',
        'only_matching': True,
    }]

    _FEED_URL = 'http://www.spike.com/feeds/mrss/'
    _MOBILE_TEMPLATE = 'http://m.spike.com/videos/video.rbml?id=%s'
    _CUSTOM_URL_REGEX = re.compile(r'spikenetworkapp://([^/]+/[-a-fA-F0-9]+)')
    _GEO_COUNTRIES = ['US']

    def _extract_mgid(self, webpage):
        mgid = super(SpikeIE, self)._extract_mgid(webpage)
        if mgid is None:
            url_parts = self._search_regex(self._CUSTOM_URL_REGEX, webpage, 'episode_id')
            video_type, episode_id = url_parts.split('/', 1)
            mgid = 'mgid:arc:{0}:spike.com:{1}'.format(video_type, episode_id)
        return mgid
| 41.5 | 118 | 0.607143 | 268 | 2,324 | 5.115672 | 0.514925 | 0.058352 | 0.03647 | 0.054705 | 0.250912 | 0.196937 | 0.196937 | 0.196937 | 0.078045 | 0.078045 | 0 | 0.106476 | 0.216007 | 2,324 | 55 | 119 | 42.254545 | 0.645993 | 0 | 0 | 0.265306 | 0 | 0.122449 | 0.512909 | 0.143287 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0 | 0.061224 | 0 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
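The extractor in the record above recognises Spike and Bellator pages through its _VALID_URL pattern: an optional subdomain, a single path segment, then a six-character lowercase alphanumeric clip id. A small, self-contained check (independent of youtube-dl itself) of which URLs the pattern accepts; the last URL is a made-up counterexample, not one of the record's test cases:

import re

# Same pattern as the _VALID_URL in the record above.
VALID_URL = r'https?://(?:[^/]+\.)?spike\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'

urls = [
    'http://www.spike.com/video-clips/lhtu8m/',  # matches: trailing slash allowed
    'http://www.spike.com/video-clips/lhtu8m',   # matches: end of string also allowed
    'http://bellator.spike.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',  # matches: subdomain
    'http://www.spike.com/video-clips/too-long-id/',  # no match: id is not six [0-9a-z] characters
]
for url in urls:
    print(url, '->', bool(re.match(VALID_URL, url)))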
a7b4317f2526e8a8eead02672c5f1a78a6925d20 | 1,294 | py | Python | Lesson 03/Problems/Problem03.py | NoelKocheril/Python101 | b0e923e1ec3e936babbd57a310ec72b13e07ac57 | [
"WTFPL"
] | null | null | null | Lesson 03/Problems/Problem03.py | NoelKocheril/Python101 | b0e923e1ec3e936babbd57a310ec72b13e07ac57 | [
"WTFPL"
] | null | null | null | Lesson 03/Problems/Problem03.py | NoelKocheril/Python101 | b0e923e1ec3e936babbd57a310ec72b13e07ac57 | [
"WTFPL"
] | null | null | null | import unittest
# Given the following dictionary of employees, print out the data in the following format:
# Employee: {name}, Age: {age}, and Salary: {salary}
def employeePrinter(employeeDict: dict[str, dict]) -> str:
    return ""  # stub: to be implemented as part of the exercise


class employeePrinterTest(unittest.TestCase):
    def test_01(self):
        sample_dict = {
            "emp1": {"name": "Steve", "salary": 7500, "age": 32},
            "emp2": {"name": "Noel", "salary": 6500, "age": 25},
            "emp3": {"name": "Arjun", "salary": 8000, "age": 40},
            "emp4": {"name": "Vithusan", "salary": 500, "age": 18},
        }
        self.assertEqual(
            employeePrinter(sample_dict),
            "Employee: Steve, Age: 32, and Salary: 7500\nEmployee: Noel, Age: 25, and Salary: 6500\nEmployee: Arjun, Age: 40, and Salary: 8000\nEmployee: Vithusan, Age: 18, and Salary: 500\n",
        )

    def test_02(self):
        sample_dict = {
            "emp1": {"name": "Steve", "salary": 7500, "age": 32},
            "emp2": {"name": "Noel", "salary": 6500, "age": 25},
        }
        self.assertEqual(
            employeePrinter(sample_dict),
            "Employee: Steve, Age: 32, and Salary: 7500\nEmployee: Noel, Age: 25, and Salary: 6500\n",
        )


if __name__ == "__main__":
    unittest.main()
| 32.35 | 192 | 0.563369 | 147 | 1,294 | 4.863946 | 0.360544 | 0.088112 | 0.039161 | 0.05035 | 0.478322 | 0.478322 | 0.478322 | 0.478322 | 0.478322 | 0.478322 | 0 | 0.084567 | 0.268934 | 1,294 | 39 | 193 | 33.179487 | 0.671247 | 0.107419 | 0 | 0.384615 | 0 | 0.076923 | 0.351563 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.115385 | false | 0 | 0.038462 | 0.038462 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
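The employeePrinter stub in the record above is the exercise to complete, and its expected output is fully pinned down by the two assertions. One possible solution, not part of the original problem file, that satisfies both tests:

def employeePrinter(employeeDict: dict[str, dict]) -> str:
    # Build one line per employee in insertion order, each terminated by a newline.
    lines = []
    for emp in employeeDict.values():
        lines.append(
            f"Employee: {emp['name']}, Age: {emp['age']}, and Salary: {emp['salary']}\n"
        )
    return "".join(lines)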
a7b7473a5de5a3bb432f9368bca02e435d6dabd6 | 3,776 | py | Python | pdnn/helpers/demo_visualize_k_effects.py | petered/pdnn | 83ae177372c1bea1bc10ec9ce30487f73008bf99 | [
"BSD-2-Clause-FreeBSD"
] | 17 | 2017-06-14T16:36:12.000Z | 2021-01-31T18:16:10.000Z | pdnn/helpers/demo_visualize_k_effects.py | petered/pdnn | 83ae177372c1bea1bc10ec9ce30487f73008bf99 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2018-02-26T16:04:48.000Z | 2018-03-01T06:42:57.000Z | pdnn/helpers/demo_visualize_k_effects.py | petered/pdnn | 83ae177372c1bea1bc10ec9ce30487f73008bf99 | [
"BSD-2-Clause-FreeBSD"
] | 5 | 2017-09-12T13:20:02.000Z | 2019-02-06T08:41:58.000Z | from artemis.general.mymath import cosine_distance
from pdnn.helpers.pid_encoder_decoder import lowpass_random, pid_encode, pid_decode, Herder
from matplotlib import pyplot as plt
import numpy as np
def demo_visualize_k_effects(
        kps=[0., 0.01, .1, 2.],
        kds=[0, 1., 4.],
        cutoff=0.005,
        n_samples=550,
        s_as_triangles=False,
        seed=1234
        ):
    x = lowpass_random(n_samples=n_samples, cutoff=cutoff, rng=seed, normalize=True)

    plt.figure(figsize=(10, 6))
    plt.subplots_adjust(wspace=0.01, hspace=0.01, left=0.08, right=.98, top=.92)
    ax = plt.subplot2grid((len(kps), len(kds)), (0, 0))
    for i, kp in enumerate(kps):
        for j, kd in enumerate(kds):
            xe = pid_encode(x, kp=kp, kd=kd)
            h = Herder()
            xc = [h(xet) for xet in xe]
            xd = pid_decode(xc, kp=kp, kd=kd)
            this_ax = plt.subplot2grid((len(kps), len(kds)), (len(kps)-i-1, j), sharex=ax, sharey=ax)
            plt.plot(xd, color='C1', label='$\hat x_t$')
            # plt.text(0, -0.1, '$Sc(x,\hat x)={:.2g},|s|={}$'.format(cosine_distance(x, xd), np.sum(np.abs(xc))))
            # plt.text(0.01, .99, '$Sc(x,\hat x)={:.3g},|s|={}$'.format(cosine_distance(x, xd), int(np.sum(np.abs(xc)))),
            #          ha='left', va='top', transform=this_ax.transAxes, bbox=dict(boxstyle='square', facecolor='w', alpha=0.7, pad=0))
            # plt.text(0.01, .99, '$|x-\hat x|^2={:.2g},|s|={}$'.format(np.sqrt(((x-xd)**2).mean()), int(np.sum(np.abs(xc)))),
            #          ha='left', va='top', transform=this_ax.transAxes, bbox=dict(boxstyle='square', facecolor='w', alpha=0.8, pad=0))
            # plt.text(0.01, .01, '$\left<|x_t-\hat x_t|\\right>_t={:.2g}, \Sigma_t|s_t|={}$'.format(np.abs(x-xd).mean(), int(np.sum(np.abs(xc)))),
            #          ha='left', va='bottom', transform=this_ax.transAxes, bbox=dict(boxstyle='square', facecolor='w', edgecolor='none', alpha=0.8, pad=0.0))
            plt.text(.01, .01, '$\left<|x_t-\hat x_t|\\right>_t={:.2g}, \;\;\; N={}$'.format(np.abs(x-xd).mean(), int(np.sum(np.abs(xc)))),
                     ha='left', va='bottom', transform=this_ax.transAxes, bbox=dict(boxstyle='square', facecolor='w', edgecolor='none', alpha=0.8, pad=0.0))
            # plt.text(0.5, 0.5, 'matplotlib',
            #          horizontalalignment='center',
            #          verticalalignment='center',
            #          transform=ax.transAxes)
            # plt.plot(xe, color='C4', label='$a_t$')
            if s_as_triangles:
                up_spikes = np.nonzero(xc > 0)[0]
                down_spikes = np.nonzero(xc < 0)[0]
                plt.plot(up_spikes, np.zeros(up_spikes.shape), '^', color='k', label='$s_t^+$')
                plt.plot(down_spikes, np.zeros(down_spikes.shape), 'v', color='r', label='$s_t^-$')
            else:
                plt.plot(xc, color='k', label='$s_t$')
            plt.plot(x, color='C0', label='$x_t$')
            plt.grid()
            if i > 0:
                plt.tick_params('x', labelbottom='off')
            else:
                plt.xlabel('$k_d={}$'.format(kd))
            if j > 0:
                plt.tick_params('y', labelleft='off')
            else:
                plt.ylabel('$k_p={}$'.format(kp))
    ax.set_xlim(0, n_samples)
    ax.set_ylim(np.min(x)*1.1, np.max(x)*1.1)
    handles, labels = plt.gca().get_legend_handles_labels()
    # plt.legend(handles[::-1], labels[::-1], bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure, ncol=len(handles[::-1]))
    plt.legend(handles[::-1], labels[::-1], bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure, ncol=len(handles[::-1]), loc='upper right')
    plt.show()


if __name__ == '__main__':
    demo_visualize_k_effects()
| 49.684211 | 158 | 0.54714 | 568 | 3,776 | 3.507042 | 0.288732 | 0.007028 | 0.02008 | 0.0251 | 0.451305 | 0.437751 | 0.381526 | 0.332329 | 0.332329 | 0.332329 | 0 | 0.039024 | 0.239936 | 3,776 | 75 | 159 | 50.346667 | 0.655052 | 0.302172 | 0 | 0.06 | 0 | 0 | 0.061092 | 0.0084 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0.04 | 0.08 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
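The demo above encodes a slowly varying signal with pid_encode, quantizes it with a Herder into a sparse integer spike train s_t, and reconstructs x̂_t with pid_decode; those helpers live in pdnn.helpers.pid_encoder_decoder and are not shown in this record. Purely as an illustration of the quantization step (an assumption about what a herding quantizer does, not the actual Herder implementation), the idea is to carry a running residual and only emit an integer when the residual warrants it:

class ToyHerder:
    """Illustrative herding quantizer: emitted integers track the running sum of the input."""

    def __init__(self):
        self.phi = 0.0  # accumulated residual between input and emitted spikes

    def __call__(self, value):
        self.phi += value
        spike = round(self.phi)  # emit the integer nearest to the accumulated residual
        self.phi -= spike        # carry the rounding error forward to later steps
        return spike

h = ToyHerder()
spikes = [h(v) for v in [0.3, 0.3, 0.3, 0.3]]  # e.g. [0, 1, 0, 0]: sparse, total tracks the input total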