hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
84db537f5dfccd0dd8727dd1268abb3d016e064b | 2,105 | py | Python | examples/client-temp.py | keiichishima/echonetlite | b8f1cfcf57bae75b76262a7a13b9bb27f36fc1dc | [
"BSD-2-Clause"
] | 5 | 2018-01-15T08:18:12.000Z | 2021-05-27T02:53:28.000Z | examples/client-temp.py | keiichishima/echonetlite | b8f1cfcf57bae75b76262a7a13b9bb27f36fc1dc | [
"BSD-2-Clause"
] | 1 | 2019-02-20T09:26:22.000Z | 2019-04-02T01:00:21.000Z | examples/client-temp.py | keiichishima/echonetlite | b8f1cfcf57bae75b76262a7a13b9bb27f36fc1dc | [
"BSD-2-Clause"
] | 2 | 2017-04-05T04:04:27.000Z | 2018-01-22T14:22:22.000Z | import argparse
import struct
from echonetlite.interfaces import monitor
from echonetlite import middleware
from echonetlite.protocol import *
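
# Proxy for a remote ECHONET Lite temperature sensor: it schedules a GET
# request every 10 seconds and prints each reading (the payload is a signed
# 16-bit value in 0.1 degree steps, hence the division by 10 below).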
class Temperature(middleware.RemoteDevice):
    def __init__(self, eoj, node_id):
        super(Temperature, self).__init__(eoj=eoj)
        self._node_id = node_id
        monitor.schedule_loopingcall(
            10,
            self._request_temperature,
            from_device=controller,
            to_eoj=self.eoj,
            to_node_id=self._node_id)
        self.add_listener(EPC_TEMPERATURE,
                          self._on_did_receive_temperature)

    def _request_temperature(self, from_device, to_eoj, to_node_id):
        from_device.send(esv=ESV_CODE['GET'],
                         props=[Property(epc=EPC_TEMPERATURE),],
                         to_eoj=to_eoj,
                         to_node_id=to_node_id)

    def _on_did_receive_temperature(self, from_node_id, from_eoj,
                                    to_device, esv, prop):
        if esv not in ESV_RESPONSE_CODES:
            return
        (val,) = struct.unpack('!h', bytearray(prop.edt))
        print('Temperature is', val / 10)

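
# Node profile whose on_did_find_device hook wraps every discovered
# temperature sensor in a Temperature proxy.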
class MyProfile(middleware.NodeProfile):
    def __init__(self, eoj=None):
        super(MyProfile, self).__init__(eoj=eoj)
        # profile.property[EPC_MANUFACTURE_CODE] = ...
        # profile.property[EPC_IDENTIFICATION_NUMBER] = ...

    def on_did_find_device(self, eoj, from_node_id):
        if (eoj.clsgrp == CLSGRP_CODE['SENSOR']
                and eoj.cls == CLS_SE_CODE['TEMPERATURE']):
            return Temperature(eoj, from_node_id)
        return None

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--self-node', dest='self_node', required=True,
                        help='IP address of this node')
    args = parser.parse_args()

    profile = MyProfile()
    controller = middleware.Controller(instance_id=1)

    monitor.start(node_id=args.self_node,
                  devices={str(profile.eoj): profile,
                           str(controller.eoj): controller})
| 35.677966 | 77 | 0.619952 | 243 | 2,105 | 5.00823 | 0.353909 | 0.059162 | 0.026294 | 0.027116 | 0.021364 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003296 | 0.279335 | 2,105 | 58 | 78 | 36.293103 | 0.798945 | 0.044656 | 0 | 0 | 0 | 0 | 0.044323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.108696 | 0 | 0.326087 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84df22e953bad2bfd61c9c777a6347a614ce9c26 | 6,167 | py | Python | bot.py | LFRusso/QCircuit-Bot | b392036b061342beddcb4338fc64e4b8aba0fb8e | [
"MIT"
] | 3 | 2020-09-29T14:13:54.000Z | 2020-10-23T04:55:35.000Z | bot.py | LFRusso/QCircuit-Bot | b392036b061342beddcb4338fc64e4b8aba0fb8e | [
"MIT"
] | 1 | 2020-10-01T16:30:50.000Z | 2020-10-01T16:30:50.000Z | bot.py | LFRusso/QCircuit-Bot | b392036b061342beddcb4338fc64e4b8aba0fb8e | [
"MIT"
] | 1 | 2020-10-22T01:55:29.000Z | 2020-10-22T01:55:29.000Z | from telegram.ext import Updater, CommandHandler
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
import logging
from interpreter import *
import os
import numpy as np
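

# Format a measurement-count dict as per-state probabilities; the division by
# 100 suggests the interpreter runs each circuit for 100 shots.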
def parse_result(count):
    message = []
    for state in count.keys():
        message.append(f"P(|{state}〉)={round(count[state]/100,2)}")
    message = '\n'.join(message)
    return message

def start(update, context):
    message = "Hello, @{}! Type /help for help.".format(update.effective_user.username)
    context.bot.send_message(chat_id=update.effective_chat.id, text=message)

def help(update, context):
    message = """
Commands:
/help: Display commands
/guide: How to use this bot
/run: Parses and runs a quantum circuit
/example: Loads an example circuit
/gates: Displays available quantum gates
"""
    link = "https://github.com/LFRusso/QCircuit-Bot"
    keyboard = [[InlineKeyboardButton(text="GitHub Repository", url=link)]]
    markup = InlineKeyboardMarkup(keyboard)
    context.bot.send_message(chat_id=update.effective_chat.id, text=message, reply_markup=markup)

def guide(update, context):
    message = """
How to use this bot:
Use the command /run with a quantum circuit as shown in /example
The circuit consists of qubits, each delimited by a separate line, and logic gates (available at /gates). You can use those to perform operations on the qubits by adding them to the corresponding qubit line. Characters other than the gates will be treated as "wires" and can be used to indent the circuit (keep in mind the limit of characters is 20 per line).
"""
    context.bot.send_message(chat_id=update.effective_chat.id, text=message)

def gates(update, context):
    message = """
Available logic gates:
- Single-qubit operations
X: NOT/Pauli-X
Y: Pauli-Y
Z: Pauli-Z
H: Hadamard
M: Measure
|: Barrier
i: Identity
S: S (π/2) gate
T: T (π/4) gate
0: Reset a qubit to |0〉 state
- Two-qubit operations
o: Control, used for the controlled operations (the 'o' character has to be in the same index as the 'C' on its corresponding line)
CX: Controlled Not (CNOT)
CY: Controlled Y
CZ: Controlled Z
x,x: Swap (the 'x' characters must have the same index on their corresponding lines)
- Three-qubit operations
t,o,o: Toffoli (CCNOT). The 'o' characters must both have the same index as the 't' character.
"""
    link = "https://en.wikipedia.org/wiki/Quantum_logic_gate"
    keyboard = [[InlineKeyboardButton(text="About quantum logic gates", url=link)]]
    markup = InlineKeyboardMarkup(keyboard)
    context.bot.send_message(chat_id=update.effective_chat.id, text=message, reply_markup=markup)

# Runs an example circuit
def example(update, context):
    examples = {
        'Bell State':
            ['-H-o--M',
             '---CX-M'],
        'Deutsch (f=0)':
            ['0iH|-|H-M',
             '0XH|-|i-M'],
        'Deutsch (f=1)':
            ['0iH|i|H-M',
             '0XH|X|i-M'],
        'Deutsch (f(0)=0, f(1)=1)':
            ['0iH|o-|H-M',
             '0XH|CX|i-M'],
        'Deutsch (f(0)=1, f(1)=0)':
            ['0iH|Xo-X|H-M',
             '0XH|iCXi|i-M'],
        'Deutsch-Jozsa (Balanced Oracle)':
            ['0Hi|o...ii|H.M',
             '0Hi|..o..i|H.M',
             '0Hi|....o.|H.M',
             '0XH|CXCXCX|HX.'],
        'Finding multiples of 2^3/r; 7^r mod 15 = 1':
            ['........o..o...iM',
             'H.....ooCZH..o..M',
             'Ho.o.......CZCZHM',
             'X.....t..........',
             '.CX....o.........',
             '...CXXo..........',
             '.......t.........'],
    }

    random_key = np.random.choice(list(examples.keys()))
    random_circ = examples[random_key]
    drawn_circ = '\n'.join(random_circ)

    fname = "{}.png".format(update.effective_chat.id)
    count = parse_and_run(random_circ, fname)
    message = parse_result(count)

    context.bot.send_message(chat_id=update.effective_chat.id, text=f"Circuit name: {random_key}")
    context.bot.send_message(chat_id=update.effective_chat.id, text=drawn_circ)
    context.bot.sendPhoto(chat_id=update.effective_chat.id, photo=open(fname, 'rb'))
    context.bot.send_message(chat_id=update.effective_chat.id, text=message)
    os.remove(fname)

# Parses and runs a quantum circuit
def run(update, context):
    circ_str = context.args
    if (len(circ_str) > 7):
        # Too many qubits error
        context.bot.send_message(chat_id=update.effective_chat.id, text="Too many qubits, maximum is 7.")
        return
    len_circ = len(circ_str[0])
    if (len_circ > 20):
        # Circuit too long error
        context.bot.send_message(chat_id=update.effective_chat.id, text="Circuit is too long. Max characters is 20.")
        return
    for line in circ_str:
        if (len(line) != len_circ):
            # Lines of different length
            context.bot.send_message(chat_id=update.effective_chat.id, text="Lines must be the same length.")
            return

    fname = "{}.png".format(update.effective_chat.id)
    count = parse_and_run(circ_str, fname)
    message = parse_result(count)

    context.bot.sendPhoto(chat_id=update.effective_chat.id, photo=open(fname, 'rb'))
    context.bot.send_message(chat_id=update.effective_chat.id, text=message)
    os.remove(fname)

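
# Configure logging, register the command handlers with the dispatcher, and
# start long polling; the bot token is read from the API_TOKEN env variable.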
def main():
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s [%(levelname)s] %(message)s")

    TOKEN = os.environ['API_TOKEN']
    updater = Updater(token=TOKEN, use_context=True)
    dp = updater.dispatcher

    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", help))
    dp.add_handler(CommandHandler("run", run))
    dp.add_handler(CommandHandler("gates", gates))
    dp.add_handler(CommandHandler("guide", guide))
    dp.add_handler(CommandHandler("example", example))

    updater.start_polling()
    logging.info("=== It's alive! ===")
    updater.idle()
    logging.info("=== Oh no, It's dying! ===")

if __name__ == "__main__":
    main() | 33.335135 | 374 | 0.630128 | 847 | 6,167 | 4.486423 | 0.288076 | 0.044211 | 0.075 | 0.082895 | 0.305789 | 0.292105 | 0.277368 | 0.262632 | 0.262632 | 0.262632 | 0 | 0.009436 | 0.22669 | 6,167 | 185 | 375 | 33.335135 | 0.786957 | 0.020756 | 0 | 0.176056 | 0 | 0.028169 | 0.380676 | 0.006629 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.042254 | 0 | 0.126761 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84e32a3f56750d951d976dd05b43bd476893aedc | 1,487 | py | Python | src/torso.py | kneave/MacFeeglePrime-ROS | d73081190155bb20119f4b9a7216d730c0a1e8b2 | [
"MIT"
] | null | null | null | src/torso.py | kneave/MacFeeglePrime-ROS | d73081190155bb20119f4b9a7216d730c0a1e8b2 | [
"MIT"
] | null | null | null | src/torso.py | kneave/MacFeeglePrime-ROS | d73081190155bb20119f4b9a7216d730c0a1e8b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
import time
import sys
sys.path.append('/home/ubuntu/RedBoard')
import redboard
from std_msgs.msg import Int16MultiArray
# Servo values, 0 is centre
pan_value = 0
tilt_value = 0
def callback(data):
    rospy.loginfo(rospy.get_caller_id() + 'RCVD: %s', data.data)
    setservos(data.data[0], data.data[1])

def listener():
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    rospy.init_node('head_driver', anonymous=True)
    rospy.Subscriber('head_controller', Int16MultiArray, callback)

    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()

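
# Apply an incremental pan/tilt move, clamping each axis to the servo's safe
# range before writing the new positions out to the RedBoard servos.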
def setservos(pan, tilt):
    global pan_value, tilt_value

    pan_max = 65
    # Positive is back, negative forward, due to servo orientation
    tilt_backward_max = 25
    tilt_forward_max = -55

    new_pan = pan_value + pan
    if(abs(new_pan) < pan_max):
        pan_value = new_pan

    new_tilt = tilt_value + tilt
    if(tilt_backward_max >= new_tilt >= tilt_forward_max):
        tilt_value = new_tilt

    redboard.servo22(pan_value)
    redboard.servo21(tilt_value)

if __name__ == '__main__':
print("Head node listening...")
#setservos(0, 0)
#listener()
redboard.servo8_off()
rospy.spin()
| 26.087719 | 72 | 0.694015 | 213 | 1,487 | 4.652582 | 0.497653 | 0.040363 | 0.030272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018739 | 0.210491 | 1,487 | 56 | 73 | 26.553571 | 0.825383 | 0.305313 | 0 | 0.0625 | 0 | 0 | 0.08317 | 0.020548 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.15625 | 0 | 0.25 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84e3985260d9d57b8161b7b1a75f1fd40cce4250 | 10,943 | py | Python | S4/S4 Library/simulation/broadcasters/environment_score/environment_score_mixin.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/broadcasters/environment_score/environment_score_mixin.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/broadcasters/environment_score/environment_score_mixin.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from collections import Counter
import operator
import weakref
from broadcasters.environment_score.environment_score_tuning import EnvironmentScoreTuning
import alarms
import clock
import gsi_handlers
import services
import sims4.log
import sims4.reload
logger = sims4.log.Logger('Environment Score')
with sims4.reload.protected(globals()):
    environment_score_enabled = True
    environment_score_mood_commodities = []

def _initialize_environment_score_commodities(instance_manager=None):
    global environment_score_mood_commodities
    if instance_manager is None:
        instance_manager = services.get_instance_manager(sims4.resources.Types.MOOD)
    environment_score_mood_commodities = []
    for mood in instance_manager.types.values():
        if mood.environment_scoring_commodity is not None:
            environment_score_mood_commodities.append(mood.environment_scoring_commodity)

if not sims4.reload.currently_reloading:
    services.get_instance_manager(sims4.resources.Types.MOOD).add_on_load_complete(_initialize_environment_score_commodities)

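
# Mixin for game objects that collect environment-score broadcasts from nearby
# objects and fold them into mood, positive and negative score commodities.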
class EnvironmentScoreMixin:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._environment_score_commodity = None
        self._environment_score_broadcasters = weakref.WeakSet()
        self._environment_score_alarm_handle = None
        self._dirty = True

    def add_environment_score_broadcaster(self, broadcaster):
        self._remove_linked_broadcasters(broadcaster)
        self._environment_score_broadcasters.add(broadcaster)
        self._dirty = True
        self.schedule_environment_score_update()

    def _remove_linked_broadcasters(self, broadcaster):
        for linked_broadcaster in broadcaster.get_linked_broadcasters_gen():
            self._environment_score_broadcasters.discard(linked_broadcaster)

    def remove_environment_score_broadcaster(self, broadcaster):
        self._environment_score_broadcasters.discard(broadcaster)
        self._dirty = True
        self.schedule_environment_score_update()

    def _start_environment_score(self):
        self._clear_environment_score()
        self._dirty = True
        self.schedule_environment_score_update()

    def _stop_environment_score(self):
        self._clear_environment_score()
        self._dirty = True

    def on_build_objects_environment_score_update(self):
        self._dirty = True
        self.schedule_environment_score_update(force_run=True)

    def _get_build_objects_environment_score(self):
        negative_value = 0
        positive_value = 0
        if not services.get_zone_modifier_service().is_build_eco_effects_enabled:
            return (negative_value, positive_value)
        lot = services.current_zone().lot
        negative_stat_type = EnvironmentScoreTuning.BUILD_OBJECTS_ENVIRONMENT_SCORING.negative_environment_scoring
        negative_stat_tracker = lot.get_tracker(negative_stat_type)
        if negative_stat_tracker is not None:
            negative_value = negative_stat_tracker.get_value(negative_stat_type)
        positive_stat_type = EnvironmentScoreTuning.BUILD_OBJECTS_ENVIRONMENT_SCORING.positive_environment_scoring
        positive_stat_tracker = lot.get_tracker(positive_stat_type)
        if positive_stat_tracker is not None:
            positive_value = positive_stat_tracker.get_value(positive_stat_type)
        return (negative_value, positive_value)

    def _get_broadcasting_environment_score_objects_gen(self):
        for broadcaster in self._environment_score_broadcasters:
            if broadcaster.broadcasting_object is not None:
                yield broadcaster.broadcasting_object
            for linked_broadcaster in broadcaster.get_linked_broadcasters_gen():
                if linked_broadcaster.broadcasting_object is not None:
                    yield linked_broadcaster.broadcasting_object

    def schedule_environment_score_update(self, force_run=False):

        def _update_environment_score_callback(timeline):
            if not force_run and self.queue is not None and self.transition_controller is not None:
                self._environment_score_alarm_handle = None
                return
            self._update_environment_score()

        if self._environment_score_alarm_handle is not None:
            if force_run:
                alarms.cancel_alarm(self._environment_score_alarm_handle)
                self._environment_score_alarm_handle = None
        if self._environment_score_alarm_handle is None:
            self._environment_score_alarm_handle = alarms.add_alarm(self, clock.interval_in_real_seconds(1.0), _update_environment_score_callback, repeating=False)

    def _update_mood_commodities(self, total_mood_scores):
        current_mood_commodity = self._environment_score_commodity
        largest_mood = None
        if total_mood_scores:
            largest_mood = total_mood_scores.most_common(1)[0][0]
        if largest_mood is not None:
            self._environment_score_commodity = largest_mood.environment_scoring_commodity
            if self._environment_score_commodity is not None:
                new_value = total_mood_scores.get(largest_mood, 0)
                if self._environment_score_commodity is current_mood_commodity:
                    if self.commodity_tracker.get_value(self._environment_score_commodity) != new_value:
                        self.commodity_tracker.set_value(self._environment_score_commodity, new_value)
                        self.commodity_tracker.remove_statistic(current_mood_commodity)
                        self.commodity_tracker.add_statistic(self._environment_score_commodity)
                        self.commodity_tracker.set_value(self._environment_score_commodity, new_value)
                else:
                    self.commodity_tracker.remove_statistic(current_mood_commodity)
                    self.commodity_tracker.add_statistic(self._environment_score_commodity)
                    self.commodity_tracker.set_value(self._environment_score_commodity, new_value)
                    logger.error('Environment Scoring: {} has no commodity set for environment scoring.', largest_mood, owner='rmccord')
            else:
                logger.error('Environment Scoring: {} has no commodity set for environment scoring.', largest_mood, owner='rmccord')
        elif current_mood_commodity is not None:
            self.commodity_tracker.remove_statistic(current_mood_commodity)
        return largest_mood

    def _update_positive_and_negative_commodities(self, negative_score, positive_score):
        negative_stat = self.commodity_tracker.get_statistic(EnvironmentScoreTuning.NEGATIVE_ENVIRONMENT_SCORING, add=True)
        positive_stat = self.commodity_tracker.get_statistic(EnvironmentScoreTuning.POSITIVE_ENVIRONMENT_SCORING, add=True)
        contribute_positive_scoring = True
        if negative_stat is not None:
            if negative_stat.get_value() != negative_score:
                negative_stat.set_value(negative_score)
            if negative_stat.buff_handle is not None:
                contribute_positive_scoring = False
        if positive_stat is not None:
            if contribute_positive_scoring and positive_stat.get_value() != positive_score:
                positive_stat.set_value(positive_score)
            elif not contribute_positive_scoring:
                positive_stat.set_value(0)

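    # Recompute this object's environment score from build-mode effects and all
    # registered broadcasters; guarded by the _dirty flag so it runs at most
    # once per scheduled alarm tick.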
    def _update_environment_score(self):
        try:
            if not self._dirty:
                return
            if not environment_score_enabled or self.is_hidden():
                self._clear_environment_score()
                return
            total_mood_scores = Counter()
            total_negative_score = 0
            total_positive_score = 0
            (build_objs_negative_score, build_objs_positive_score) = self._get_build_objects_environment_score()
            total_negative_score += build_objs_negative_score
            total_positive_score += build_objs_positive_score
            if gsi_handlers.sim_handlers_log.environment_score_archiver.enabled:
                contributing_objects = []
                object_contributions = []
            environment_score_objects = set(self._get_broadcasting_environment_score_objects_gen())
            for obj in environment_score_objects:
                (mood_scores, negative_score, positive_score, contributions) = obj.get_environment_score(self)
                total_negative_score += negative_score
                total_positive_score += positive_score
                total_mood_scores.update(mood_scores)
                if gsi_handlers.sim_handlers_log.environment_score_archiver.enabled:
                    if not sum(mood_scores.values()) != 0:
                        if not negative_score != 0:
                            if positive_score != 0:
                                contributing_objects.append((obj, mood_scores, negative_score, positive_score))
                                object_contributions.extend(contributions)
                    contributing_objects.append((obj, mood_scores, negative_score, positive_score))
                    object_contributions.extend(contributions)
            self._update_positive_and_negative_commodities(total_negative_score, total_positive_score)
            largest_mood = self._update_mood_commodities(total_mood_scores)
            if gsi_handlers.sim_handlers_log.environment_score_archiver.enabled and (contributing_objects or total_negative_score != 0 or total_positive_score != 0):
                gsi_handlers.sim_handlers_log.log_environment_score(self.id, largest_mood, total_mood_scores.get(largest_mood, 0), self._environment_score_commodity, total_negative_score, EnvironmentScoreTuning.NEGATIVE_ENVIRONMENT_SCORING, total_positive_score, EnvironmentScoreTuning.POSITIVE_ENVIRONMENT_SCORING, contributing_objects, object_contributions)
            self._dirty = False
        finally:
            self._environment_score_alarm_handle = None

    def _clear_environment_score(self):
        for commodity in environment_score_mood_commodities:
            if self.commodity_tracker.has_statistic(commodity):
                self.commodity_tracker.remove_statistic(commodity)
        self._environment_score_broadcasters.clear()
        self._environment_score_commodity = None
        if self.commodity_tracker.has_statistic(EnvironmentScoreTuning.NEGATIVE_ENVIRONMENT_SCORING):
            self.commodity_tracker.remove_statistic(EnvironmentScoreTuning.NEGATIVE_ENVIRONMENT_SCORING)
        if self.commodity_tracker.has_statistic(EnvironmentScoreTuning.POSITIVE_ENVIRONMENT_SCORING):
            self.commodity_tracker.remove_statistic(EnvironmentScoreTuning.POSITIVE_ENVIRONMENT_SCORING)
        if self._environment_score_alarm_handle is not None:
            alarms.cancel_alarm(self._environment_score_alarm_handle)
            self._environment_score_alarm_handle = None
| 55.831633 | 359 | 0.72704 | 1,214 | 10,943 | 6.092257 | 0.11285 | 0.153597 | 0.081125 | 0.050974 | 0.553678 | 0.420903 | 0.346809 | 0.275284 | 0.227961 | 0.196187 | 0 | 0.002802 | 0.217216 | 10,943 | 195 | 360 | 56.117949 | 0.860612 | 0 | 0 | 0.284091 | 0 | 0 | 0.015444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.056818 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84e4a5de1b248ed60460b4bced5480be4b151dcc | 3,781 | py | Python | tests/conftest.py | daitakahashi/tinydb-query | 2090bc57eb368bce435a737979c6bb35476807bf | [
"MIT"
] | null | null | null | tests/conftest.py | daitakahashi/tinydb-query | 2090bc57eb368bce435a737979c6bb35476807bf | [
"MIT"
] | null | null | null | tests/conftest.py | daitakahashi/tinydb-query | 2090bc57eb368bce435a737979c6bb35476807bf | [
"MIT"
] | null | null | null |
import contextlib
import pytest
import tinydb
import tinydb_ql as QL
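

# Populate a TinyDB instance with five sample player records shared by the
# query tests below; yields the open database.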
@contextlib.contextmanager
def _db_instance(*args, **kwargs):
    with tinydb.TinyDB(*args, **kwargs) as db:
        db.insert_multiple([
            {'name': 'bob', 'age': 12,
             'blob': 1,
             'status': {
                 'gameover': False, 'cleared': False,
                 'current-stage': 3,
                 'by-stage': [{'stage': 'stage1', 'score': 100},
                              {'stage': 'stage2', 'score': 70},
                              {'stage': 'stage3', 'score': 50}]
             },
             'bonus': ['key', 'book', 'orb']},
            {'name': 'alice', 'age': 14,
             'blob': True,
             'status': {
                 'gameover': False, 'cleared': True,
                 'by-stage': [{'stage': 'stage1', 'score': 100},
                              {'stage': 'stage2', 'score': 80},
                              {'stage': 'stage3', 'score': 90},
                              {'stage': 'stage4', 'score': 80},
                              {'stage': 'stage5', 'score': 100}]
             },
             'bonus': ['key', 'book', 'orb', 'candle']},
            {'name': 'taro', 'age': 13,
             'blob': '1',
             'status': {
                 'gameover': True, 'cleared': False,
                 'lang': 'jp',
                 'current-stage': 3,
                 'by-stage': [{'stage': 'stage1', 'score': 100},
                              {'stage': 'stage2', 'score': 80},
                              {'stage': 'stage3', 'score': 40}]
             },
             'bonus': ['key', 'book']},
            {'name': 'hanako', 'age': 15,
             'blob': [1, 2],
             'status': {
                 'gameover': False, 'cleared': False,
                 'lang': 'jp',
                 'current-stage': 4,
                 'by-stage': [{'stage': 'stage1', 'score': 80},
                              {'stage': 'stage2', 'score': 80},
                              {'stage': 'stage3', 'score': 100},
                              {'stage': 'stage4', 'score': 60}]
             },
             'bonus': ['book', 'candelabrum']},
            {'name': 'ichiro', 'age': 16,
             'blob': {'a': 2},
             'status': {
                 'gameover': False, 'cleared': False,
                 'lang': 'jp',
                 'current-stage': 2,
                 'by-stage': [{'stage': 'stage1', 'score': 80},
                              {'stage': 'stage2', 'score': 80}]
             },
             'bonus': ['book', 'candle']}
        ])
        yield db

@pytest.fixture(name='db_instance')
def onmemory_db():
    with _db_instance(storage=tinydb.storages.MemoryStorage) as db:
        yield db


@pytest.fixture(name='db_path')
def fs_db(tmp_path):
    db_path = tmp_path / 'db.json'
    with _db_instance(db_path):
        return db_path


@pytest.fixture(name='run_test_query')
def _run_test_query(db_instance):
    def runner(test_spec):
        ql, tinydb_query, should_exists = test_spec
        q = QL.Query(ql)
        result = db_instance.search(q)
        assert bool(result) is should_exists, q
        assert result == db_instance.search(tinydb_query), q
    yield runner


@pytest.fixture(name='run_test_by_selector')
def _run_test_by_selector(db_instance):
    def runner(test_spec):
        ql, selector = test_spec
        q = QL.Query(ql)
        result = db_instance.search(q)
        expected = list(filter(selector, db_instance.all()))
        assert result == expected, q
    yield runner


@pytest.fixture(name='run_test_error')
def _run_test_error():
    def runner(test_spec):
        ql = test_spec
        with pytest.raises(QL.QLSyntaxError):
            QL.Query(ql)
    yield runner
| 33.460177 | 67 | 0.446972 | 364 | 3,781 | 4.505495 | 0.258242 | 0.060976 | 0.043902 | 0.054878 | 0.454878 | 0.409756 | 0.367073 | 0.321951 | 0.278049 | 0.278049 | 0 | 0.032244 | 0.384819 | 3,781 | 112 | 68 | 33.758929 | 0.672829 | 0 | 0 | 0.336735 | 0 | 0 | 0.191534 | 0 | 0 | 0 | 0 | 0 | 0.030612 | 1 | 0.091837 | false | 0 | 0.040816 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84e8531b2f78d8767380447a8eab9577039dac81 | 3,324 | py | Python | project/settings/development.py | ellmetha/machina-vanilla | a64317141f274097c269f18053d80f48dae86dff | [
"MIT"
] | 1 | 2020-06-04T08:13:06.000Z | 2020-06-04T08:13:06.000Z | project/settings/development.py | ellmetha/machina-demo | a64317141f274097c269f18053d80f48dae86dff | [
"MIT"
] | null | null | null | project/settings/development.py | ellmetha/machina-demo | a64317141f274097c269f18053d80f48dae86dff | [
"MIT"
] | 1 | 2018-09-22T07:05:33.000Z | 2018-09-22T07:05:33.000Z | """
Development Django settings for machina-vanilla demo project
============================================================
This file imports the `base` settings and can add or modify previously defined settings to
alter the configuration of the application for development purposes.
For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import socket
from .base import * # noqa
# APP CONFIGURATION
# ------------------------------------------------------------------------------
INSTALLED_APPS += ( # noqa: F405
    'debug_toolbar',
)

# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE += ( # noqa: F405
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
# DEBUG CONFIGURATION
# ------------------------------------------------------------------------------
DEBUG = True
SILENCED_SYSTEM_CHECKS = ['captcha.recaptcha_test_key_error']
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', ]
INTERNAL_IPS = ['127.0.0.1', ]
ADMINS = ()
MANAGERS = ()
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
TEMPLATES[0]['OPTIONS']['context_processors'] += ( # noqa: F405
    'project.context_processors.webpack', )
TEMPLATES[0]['OPTIONS']['loaders'] = ( # noqa: F405
    # Disables cached loader if any
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
STATICFILES_DIRS = (
    MACHINA_MAIN_STATIC_DIR, # noqa: F405
    str(PROJECT_PATH / 'main' / 'static' / 'build_dev'), # noqa: F405
    str(PROJECT_PATH / 'main' / 'static' / 'build'), # noqa: F405
    str(PROJECT_PATH / 'main' / 'static'), # noqa: F405
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# WEBPACK-DEV-SERVER CONFIGURATION
# ------------------------------------------------------------------------------
WEBPACK_DEV_SERVER_PORT = get_envsetting('WEBPACK_DEV_SERVER_PORT', 8080) # noqa: F405
WEBPACK_DEV_SERVER_URL = 'http://localhost:{port}'.format(port=WEBPACK_DEV_SERVER_PORT)
# Dynamically set a boolean indicating if the webpack dev server is started.
webpack_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
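# If the bind fails with errno 98 (EADDRINUSE on Linux), the port is already
# taken, so we assume the webpack dev server is the process holding it.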
try:
    webpack_sock.bind(('localhost', WEBPACK_DEV_SERVER_PORT))
    WEBPACK_DEV_SERVER_STARTED = False
except socket.error as e:
    WEBPACK_DEV_SERVER_STARTED = (e.errno == 98)
webpack_sock.close()
# ENV-SPECIFIC CONFIGURATION
# ------------------------------------------------------------------------------
try:
    # Allow the use of a settings module named "settings_env" that is not contributed to the
    # repository (only when dev settings are in use!).
    from .settings_env import * # noqa
except ImportError:
    pass
| 31.065421 | 97 | 0.556258 | 321 | 3,324 | 5.598131 | 0.470405 | 0.040067 | 0.080134 | 0.044519 | 0.129104 | 0.095715 | 0.095715 | 0.04118 | 0 | 0 | 0 | 0.016229 | 0.128761 | 3,324 | 106 | 98 | 31.358491 | 0.604282 | 0.528881 | 0 | 0.04878 | 0 | 0 | 0.318637 | 0.215097 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.02439 | 0.097561 | 0 | 0.097561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84eb634735ed60c5bacfdd3f05d4b74a3ee4a389 | 5,998 | py | Python | info.py | marcan/deresuteme | 2d77d04ba0127af57b1880fc764de2650b317d87 | [
"Apache-2.0"
] | 84 | 2016-03-14T17:00:29.000Z | 2022-01-26T11:59:46.000Z | info.py | marcan/deresuteme | 2d77d04ba0127af57b1880fc764de2650b317d87 | [
"Apache-2.0"
] | 2 | 2017-08-20T21:06:56.000Z | 2019-07-19T06:21:27.000Z | info.py | marcan/deresuteme | 2d77d04ba0127af57b1880fc764de2650b317d87 | [
"Apache-2.0"
] | 12 | 2017-03-13T11:59:43.000Z | 2021-12-24T17:20:25.000Z | #!/usr/bin/python
# -!- coding: utf-8 -!-
#
# Copyright 2016 Hector Martin <marcan@marcan.st>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, base64, datetime, pytz, struct
tz = pytz.timezone("Asia/Tokyo")
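
# Timestamps from the game API are given in JST; convert to Unix epoch seconds.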
def parse_ts(ts):
    dt = tz.localize(datetime.datetime.strptime(ts, "%Y-%m-%d %H:%M:%S"), is_dst=None)
    return int((dt - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds())

def parse_card(card_info, chara_list, chara_index):
    ret = {
        "love": int(card_info["love"]),
        "level": int(card_info["level"]),
        "id": int(card_info["card_id"]),
        "star_rank": int(card_info["step"]) + 1,
        "skill_level": int(card_info["skill_level"]),
        "exp": int(card_info["exp"]),
    }
    if "custom_info" in card_info:
        ret["image_id"] = card_info["custom_info"]["image_card_id"]
    else:
        ret["image_id"] = ret["id"]
    if chara_list and chara_index in chara_list:
        chara_info = chara_list[chara_index]
        ret["potential"] = {
            "vocal": chara_info["param_1"],
            "dance": chara_info["param_2"],
            "visual": chara_info["param_3"],
            "life": chara_info["param_4"],
        }
    else:
        ret["potential"] = {
            "vocal": 0,
            "dance": 0,
            "visual": 0,
            "life": 0,
        }
    return ret

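
# Bundles a producer's profile, leader/support cards, and per-difficulty clear
# counts parsed from the game's profile API response.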
class ProducerInfo(object):
    DIFFICULTIES = {
        1: "debut",
        2: "normal",
        3: "pro",
        4: "master",
        5: "master_plus",
        11: "light",
        12: "trick",
    }

    def __init__(self, data=None):
        self.emblem_id = 1000001
        self.emblem_ex_value = None
        self.support_cards = None
        if data is not None:
            self.load_data(data)

    @property
    def timestamp_fmt(self):
        dt = datetime.datetime.fromtimestamp(self.timestamp, tz)
        return dt.strftime("%Y年%m月%d日 %H:%M:%S")

    def load_data(self, data):
        self.timestamp = int(data["data_headers"]["servertime"])
        d = data["data"]
        self.name = d["friend_info"]["user_info"]["name"]
        self.comment = d["friend_info"]["user_info"]["comment"]
        self.rank = int(d["friend_info"]["user_info"]["producer_rank"])
        self.level = int(d["friend_info"]["user_info"]["level"])
        self.prp = int(d["prp"])
        self.fan = int(d["friend_info"]["user_info"]["fan"])
        self.commu_no = int(d["story_number"])
        self.album_no = int(d["album_number"])
        self.creation_ts = parse_ts(d["friend_info"]["user_info"]["create_time"])
        self.last_login_ts = parse_ts(d["friend_info"]["user_info"]["last_login_time"])
        self.id = d["friend_info"]["user_info"]["viewer_id"]
        self.emblem_id = int(d["friend_info"]["user_info"].get("emblem_id", 1000001))
        self.emblem_ex_value = int(d["friend_info"]["user_info"].get("emblem_ex_value", None))
        if self.emblem_id == 0:
            self.emblem_id = 1000001
        self.leader_card = parse_card(d["friend_info"]["leader_card_info"],
                                      d["friend_info"]["user_chara_potential"], "chara_0")
        self.support_cards = {
            "cute": parse_card(d["friend_info"]["support_card_info"]["1"],
                               d["friend_info"]["user_chara_potential"], "chara_1"),
            "cool": parse_card(d["friend_info"]["support_card_info"]["2"],
                               d["friend_info"]["user_chara_potential"], "chara_2"),
            "passion": parse_card(d["friend_info"]["support_card_info"]["3"],
                                  d["friend_info"]["user_chara_potential"], "chara_3"),
            "all": parse_card(d["friend_info"]["support_card_info"]["4"],
                              d["friend_info"]["user_chara_potential"], "chara_4"),
        }
        self.cleared = {i: 0 for i in list(self.DIFFICULTIES.values())}
        self.full_combo = {i: 0 for i in list(self.DIFFICULTIES.values())}
        for i in d["user_live_difficulty_list"]:
            dt = i["difficulty_type"]
            if dt not in self.DIFFICULTIES:
                continue
            self.cleared[self.DIFFICULTIES[dt]] = int(i["clear_number"])
            self.full_combo[self.DIFFICULTIES[dt]] = int(i["full_combo_number"])

    KEYS = ["timestamp", "id", "commu_no", "prp", "album_no", "name", "comment",
            "fan", "level", "rank", "creation_ts", "last_login_ts",
            "leader_card", "cleared", "full_combo", "emblem_id",
            "emblem_ex_value", "support_cards"]

    def to_json(self):
        return json.dumps({k: getattr(self, k) for k in self.KEYS})

    @staticmethod
    def from_json(j):
        self = ProducerInfo()
        v = json.loads(j)
        for k in self.KEYS:
            if k in v:
                setattr(self, k, v[k])
        if self.emblem_id == 0:
            self.emblem_id = 1000001
        return self

if __name__ == "__main__":
    import sys, pickle
    d = json.load(open(sys.argv[1]))
    print("raw:")
    print(d)
    p1 = ProducerInfo(d)
    print()
    print("to_json:")
    j = p1.to_json()
    print(j)
    p2 = ProducerInfo.from_json(j)
    assert p1.__dict__ == p2.__dict__
    #print
    #print "serialize:"
    #ser = p1.serialize()
    #print ser.encode("hex")
    #print
    #p3 = ProducerInfo.unserialize(ser)
    #assert p1.__dict__ == p3.__dict__
    #p2 = ProducerInfo.from_json(open("error.json").read()).serialize()
| 36.351515 | 94 | 0.578526 | 784 | 5,998 | 4.186224 | 0.28699 | 0.042657 | 0.067032 | 0.068556 | 0.273309 | 0.225472 | 0.189519 | 0.120049 | 0.041438 | 0 | 0 | 0.019706 | 0.263921 | 5,998 | 164 | 95 | 36.573171 | 0.723669 | 0.135378 | 0 | 0.075 | 0 | 0 | 0.236149 | 0.004843 | 0 | 0 | 0 | 0 | 0.008333 | 1 | 0.058333 | false | 0.008333 | 0.016667 | 0.008333 | 0.141667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84ec76065cd8fd886f8b8693283d91becf11271b | 1,562 | py | Python | tests/ansible/lib/action/assert_equal.py | msaladna/mitogen | c6824b68181729cb16c090e72f4d35d6c4d95523 | [
"BSD-3-Clause"
] | 1,526 | 2017-09-15T18:49:40.000Z | 2021-01-17T16:04:12.000Z | tests/ansible/lib/action/assert_equal.py | msaladna/mitogen | c6824b68181729cb16c090e72f4d35d6c4d95523 | [
"BSD-3-Clause"
] | 682 | 2017-09-11T17:43:12.000Z | 2021-01-17T05:26:26.000Z | tests/ansible/lib/action/assert_equal.py | msaladna/mitogen | c6824b68181729cb16c090e72f4d35d6c4d95523 | [
"BSD-3-Clause"
] | 111 | 2017-09-15T23:21:37.000Z | 2021-01-01T14:45:35.000Z | #
# Print data structure diff on assertion failure.
#
# assert_equal: left=some.result right={1:2}
#
__metaclass__ = type
import inspect
import unittest2
import ansible.template
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
TEMPLATE_KWARGS = {}
_argspec = inspect.getargspec(ansible.template.Templar.template)
if 'bare_deprecated' in _argspec.args:
    TEMPLATE_KWARGS['bare_deprecated'] = False
class TestCase(unittest2.TestCase):
    def runTest(self):
        pass

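
# Reuse unittest2's assertEqual to produce a readable structural diff;
# returns None when the two values match.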
def text_diff(a, b):
    tc = TestCase()
    tc.maxDiff = None
    try:
        tc.assertEqual(a, b)
        return None
    except AssertionError as e:
        return str(e)

class ActionModule(ActionBase):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset(('left', 'right'))

    def template(self, obj):
        return self._templar.template(
            obj,
            convert_bare=True,
            **TEMPLATE_KWARGS
        )

    def run(self, tmp=None, task_vars=None):
        result = super(ActionModule, self).run(tmp, task_vars or {})

        left = self.template(self._task.args['left'])
        right = self.template(self._task.args['right'])
        diff = text_diff(left, right)
        if diff is None:
            return {
                'changed': False
            }

        return {
            'changed': False,
            'failed': True,
            'msg': diff,
            '_ansible_verbose_always': True,
        }
| 22 | 68 | 0.621639 | 178 | 1,562 | 5.297753 | 0.488764 | 0.034995 | 0.033934 | 0.042418 | 0.050901 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003534 | 0.275288 | 1,562 | 70 | 69 | 22.314286 | 0.829505 | 0.075544 | 0 | 0.043478 | 0 | 0 | 0.065597 | 0.01605 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.086957 | false | 0.021739 | 0.130435 | 0.021739 | 0.413043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84ec94c6c196a5afef15f2ef72d96dcc3f618ad6 | 18,572 | py | Python | dataDisplay.py | andreasbayer/AEGUIFit | 6a1e31091b74d648d007c75c9fef6efae4086860 | [
"BSD-3-Clause"
] | null | null | null | dataDisplay.py | andreasbayer/AEGUIFit | 6a1e31091b74d648d007c75c9fef6efae4086860 | [
"BSD-3-Clause"
] | null | null | null | dataDisplay.py | andreasbayer/AEGUIFit | 6a1e31091b74d648d007c75c9fef6efae4086860 | [
"BSD-3-Clause"
] | null | null | null | import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from PyQt5.QtCore import pyqtSignal, Qt
import fitDataInfo as fdi
import AEFitDataInfo as adi
import polyFitDataInfo as pdi
import numpy as np
import traceback
class DataDisplay(FigureCanvas):
    std_fig_width = 6.40
    std_fig_height = 4.80

    std_label_font_size = 15
    std_scale_font_size = 15
    std_ann_font_size = 24

    statusbar_update = pyqtSignal(str)
    is_loaded_changed = pyqtSignal(bool)

    mark_default_text = 'Click with scroll wheel on figure to show energy value.'

    def __init__(self, parent=None):
        self.__fig, self.__ax = plt.subplots(tight_layout=True)

        # FigureCanvas.__init__(self, self.__fig)
        super(FigureCanvas, self).__init__(self.__fig)

        if self.hasMouseTracking():
            self.setMouseTracking(True)
            self.__inv = self.__ax.transData.inverted()
        else:
            self.__inv = None

        self.setParent(parent)

        self.DisableRefresh(False)
        self.setResizingEnabled(True)

        self.reset()

    def reset(self):
        self.fit_area_col = 'white'  # '#EFEFEF'
        self.area_col = '#ABABAB'
        self.ae_mark_col = 'black'

        self.__dc = 'black'
        self.__fc = 'orange'
        self.__ls = '-'
        self.__ew = 2  # 1.2
        self.__lw = 2  # 1.2
        self.marker_size = 4  # 1.2

        self.setData(None)

        self.__fdiFits = list()
        self.reset_fitIndex()
        self.__combined_fit_data = list()

        self.setStdErrors(None)
        self.__clickmark = None
        self.__showErrorBars = False
        self.__annotations = None

        self.__label_font = {'size': self.std_label_font_size}
        self.__scale_font = {'size': self.std_scale_font_size}
        self.__annotation_font = {'size': self.std_ann_font_size}

        self.__ax.clear()
        #self.refresh()
        #self.set_fig_size([self.std_fig_width, self.std_fig_height], False)

    def draw_event(self, renderer):
        self.__inv = self.__ax.transData.inverted()

    def set_fig_size(self, new_fig_size, forward=True):
        if self.isLoaded():
            if new_fig_size is not None:
                if new_fig_size[0] is not None and new_fig_size[1] is not None:
                    if len(new_fig_size) == 2 and new_fig_size[0] > 0 and new_fig_size[1] > 0:
                        try:
                            self.__fig.set_size_inches(new_fig_size, forward=True)
                        except Exception as error:
                            print(error)
                else:
                    try:
                        # setting it to same size won't update, therefore resize it to one pixel less. resizing window
                        # will adapt it and otherwise 1px is not noticeable
                        self.resize(self.parent().width()-1, self.parent().height()-1)
                    except Exception as e:
                        print(e)
            else:
                # self.__fig.set_size_inches(None)
                # have the size adapt to the width of the canvas
                pass

    def isLoaded(self):
        # if this changes, the trigger for is_loaded_changed also has to be changed!
        return self.__data is not None

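    # Middle-click handling: the first click drops a vertical marker at the
    # clicked energy and shows it in the status bar; a second click clears it.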
    def mousePressEvent(self, event):
        super().mousePressEvent(event)

        if self.isLoaded():
            if event.button() == Qt.MidButton:
                if self.__clickmark is None:
                    if self.__inv is not None:
                        values = (np.float64(event.x()), np.float64(event.y()))
                        tr_point = self.__inv.transform(values)

                        self.statusbar_update.emit(str(round(tr_point[0], 4)) + " eV")#, round(tr_point[1], 4))
                        # self.statusbar_update.emit(round(tr_point[0], 4), round(tr_point[1], 4))

                        self.__clickmark = tr_point[0]
                        self.update_clickmark()
                        self.draw()
                    else:
                        tr_point = (0., 0.)
                else:
                    self.statusbar_update.emit(DataDisplay.mark_default_text)
                    self.__clickmark = None
                    #self.update_clickmark()
                    self.refresh()

    def isRefreshDisabled(self):
        return self.__refreshDisabled

    def DisableRefresh(self, disable):
        self.__refreshDisabled = disable

    def refresh(self, data=None, stdErrors=None, showErrorBars=None, forgetZoomFrame=False, ignoreFirstPoint=False):
        xlim = None
        ylim = None

        if data is not None:
            self.setData(data)

        if showErrorBars is not None:
            self.__showErrorBars = showErrorBars

        if self.isRefreshDisabled() is False:
            if not forgetZoomFrame:
                xlim = self.__ax.get_xlim()
                ylim = self.__ax.get_ylim()
                #(xmar, ymar) = self.__ax.margins()

            if self.isLoaded():
                if self.__showErrorBars == False:
                    stdErrors = None
                else:
                    if stdErrors is not None:
                        self.__stdErrors = stdErrors

                    if self.__stdErrors is not None:
                        stdErrors = self.__stdErrors[:]
                    else:
                        stdErrors = None

                self.__ax.clear()

                #plot data only here:
                if self.show_all_fits():
                    self.__plot_data(self.__data, stdErrors)
                    self.__plot_combined_fit()
                else:
                    try:
                        self.__plot_data(self.current_fdi().get_data(), stdErrors)

                        if self.current_fdi().isFitted():
                            self.__plotFit(self.current_fdi())
                    except Exception as e:
                        print('refresh', e)

                self.update_clickmark()
                self.update_annotations()
                self.update_xaxis()

                self.draw()

                if not forgetZoomFrame:
                    #if (self.__ax.get_xlim(), self.__ax.get_ylim()) != (xlim, ylim):
                    #self.__ax.set_xlim(xlim)
                    #self.__ax.set_ylim(ylim)
                    #self.__ax.margins(x=xmar, y=ymar)
                    #print(self.__ax.get_xlim(), self.__ax.get_ylim())
                    #print(self.__ax.margins())
                    self._zoom_to_bounds(xlim, ylim)
                else:
                    set_breakpoint = True

        return (xlim, ylim)

    def update_clickmark(self):
        if self.__clickmark is not None:
            #if self.__data[self.__lowerZoom, 0] <= self.__clickmark <= self.__data[self.__upperZoom, 0]:
            self.__ax.axvline(x=[self.__clickmark], color='g', linestyle='-.', linewidth='1')
            #self.__ax.plot([self.__clickmark], [0], 'g^')

    def update_annotations(self):
        #trigger on_zoom or find another way
        if self.__annotations is not None and len(self.__annotations) > 0:
            #(xmin, xmax) = self.__ax.get_xlim()
            #(ymin, ymax) = self.__ax.get_ylim()
            #xpos = xmin + 0.01 * (xmax - xmin)
            #ypos = ymax - 0.01 * (ymax - ymin)
            xpos = 0.02
            ypos = 0.98

            if len(self.__annotations) > self.__fitIndex+1:
                annotation = self.__annotations[self.__fitIndex + 1]

                self.__ax.annotate(annotation,
                                   xy=(xpos, ypos),
                                   xycoords='axes fraction',
                                   xytext=(xpos, ypos),
                                   textcoords='axes fraction',
                                   horizontalalignment='left',
                                   verticalalignment='top',
                                   fontsize=self.__annotation_font['size'])

    def update_xaxis(self):
        self.__ax.axhline(y=0, color='black', linestyle=':', linewidth='1')

    def current_fdi(self):
        print(self.__fitIndex)
        print(len(self.__fdiFits))
        return self.__fdiFits[self.__fitIndex]

    def show_all_fits(self):
        return self.__fitIndex == -1

    def getCurrentData(self):
        if self.show_all_fits():
            return self.__data
        else:
            return self.current_fdi().get_data()

    def getAllData(self):
        return self.__data

    def set_annotation(self, annotation):
        self.__annotations = annotation.split('-- next --\n')
        self.update_annotations()
        self.draw()

    def getCurrentFitData(self):
        if self.show_all_fits():
            if self.__combined_fit_exists():
                return self.__combined_fit_data[:, ]
            else:
                return None
        else:
            if self.current_fdi().isFitted() and self.current_fdi().isDisabled() is False:
                return self.current_fdi().getFitData()
            else:
                return None

    def getAllFitData(self, incl_disabled=False):
        all_fit_data = list()
        for fit in self.__fdiFits:
            if fit.isFitted() and (fit.isDisabled() is False or incl_disabled):
                all_fit_data.append(fit.getFitData())
        return all_fit_data

    def getCombinedFitData(self):
        return self.__combined_fit_data

    def __plot_data(self, data, stdErrors):
        if stdErrors is not None:
            stdErrors = stdErrors/2

        self.__ax.errorbar(data[:, 0],
                           data[:, 1],
                           yerr=stdErrors, fmt='.', markersize=self.marker_size,
                           markeredgecolor=self.__dc, markerfacecolor=self.__dc, ecolor=self.__dc, elinewidth=self.__ew,
                           barsabove=True, capsize=2)

        self.__ax.set_ylabel('Ion yield (1/s)', fontdict=self.__label_font)
        self.__ax.set_xlabel('Electron energy (eV)', fontdict=self.__label_font)

        for tick in self.__ax.xaxis.get_major_ticks():
            tick.label.set_size(self.__scale_font['size'])
        for tick in self.__ax.yaxis.get_major_ticks():
            tick.label.set_size(self.__scale_font['size'])

    def shiftData(self, increment):
        if self.isLoaded():
            for set in self.__data:
                set[0] += increment

            xlim = self.__ax.get_xlim()
            ylim = self.__ax.get_ylim()

            self._zoom_to_bounds((xlim[0] + increment, xlim[1] + increment), ylim)

    def update_combined_fit_data(self, combined_fit_data):
        self.__combined_fit_data = combined_fit_data

    def ZoomByIncrement(self, bound, increment):
        pass
        #if self.isLoaded():
        #    if bound == 'u':
        #        if ((self.__upperZoom + increment) <= len(self.__data)) and (
        #                (self.__upperZoom + increment) > self.__lowerZoom):
        #            self.__upperZoom += increment
        #    elif bound == 'l':
        #        if ((self.__lowerZoom + increment) >= 0) and ((self.__lowerZoom + increment) < self.__upperZoom):
        #            self.__lowerZoom += increment

    def ZoomShowAll(self):
        if self.isLoaded():
            self._zoom_to_xbounds(self.__data[0, 0], self.__data[-1, 0], *self.__ax.margins())

    def ZoomToFit(self, x_lb, x_ub, fit_index):
        if self.isLoaded():
            self._zoom_to_xbounds(x_lb, x_ub, *self.__ax.margins())

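    # Zoom helpers: given an x-window, scan the visible data (including error
    # bars) for its y-extent so the view always contains the full curve.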
    def _zoom_to_xbounds(self, x_lb, x_ub, x_margin=0, y_margin=0):
        print(x_lb, x_ub)
        #(x_margin, y_margin) = self.__ax.margins()

        y_lb = self._find_y_min_between(x_lb, x_ub, self.getCurrentData(), self.getStdErrors())
        #y_lb = self._find_y_min_between(x_lb, x_ub, self.getCurrentData(), None)
        if y_lb is None or y_lb > 0:
            y_lb = 0
        elif np.isnan(y_lb) or np.isinf(y_lb):
            print("y_lb is ", y_lb)
            return

        y_ub = self._find_y_max_between(x_lb, x_ub, self.getCurrentData(), self.getStdErrors())
        #y_ub = self._find_y_max_between(x_lb, x_ub, self.getCurrentData(), None)
        if y_ub is None:
            y_ub = 1
        elif np.isnan(y_ub) or np.isinf(y_ub):
            print("y_ub is ", y_ub)
            return

        x_dev = (x_ub - x_lb) * x_margin / 2
        y_dev = (y_ub - y_lb) * y_margin / 2

        self.__ax.set_xlim(x_lb - x_dev, x_ub + x_dev)
        self.__ax.set_ylim(y_lb - y_dev, y_ub + y_dev)
        #self.__ax.set_xlim(x_lb, x_ub)
        #self.__ax.set_ylim(y_lb, y_ub)

        self.draw()

    def _zoom_to_bounds(self, xbounds, ybounds, x_margin=0, y_margin=0):
        (x_lb, x_ub) = xbounds
        (y_lb, y_ub) = ybounds

        x_dev = (x_ub - x_lb) * x_margin / 2
        y_dev = (y_ub - y_lb) * y_margin / 2

        self.__ax.set_xlim(x_lb - x_dev, x_ub + x_dev)
        self.__ax.set_ylim(y_lb - y_dev, y_ub + y_dev)
        # self.__ax.set_xlim(x_lb, x_ub)
        # self.__ax.set_ylim(y_lb, y_ub)

        self.draw()

    def _find_y_max_between(self, xmin, xmax, data, errors):
        y_max = None
        error = 0

        for i in range(1, len(data)):
            (x, y) = data[i]
            if errors is not None and i < len(errors):
                error = errors[i]
            else:
                error = 0

            if xmin <= x <= xmax:
                if np.isnan(y) is not True and (y_max is None or y + error > y_max):
                    y_max = y + error
            elif x > xmax:
                break

        return y_max

    def _find_y_min_between(self, xmin, xmax, data, errors):
        y_min = None
        error = 0

        for i in range(1, len(data)):
            (x, y) = data[i]
            if errors is not None and i < len(errors):
                error = errors[i]

            if xmin <= x <= xmax:
                if np.isnan(y) is not True and (y_min is None or y - error < y_min):
                    y_min = y - error
            elif x > xmax:
                break

        return y_min

    def set_label_font_size(self, value):
        if value > 0:
            self.__label_font["size"] = value

    def set_scale_font_size(self, value):
        if value > 0:
            self.__scale_font["size"] = value

    def set_annotation_font_size(self, value):
        if value > 0:
            self.__annotation_font["size"] = value

    def setData(self, data):
        self.__data = data
        self.is_loaded_changed.emit(self.isLoaded())
        #self.__upperZoom = len(data)-1
        #self.__lowerZoom = 0

    def setStdErrors(self, stdErrors):
        self.__stdErrors = stdErrors

    def getStdErrors(self):
        return self.__stdErrors

    def addFit(self, p_fitDataInfo):
        self.__fdiFits.append(p_fitDataInfo)

    def removeFit(self, p_fitDataInfo):
        self.__fdiFits.remove(p_fitDataInfo)

    def resetFits(self):
        self.__fdiFits.clear()
        self.reset_fitIndex()

    # pass index through, or implement it into fitInfo?
    def update_fit(self, fitInfo):
        #self.__fdiFits.insert(fitInfo.get_fit_index(), fitInfo)
        pass

    def reset_fitIndex(self):
        self.__fitIndex = -1

    def fit_index_changed(self, index):
        if self.isLoaded():
            self.__fitIndex = index

            (xlim, ylim) = self.refresh()
            self._zoom_to_bounds(xlim, ylim)

    def clearFits(self):
        self.__fdiFits.clear()

    def showErrorBars(self, showErrorBars):
        self.refresh(showErrorBars=showErrorBars)

    def __plotFit(self, fdiCurrent: fdi.fitDataInfo):
        fitData = fdiCurrent.getFitData()

        self.__ax.plot(fitData[:, 0], fitData[:, 1],
                       linestyle=self.__ls, color=self.__fc, linewidth=self.__lw)

        self.__mark_fit_data_in_plot(fdiCurrent)

    def __plot_combined_fit(self):
        if self.__combined_fit_exists():
            self.__ax.plot(self.__combined_fit_data[:, 0],
                           self.__combined_fit_data[:, 1],
                           linestyle=self.__ls, color=self.__fc, linewidth=self.__lw)

            for fdiCurrent in self.__fdiFits:
                if fdiCurrent.isFitted() and fdiCurrent.isDisabled() != True:
                    self.__mark_fit_data_in_plot(fdiCurrent)

    def __combined_fit_exists(self):
        return self.__combined_fit_data is not None and len(self.__combined_fit_data) > 0

    def isResizingEnabled(self):
        return self.__isResizing

    def setResizingEnabled(self, enabled):
        self.__isResizing = enabled

    def resizeEvent(self, event):
        if self.isResizingEnabled():
            super().resizeEvent(event)
            #print(event.)

    def __mark_fit_data_in_plot(self, fdiCurrent: fdi.fitDataInfo):
        lowerFitBound = -1
        upperFitBound = -1

        if type(fdiCurrent) is adi.AEFitDataInfo:
            lowerFitBound = fdiCurrent.getFitRelFrom()
            upperFitBound = fdiCurrent.getFitRelTo()

            fwhm = fdiCurrent.getFWHM()
            p = fdiCurrent.getParameters()

            self.__ax.axvline(x=p[1], color=self.ae_mark_col, linestyle='-', linewidth='1')

            x_min = self.__ax.viewLim.intervalx[0]
            x_max = self.__ax.viewLim.intervalx[1]

            lb_fwhm = p[1] - 0.5 * fwhm
            ub_fwhm = p[1] + 0.5 * fwhm

            if lb_fwhm < x_min:
                lb_fwhm = x_min
            if ub_fwhm > x_max:
                ub_fwhm = x_max

            if ub_fwhm - lb_fwhm > 0:
                self.__ax.axvspan(lb_fwhm, ub_fwhm, facecolor=self.area_col, alpha=0.5)
        elif type(fdiCurrent) is pdi.polyFitDataInfo:
            lowerFitBound = fdiCurrent.getFitFrom()
            upperFitBound = fdiCurrent.getFitTo()

        print(lowerFitBound, upperFitBound)

        # mark relevant fit area
        if lowerFitBound != -1 and upperFitBound != -1:
            self.__ax.axvspan(lowerFitBound, upperFitBound, facecolor=self.fit_area_col, alpha=0.5)

    def getFigure(self):
        return self.__fig
| 33.463063 | 120 | 0.54846 | 2,165 | 18,572 | 4.362587 | 0.152425 | 0.029857 | 0.014293 | 0.006988 | 0.279619 | 0.191742 | 0.149497 | 0.130969 | 0.103335 | 0.103335 | 0 | 0.010793 | 0.351443 | 18,572 | 554 | 121 | 33.523466 | 0.77335 | 0.108658 | 0 | 0.29558 | 0 | 0 | 0.014915 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.143646 | false | 0.008287 | 0.024862 | 0.024862 | 0.254144 | 0.024862 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84ed9cd7c89c3cf96e8fc4e58db01358212f8b36 | 1,723 | py | Python | simple_lab/network.py | adam-phelps/simple_lab | 4a8acccfd6bf713742fe2ac0bd988f811218eb3b | [
"MIT"
] | null | null | null | simple_lab/network.py | adam-phelps/simple_lab | 4a8acccfd6bf713742fe2ac0bd988f811218eb3b | [
"MIT"
] | null | null | null | simple_lab/network.py | adam-phelps/simple_lab | 4a8acccfd6bf713742fe2ac0bd988f811218eb3b | [
"MIT"
] | null | null | null | def create_vpc(ec2_r, cidr_block, lab_tag):
""" Create VPC using EC2 resource. """
vpc = ec2_r.create_vpc(CidrBlock=cidr_block)
vpc.wait_until_available()
vpc.create_tags(Tags=[{
"Key": "Name",
"Value": lab_tag
}])
return vpc
def create_internet_gateway(ec2_r, vpc):
""" Create IGW and attach to allow SSH in. """
igw = ec2_r.create_internet_gateway()
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def create_route_table(vpc, igw):
""" Set the default route out the IGW. """
rt_table = vpc.create_route_table(VpcId=vpc.id)
rt_table.create_route(
DestinationCidrBlock='0.0.0.0/0',
GatewayId=igw.id
)
return rt_table
def create_subnet(ec2_c, vpc, rt_table, networks, lab_tag):
""" Must have a least one subnet to provision EC2 instances. """
subnet = ec2_c.create_subnet(CidrBlock=networks, VpcId=vpc.id)
rt_table.associate_with_subnet(SubnetId=subnet.id)
return subnet
def create_security_group(ec2_r, vpc, lab_tag):
""" Must have a security group to apply SSH allow policies. """
sg = ec2_r.create_security_group(
GroupName=lab_tag,
Description=lab_tag,
VpcId=vpc.id)
return sg
def authorize_security_group_ingress(ec2_c, sg_id, port):
""" Alows us to open a TCP port. """
try:
ec2_c.authorize_security_group_ingress(
GroupId=sg_id.id,
IpPermissions=[{
'IpProtocol': 'tcp',
'FromPort': int(port),
'ToPort': int(port),
'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
}]
)
except Exception as e:
print(e)
return 0
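
# Illustrative wiring of the helpers above (CIDRs and tag names are
# assumptions, not from the source):
#   import boto3
#   ec2_r = boto3.resource("ec2")
#   ec2_c = boto3.client("ec2")
#   vpc = create_vpc(ec2_r, "10.0.0.0/16", "simple-lab")
#   igw = create_internet_gateway(ec2_r, vpc)
#   rt = create_route_table(vpc, igw)
#   subnet = create_subnet(ec2_c, vpc, rt, "10.0.1.0/24", "simple-lab")
#   sg = create_security_group(ec2_r, vpc, "simple-lab")
#   authorize_security_group_ingress(ec2_c, sg, 22)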
| 28.716667 | 68 | 0.625073 | 234 | 1,723 | 4.367521 | 0.354701 | 0.015656 | 0.017613 | 0.015656 | 0.072407 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018053 | 0.260592 | 1,723 | 59 | 69 | 29.20339 | 0.784144 | 0.145676 | 0 | 0 | 0 | 0 | 0.049512 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0 | 0 | 0.27907 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f16eceef3846efad926ffc7294baff132ede53 | 7,829 | py | Python | src/foreign_if/python/main/python/frovedis/exrpc/server.py | XpressAI/frovedis | bda0f2c688fb832671c5b542dd8df1c9657642ff | [
"BSD-2-Clause"
] | 63 | 2018-06-21T14:11:59.000Z | 2022-03-30T11:24:36.000Z | src/foreign_if/python/main/python/frovedis/exrpc/server.py | XpressAI/frovedis | bda0f2c688fb832671c5b542dd8df1c9657642ff | [
"BSD-2-Clause"
] | 5 | 2018-09-22T14:01:53.000Z | 2021-12-27T16:11:05.000Z | src/foreign_if/python/main/python/frovedis/exrpc/server.py | XpressAI/frovedis | bda0f2c688fb832671c5b542dd8df1c9657642ff | [
"BSD-2-Clause"
] | 12 | 2018-08-23T15:59:44.000Z | 2022-02-20T06:47:22.000Z | """
server.py
"""
import warnings

from . import node, rpclib


class ServerID(object):
    """A python container for generating IDs for frovedis server"""
    __initial = 0
    __id = __initial
    # A threshold value, assuming it is safe to re-iterate
    # server ID after reaching this value
    __max_id = (1 << 31)

    @staticmethod
    def get():
        """
        NAME: get
        """
        ServerID.__id = (ServerID.__id + 1) % ServerID.__max_id
        if ServerID.__id == 0:
            ServerID.__id = ServerID.__initial + 1
        return ServerID.__id


def explain(server_inst):
    """ to_string() for server instance """
    if server_inst is not None:
        withmsg = "with " + str(server_inst.wsize) + " MPI process"
        if server_inst.wsize > 1:
            withmsg += "es."
        else:
            withmsg += "."
        return "[ID: " + str(server_inst.sid) + "] FrovedisServer (" + \
               str(server_inst.mnode) + ") has been initialized " + withmsg
    else:
        return "No active server is found!!"


class FrovedisServer(object):
    """A singleton implementation to store Frovedis server information"""
    # default command
    __cmd = "/opt/nec/ve/bin/mpirun -np 8 " + \
            "/opt/nec/frovedis/ve/bin/frovedis_server"
    __instance = None

    def __new__(cls):
        if FrovedisServer.__instance is None:
            n_init_server = rpclib.initialize_server(
                FrovedisServer.__cmd.encode('ascii'))
            excpt = rpclib.check_server_exception()
            if excpt["status"]:
                raise RuntimeError(excpt["info"])
            # encoding hostname string to ascii, since it is the key parameter
            # to every rpc call
            host = (n_init_server['hostname']).encode('ascii')
            port = n_init_server['rpcport']
            FrovedisServer.__instance = object.__new__(cls)
            FrovedisServer.__instance.mnode = node.exrpc_node(host, port)
            FrovedisServer.__instance.wsize = rpclib.get_worker_size(host, port)
            FrovedisServer.__instance.sid = ServerID.get()
            excpt = rpclib.check_server_exception()
            if excpt["status"]:
                raise RuntimeError(excpt["info"])
        return FrovedisServer.__instance

    @classmethod
    def setCommand(cls, command):
        """
        sets the default command for server initialization
        """
        if not isinstance(command, str):
            raise ValueError(
                "expected a string as the server initialization command!")
        FrovedisServer.__cmd = command

    @classmethod
    def getCommand(cls):
        """
        returns the default command in case the server is not initialized;
        otherwise returns the command used for server initialization
        """
        return FrovedisServer.__cmd

    @classmethod
    def getServerInstance(cls):
        """
        getServerInstance
        """
        inst = cls()
        return (inst.mnode.get_host(), inst.mnode.get_port())

    @classmethod
    def initialize(cls, command):
        """
        to initialize a new server (if no server is running)
        with the specified command
        """
        if FrovedisServer.__instance is None:
            cls.setCommand(command)
            cls.getServerInstance()
        else:
            print("FrovedisServer is already initialized!!")
        return explain(FrovedisServer.__instance)

    @classmethod
    def shut_down(cls):
        """ to shut down the current server """
        if FrovedisServer.__instance is not None:
            (host, port) = cls.getServerInstance()
            rpclib.clean_server(host, port)
            excpt = rpclib.check_server_exception()
            if excpt["status"]:
                raise RuntimeError(excpt["info"])
            rpclib.finalize_server(host, port)
            excpt = rpclib.check_server_exception()
            if excpt["status"]:
                raise RuntimeError(excpt["info"])
            FrovedisServer.__instance = None
        #else:
        #    print("No server to finalize!")

    @classmethod
    def display(cls):
        """ to display server information """
        print(explain(FrovedisServer.__instance))

    @classmethod
    def getID(cls):
        """ to get the id of the current server """
        if FrovedisServer.__instance is None:
            warnings.warn("FrovedisServer is not initialized, hence "
                          "initializing the server with the default command!",
                          category=UserWarning)
            FrovedisServer.getServerInstance()
        return FrovedisServer.__instance.sid

    @classmethod
    def isUP(cls, server_id=None):
        """
        to confirm whether the current server or
        the specified server with the given id is UP
        """
        if FrovedisServer.__instance is None:
            return False
        else:
            if server_id is None:  # query made for existing server
                return True
            else:  # query made for some specific server
                return server_id == FrovedisServer.getID()

    @classmethod
    def reset(cls):
        """
        resets the server instance -> should be called only when it is
        guaranteed that the server is already terminated.
        """
        FrovedisServer.__instance = None


def check_server_state(server_id, inst_class_name):
    if not FrovedisServer.isUP(server_id):
        raise RuntimeError("FrovedisServer (ID: %d) associated with target "
                           "'%s' object could not be reached!\n"
                           "In case it has already been shut down, "
                           "you would need to re-fit the object.\n"
                           % (server_id, inst_class_name))
    return True


# decorator functions used for setting/checking server association
def set_association(func):
    def set_assoc_wrapper(*args, **kwargs):
        obj = args[0]  # args[0] of func() must be self
        obj.__sid = FrovedisServer.getID()
        return func(*args, **kwargs)
    return set_assoc_wrapper


def check_association(func):
    def check_assoc_wrapper(*args, **kwargs):
        obj = args[0]  # args[0] of func() must be self
        if not obj.is_fitted():
            raise AttributeError(func.__name__ + ": is called before the "
                                 "object is actually constructed (fitted) or the object might "
                                 "have already been released!")
        check_server_state(obj.__sid, obj.__class__.__name__)
        return func(*args, **kwargs)
    return check_assoc_wrapper


def do_if_active_association(func):
    def do_if_active_assoc_wrapper(*args, **kwargs):
        obj = args[0]  # args[0] of func() must be self
        if obj.is_fitted():
            if FrovedisServer.isUP(obj.__sid):
                return func(*args, **kwargs)
        #else:
        #    print("no active server found associated with caller object!")
    return do_if_active_assoc_wrapper
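
# Illustrative use of the association decorators (the class and methods here
# are assumptions for the sketch, not part of this module):
#
#   class SomeModel(object):
#       @set_association
#       def fit(self, data):
#           ...   # remembers which server this object was fitted on
#
#       @check_association
#       def predict(self, data):
#           ...   # raises if the associated server is gone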
def clean_dump(sig, frame):
    import os
    import sys
    import glob
    import shutil
    print("caught signal %d" % sig)
    if "FROVEDIS_TMPDIR" in os.environ:
        tmpdir = os.environ["FROVEDIS_TMPDIR"] + "/frovedis_w2v_dump_*"
    else:
        tmpdir = "/var/tmp/frovedis_w2v_dump_*"
    w2v_dump = glob.glob(tmpdir)
    for each in w2v_dump:
        #print("removing dump: " + each)
        shutil.rmtree(each)
    FrovedisServer.reset()  # safe, since the signal handler has already terminated the server process
    sys.exit(sig)


# ensuring the Frovedis server will definitely be shut down on termination of
# a python program which imports this module.
import atexit
atexit.register(FrovedisServer.shut_down)

import signal
signal.signal(signal.SIGINT, clean_dump)
signal.signal(signal.SIGTERM, clean_dump)
| 35.107623 | 97 | 0.609912 | 873 | 7,829 | 5.271478 | 0.261168 | 0.071708 | 0.026076 | 0.028249 | 0.185789 | 0.119296 | 0.119296 | 0.101043 | 0.101043 | 0.101043 | 0 | 0.003458 | 0.298122 | 7,829 | 222 | 98 | 35.265766 | 0.834031 | 0.188274 | 0 | 0.269737 | 0 | 0 | 0.126414 | 0.014757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.052632 | 0 | 0.348684 | 0.019737 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f241e899fdd0756dc158f8d65dba55d53f1060 | 2,015 | py | Python | tests/conftest.py | Preocts/secretbox | 81638bc4ca5defe1f594a249d9806a622bc686fe | [
"MIT"
] | 3 | 2021-10-16T11:49:40.000Z | 2021-11-28T01:35:25.000Z | tests/conftest.py | Preocts/secretbox | 81638bc4ca5defe1f594a249d9806a622bc686fe | [
"MIT"
] | 50 | 2021-07-31T03:23:27.000Z | 2022-03-28T18:55:48.000Z | tests/conftest.py | Preocts/secretbox | 81638bc4ca5defe1f594a249d9806a622bc686fe | [
"MIT"
] | 1 | 2021-12-09T21:15:19.000Z | 2021-12-09T21:15:19.000Z | """Global fixtures"""
import os
import tempfile
from typing import Generator
from unittest.mock import patch

import pytest

AWS_ENV_KEYS = [
    "AWS_ACCESS_KEY",
    "AWS_SECRET_ACCESS_KEY",
    "AWS_SECURITY_TOKEN",
    "AWS_SESSION_TOKEN",
]

ENV_FILE_CONTENTS = [
    "SECRETBOX_TEST_PROJECT_ENVIRONMENT=sandbox",
    "#What type of .env supports comments?",
    "",
    "BROKEN KEY",
    "VALID==",
    "SUPER_SECRET = 12345",
    "PASSWORD = correct horse battery staple",
    'USER_NAME="not_admin"',
    "MESSAGE = ' Totally not an \"admin\" account logging in'",
    " SINGLE_QUOTES = 'test'",
    "export NESTED_QUOTES = \"'Double your quotes, double your fun'\"",
    ' eXport SHELL_COMPATIBLE = "well, that happened"',
]

ENV_FILE_EXPECTED = {
    "SECRETBOX_TEST_PROJECT_ENVIRONMENT": "sandbox",
    "VALID": "=",
    "SUPER_SECRET": "12345",
    "PASSWORD": "correct horse battery staple",
    "USER_NAME": "not_admin",
    "MESSAGE": ' Totally not an "admin" account logging in',
    "SINGLE_QUOTES": "test",
    "NESTED_QUOTES": "'Double your quotes, double your fun'",
    "SHELL_COMPATIBLE": "well, that happened",
}


@pytest.fixture
def mock_env_file() -> Generator[str, None, None]:
    """Builds and returns the filename of a mock .env file"""
    try:
        file_desc, path = tempfile.mkstemp()
        with os.fdopen(file_desc, "w", encoding="utf-8") as temp_file:
            temp_file.write("\n".join(ENV_FILE_CONTENTS))
        yield path
    finally:
        os.remove(path)


@pytest.fixture
def mask_aws_creds() -> Generator[None, None, None]:
    """Mask local AWS creds to avoid moto calling out"""
    with patch.dict(os.environ):
        for key in AWS_ENV_KEYS:
            os.environ[key] = "masked"
        yield None


@pytest.fixture
def remove_aws_creds() -> Generator[None, None, None]:
    """Removes AWS creds from the environment"""
    with patch.dict(os.environ):
        for key in AWS_ENV_KEYS:
            os.environ.pop(key, None)
        yield None
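
# Example test using these fixtures (illustrative; `load_env_file` is an
# assumed helper, not part of this module):
#   def test_env_file(mock_env_file):
#       assert load_env_file(mock_env_file) == ENV_FILE_EXPECTED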
| 27.60274 | 71 | 0.642184 | 253 | 2,015 | 4.920949 | 0.422925 | 0.028112 | 0.051406 | 0.049799 | 0.504418 | 0.393574 | 0.346988 | 0.346988 | 0.281125 | 0.281125 | 0 | 0.00706 | 0.226799 | 2,015 | 72 | 72 | 27.986111 | 0.792041 | 0.07196 | 0 | 0.157895 | 0 | 0 | 0.392645 | 0.063818 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.035088 | 0.087719 | 0 | 0.140351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f47bb45348683e51a33cad06f7af340e359008 | 2,806 | py | Python | ooobuild/lo/util/date.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/util/date.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/util/date.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is an auto-generated file generated by Cheetah
# Namespace: com.sun.star.util
# LibreOffice Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing


class Date(object):
    """
    Struct Class

    represents a date value.

    The time zone is unknown.

    See Also:
        `API Date <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1util_1_1Date.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.util'
    __ooo_full_ns__: str = 'com.sun.star.util.Date'
    __ooo_type_name__: str = 'struct'
    typeName: str = 'com.sun.star.util.Date'
    """Literal Constant ``com.sun.star.util.Date``"""

    def __init__(self, Day: typing.Optional[int] = 0, Month: typing.Optional[int] = 0, Year: typing.Optional[int] = 0) -> None:
        """
        Constructor

        Arguments:
            Day (int, optional): Day value.
            Month (int, optional): Month value.
            Year (int, optional): Year value.
        """
        super().__init__()
        if isinstance(Day, Date):
            # copy constructor: clone the fields of another Date instance
            oth: Date = Day
            self.Day = oth.Day
            self.Month = oth.Month
            self.Year = oth.Year
            return

        kargs = {
            "Day": Day,
            "Month": Month,
            "Year": Year,
        }
        self._init(**kargs)

    def _init(self, **kwargs) -> None:
        self._day = kwargs["Day"]
        self._month = kwargs["Month"]
        self._year = kwargs["Year"]

    @property
    def Day(self) -> int:
        """
        contains the day of month (1-31 or 0 for a void date).
        """
        return self._day

    @Day.setter
    def Day(self, value: int) -> None:
        self._day = value

    @property
    def Month(self) -> int:
        """
        contains the month of year (1-12 or 0 for a void date).
        """
        return self._month

    @Month.setter
    def Month(self, value: int) -> None:
        self._month = value

    @property
    def Year(self) -> int:
        """
        contains the year.
        """
        return self._year

    @Year.setter
    def Year(self, value: int) -> None:
        self._year = value
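
# Illustrative construction and copy (values are arbitrary):
#   d = Date(Day=14, Month=7, Year=2022)
#   d2 = Date(d)           # copy-constructor path via the isinstance check
#   assert (d2.Day, d2.Month, d2.Year) == (14, 7, 2022)
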
__all__ = ['Date']
| 25.981481 | 127 | 0.59052 | 369 | 2,806 | 4.352304 | 0.390244 | 0.03736 | 0.031133 | 0.043587 | 0.118929 | 0.070361 | 0.031133 | 0.031133 | 0 | 0 | 0 | 0.015167 | 0.295082 | 2,806 | 107 | 128 | 26.224299 | 0.796764 | 0.408411 | 0 | 0.068182 | 0 | 0 | 0.066341 | 0.030726 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.045455 | 0 | 0.431818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f7f0583f87ce8d8fa213091c414701dd8af42c | 9,694 | py | Python | conda_lock/conda_lock.py | noahp/conda-lock | cd519f62aead1c4eb40a7ac4ba002fca25ef6e3a | [
"MIT"
] | null | null | null | conda_lock/conda_lock.py | noahp/conda-lock | cd519f62aead1c4eb40a7ac4ba002fca25ef6e3a | [
"MIT"
] | null | null | null | conda_lock/conda_lock.py | noahp/conda-lock | cd519f62aead1c4eb40a7ac4ba002fca25ef6e3a | [
"MIT"
] | null | null | null | """
Somewhat hacky solution to create conda lock files.
"""
import atexit
import json
import logging
import os
import pathlib
import platform
import shutil
import stat
import subprocess
import sys
import tempfile
from typing import Dict, List, MutableSequence, Optional, Set, Tuple, Union

import requests
import yaml

PathLike = Union[str, pathlib.Path]

# tuple comparison on sys.version_info also handles future major versions
# correctly, unlike comparing major and minor separately
if sys.version_info < (3, 6):
    print("conda_lock needs to run under python >=3.6")
    sys.exit(1)


DEFAULT_PLATFORMS = ["osx-64", "linux-64", "win-64"]
def ensure_conda(conda_executable: Optional[str] = None):
    if conda_executable:
        if pathlib.Path(conda_executable).exists():
            return conda_executable
    conda_executable = shutil.which("conda")
    if conda_executable:
        return conda_executable
    conda_executable = shutil.which("conda.exe")
    if conda_executable:
        return conda_executable
    logging.info(
        "No existing conda installation found. Installing the standalone conda solver"
    )
    return install_conda_exe()


def install_conda_exe():
    conda_exe_prefix = "https://repo.anaconda.com/pkgs/misc/conda-execs"
    if platform.system() == "Linux":
        conda_exe_file = "conda-latest-linux-64.exe"
    elif platform.system() == "Darwin":
        conda_exe_file = "conda-latest-osx-64.exe"
    elif platform.system() == "NT":
        conda_exe_file = "conda-latest-win-64.exe"
    else:
        # TODO: Support windows here
        raise ValueError(f"Unsupported platform: {platform.system()}")

    resp = requests.get(f"{conda_exe_prefix}/{conda_exe_file}", allow_redirects=True)
    resp.raise_for_status()
    target_filename = os.path.expanduser(pathlib.Path(__file__).parent / "conda.exe")
    with open(target_filename, "wb") as fo:
        fo.write(resp.content)
    st = os.stat(target_filename)
    os.chmod(target_filename, st.st_mode | stat.S_IXUSR)
    return target_filename
CONDA_PKGS_DIRS = None


def conda_pkgs_dir():
    global CONDA_PKGS_DIRS
    if CONDA_PKGS_DIRS is None:
        temp_dir = tempfile.TemporaryDirectory()
        CONDA_PKGS_DIRS = temp_dir.name
        atexit.register(temp_dir.cleanup)
    return CONDA_PKGS_DIRS


def conda_env_override(platform):
    env = dict(os.environ)
    env.update(
        {
            "CONDA_SUBDIR": platform,
            "CONDA_PKGS_DIRS": conda_pkgs_dir(),
            "CONDA_UNSATISFIABLE_HINTS_CHECK_DEPTH": "0",
            "CONDA_ADD_PIP_AS_PYTHON_DEPENDENCY": "False",
        }
    )
    return env
def solve_specs_for_arch(
    conda: PathLike, channels: List[str], specs: List[str], platform: str
) -> dict:
    args: MutableSequence[PathLike] = [
        conda,
        "create",
        "--prefix",
        pathlib.Path(conda_pkgs_dir()).joinpath("prefix"),
        "--override-channels",
        "--dry-run",
        "--json",
    ]
    for channel in channels:
        args.extend(["--channel", channel])
        if channel == "defaults" and platform in {"win-64", "win-32"}:
            # msys2 is a windows-only channel that conda automatically
            # injects if the host platform is Windows. If our host
            # platform is not Windows, we need to add it manually
            args.extend(["--channel", "msys2"])
    args.extend(specs)

    try:
        proc = subprocess.run(
            args,
            env=conda_env_override(platform),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf8",
        )
        proc.check_returncode()
    except subprocess.CalledProcessError:
        err_json = json.loads(proc.stdout)
        print(err_json["message"])
        print("\n")
        print(f"Could not lock the environment for platform {platform}")
        sys.exit(1)

    return json.loads(proc.stdout)
def search_for_md5s(conda: PathLike, package_specs: List[dict], platform: str):
    """Use conda-search to determine the md5 metadata that we need.

    This is only needed if pkgs_dirs is set in condarc.
    Sadly this is going to be slow since we need to fetch each result
    individually due to the cli of conda search.
    """
    found: Set[str] = set()
    packages: List[Tuple[str, str]] = [
        *[(d["name"], f"{d['name']}[url={d['url_conda']}]") for d in package_specs],
        *[(d["name"], f"{d['name']}[url={d['url']}]") for d in package_specs],
    ]
    for name, spec in packages:
        if name in found:
            continue
        out = subprocess.run(
            ["conda", "search", "--use-index-cache", "--json", spec],
            encoding="utf8",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=conda_env_override(platform),
        )
        content = json.loads(out.stdout)
        if name in content:
            assert len(content[name]) == 1
            yield content[name][0]
            found.add(name)
def parse_environment_file(environment_file: pathlib.Path) -> Dict:
    if not environment_file.exists():
        raise FileNotFoundError(f"{environment_file} not found")
    with environment_file.open("r") as fo:
        env_yaml_data = yaml.safe_load(fo)
    # TODO: we basically ignore most of the fields for now.
    #       Notably, pip deps are not supported.
    specs = env_yaml_data["dependencies"]
    channels = env_yaml_data.get("channels", [])
    return {"specs": specs, "channels": channels}


def fn_to_dist_name(fn: str) -> str:
    if fn.endswith(".conda"):
        fn, _, _ = fn.partition(".conda")
    elif fn.endswith(".tar.bz2"):
        fn, _, _ = fn.partition(".tar.bz2")
    else:
        raise RuntimeError(f"unexpected file type {fn}", fn)
    return fn
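
# Example (illustrative filename):
#   fn_to_dist_name("numpy-1.19.2-py38h0b7bf81_0.tar.bz2")
#   -> "numpy-1.19.2-py38h0b7bf81_0"
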
def make_lock_files(
    conda: PathLike, platforms: List[str], channels: List[str], specs: List[str]
):
    for plat in platforms:
        print(f"generating lockfile for {plat}", file=sys.stderr)
        dry_run_install = solve_specs_for_arch(
            conda=conda, platform=plat, channels=channels, specs=specs
        )
        with open(f"conda-{plat}.lock", "w") as fo:
            fo.write(f"# platform: {plat}\n")
            fo.write("@EXPLICIT\n")
            link_actions = dry_run_install["actions"]["LINK"]
            for link in link_actions:
                link[
                    "url_base"
                ] = f"{link['base_url']}/{link['platform']}/{link['dist_name']}"
                link["url"] = f"{link['url_base']}.tar.bz2"
                link["url_conda"] = f"{link['url_base']}.conda"
            link_dists = {link["dist_name"] for link in link_actions}

            fetch_actions = dry_run_install["actions"]["FETCH"]
            fetch_by_dist_name = {
                fn_to_dist_name(pkg["fn"]): pkg for pkg in fetch_actions
            }

            non_fetch_packages = link_dists - set(fetch_by_dist_name)
            if len(non_fetch_packages) > 0:
                for search_res in search_for_md5s(
                    conda,
                    [link for link in link_actions if link["dist_name"] in non_fetch_packages],
                    plat,
                ):
                    dist_name = fn_to_dist_name(search_res["fn"])
                    fetch_by_dist_name[dist_name] = search_res

            for pkg in link_actions:
                url = fetch_by_dist_name[pkg["dist_name"]]["url"]
                md5 = fetch_by_dist_name[pkg["dist_name"]]["md5"]
                r = requests.head(url, allow_redirects=True)
                url = r.url
                fo.write(f"{url}#{md5}")
                fo.write("\n")

    print("To use the generated lock files create a new environment:", file=sys.stderr)
    print("", file=sys.stderr)
    print(
        "     conda create --name YOURENV --file conda-linux-64.lock", file=sys.stderr
    )
    print("", file=sys.stderr)
def main_on_docker(env_file, platforms):
    env_path = pathlib.Path(env_file)
    platform_arg = []
    for p in platforms:
        platform_arg.extend(["--platform", p])

    subprocess.check_output(
        [
            "docker",
            "run",
            "--rm",
            "-v",
            f"{str(env_path.parent)}:/work:rwZ",
            "--workdir",
            "/work",
            "conda-lock:latest",
            "--file",
            env_path.name,
            *platform_arg,
        ]
    )
def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--conda", default=None, help="path to the conda executable to use."
    )
    parser.add_argument(
        "-p",
        "--platform",
        nargs="?",
        action="append",
        help="generate lock files for the following platforms",
    )
    parser.add_argument(
        "-f",
        "--file",
        default="environment.yml",
        help="path to a conda environment specification",
    )
    parser.add_argument(
        "-m",
        "--mode",
        choices=["default", "docker"],
        default="default",
        help="""
        Run this conda-lock in an isolated docker container. This may be
        required to account for some issues where conda-lock conflicts with
        existing condarc configurations.
        """,
    )
    args = parser.parse_args()

    environment_file = pathlib.Path(args.file)
    desired_env = parse_environment_file(environment_file)
    conda_exe = ensure_conda(args.conda)
    make_lock_files(
        conda=conda_exe,
        channels=desired_env["channels"] or [],
        specs=desired_env["specs"],
        platforms=args.platform or DEFAULT_PLATFORMS,
    )


if __name__ == "__main__":
    main()
| 30.484277 | 87 | 0.598205 | 1,187 | 9,694 | 4.700927 | 0.251896 | 0.021505 | 0.016308 | 0.013441 | 0.160573 | 0.086559 | 0.04552 | 0.02509 | 0 | 0 | 0 | 0.00588 | 0.280689 | 9,694 | 317 | 88 | 30.580442 | 0.79435 | 0.057768 | 0 | 0.132 | 0 | 0 | 0.197626 | 0.041328 | 0 | 0 | 0 | 0.006309 | 0.004 | 1 | 0.044 | false | 0 | 0.064 | 0 | 0.152 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f83345d1f3fb087e99dcd107ecadefc33a7378 | 2,447 | py | Python | nvtabular/ops/minmax.py | EvenOldridge/NVTabular | 85333ae754c0512f7b213a4e98117a1501500dda | [
"Apache-2.0"
] | null | null | null | nvtabular/ops/minmax.py | EvenOldridge/NVTabular | 85333ae754c0512f7b213a4e98117a1501500dda | [
"Apache-2.0"
] | null | null | null | nvtabular/ops/minmax.py | EvenOldridge/NVTabular | 85333ae754c0512f7b213a4e98117a1501500dda | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cudf._lib.nvtx import annotate

from .stat_operator import StatOperator


class MinMax(StatOperator):
    """
    MinMax operation calculates min and max statistics of features.

    Parameters
    -----------
    columns : list of str, default None
    batch_mins : list of float, default None
    batch_maxs : list of float, default None
    mins : list of float, default None
    maxs : list of float, default None
    """

    def __init__(self, columns=None, batch_mins=None, batch_maxs=None, mins=None, maxs=None):
        super().__init__(columns=columns)
        self.batch_mins = batch_mins if batch_mins is not None else {}
        self.batch_maxs = batch_maxs if batch_maxs is not None else {}
        self.mins = mins if mins is not None else {}
        self.maxs = maxs if maxs is not None else {}

    @annotate("MinMax_op", color="green", domain="nvt_python")
    def stat_logic(self, ddf, columns_ctx, input_cols, target_cols):
        cols = self.get_columns(columns_ctx, input_cols, target_cols)
        dask_stats = {}
        dask_stats["mins"] = ddf[cols].min()
        dask_stats["maxs"] = ddf[cols].max()
        return dask_stats

    @annotate("MinMax_finalize", color="green", domain="nvt_python")
    def finalize(self, stats):
        for col in stats["mins"].index.values_host:
            self.mins[col] = stats["mins"][col]
            self.maxs[col] = stats["maxs"][col]

    def registered_stats(self):
        return ["mins", "maxs", "batch_mins", "batch_maxs"]

    def stats_collected(self):
        result = [
            ("mins", self.mins),
            ("maxs", self.maxs),
            ("batch_mins", self.batch_mins),
            ("batch_maxs", self.batch_maxs),
        ]
        return result

    def clear(self):
        self.batch_mins = {}
        self.batch_maxs = {}
        self.mins = {}
        self.maxs = {}
        return
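
# Illustrative standalone use (the ddf/columns_ctx arguments follow the
# StatOperator contract; the shapes below are assumptions, not from the source):
#   op = MinMax(columns=["x", "y"])
#   lazy_stats = op.stat_logic(ddf, columns_ctx, "continuous", ["base"])
#   op.finalize(computed_stats)   # then inspect op.mins / op.maxs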
| 33.520548 | 93 | 0.644871 | 329 | 2,447 | 4.653495 | 0.367781 | 0.052907 | 0.028739 | 0.047028 | 0.194644 | 0.169824 | 0 | 0 | 0 | 0 | 0 | 0.004322 | 0.243564 | 2,447 | 72 | 94 | 33.986111 | 0.822798 | 0.331835 | 0 | 0 | 0 | 0 | 0.082435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162162 | false | 0 | 0.054054 | 0.027027 | 0.351351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f88b05010a0d7cf4cbf357ee3a42b3fba0850b | 4,915 | py | Python | cli_command_execution.py | IBM-Cloud/Ray-Commands | 9078af9fb367d451ee86f1200507f96b6f01d539 | [
"Apache-2.0"
] | 2 | 2022-02-19T14:22:47.000Z | 2022-02-22T17:19:33.000Z | cli_command_execution.py | IBM-Cloud/Ray-Commands | 9078af9fb367d451ee86f1200507f96b6f01d539 | [
"Apache-2.0"
] | null | null | null | cli_command_execution.py | IBM-Cloud/Ray-Commands | 9078af9fb367d451ee86f1200507f96b6f01d539 | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
import ray
import time
import sys
import humanfriendly
import pandas as pd

from subprocess import PIPE, Popen

# read the list of pre-generated `mc` copy commands, one per line
with open('final_commands.txt', 'r') as url_file:
    lines = url_file.readlines()


@ray.remote(num_cpus=0.2)
def do_it(filen, input_endpoint, output_endpoint, input_access_key, input_secret_key, output_access_key, output_secret_key):
    filen = filen.strip()
    cmd_string = 'mc alias set source https://{0} {1} {2} > /dev/null; mc alias set target https://{3} {4} {5} > /dev/null; {6}'.format(
        input_endpoint, input_access_key, input_secret_key,
        output_endpoint, output_access_key, output_secret_key, filen)
    #print("Executing: '{}'".format(filen))

    def run_command(cmd_string):
        p = Popen(cmd_string, shell=True, stdout=PIPE, stderr=subprocess.STDOUT)
        stdout, stderr = p.communicate()
        return stdout.decode("utf-8")

    max_attempts = 3
    attempt = 1
    while attempt <= max_attempts:  # retry the copy up to max_attempts times
        output = run_command(cmd_string)
        #print("'%s'" % output)
        try:
            transferred = output[output.find("Transferred: "):output.find("Speed: ")].split(" ")
            transferred_str = transferred[1] + transferred[2]
            return transferred_str
        except IndexError:
            attempt += 1  # Try again
    print("Couldn't parse transferred data from output of copy task: {}".format(filen))
    print("Output was:\n{}".format(output))
    print("Returning 0 for transferred data volume.")
    return "0"
def submission_progress(tasks, tasks_total):
    bar_len = 80
    tasks_filled_len = int(round(bar_len * tasks / float(tasks_total)))
    tasks_percents = round(100.0 * tasks / float(tasks_total), 1)
    tasks_bar = '=' * tasks_filled_len + '-' * (bar_len - tasks_filled_len)
    sys.stdout.write('Tasks submitted (%s/%s) [%s] %s%s \r' %
                     (tasks, tasks_total, tasks_bar, tasks_percents, '%'))
    sys.stdout.flush()


def execution_progress(bytes, bytes_total, tasks_left, tasks_total, duration):
    bar_len = 25
    bytes_filled_len = int(round(bar_len * bytes / float(bytes_total)))
    bytes_percents = round(100.0 * bytes / float(bytes_total), 1)
    bytes_bar = '=' * bytes_filled_len + '-' * (bar_len - bytes_filled_len)
    tasks_filled_len = int(round(bar_len * (tasks_total - tasks_left) / float(tasks_total)))
    tasks_percents = round(100.0 * (tasks_total - tasks_left) / float(tasks_total), 1)
    tasks_bar = '=' * tasks_filled_len + '-' * (bar_len - tasks_filled_len)
    sys.stdout.write('Bytes(%s/%s = %s/s) [%s] %s%s Tasks(%s/%s) [%s] %s%s Time(s):%s \r' %
                     (humanfriendly.format_size(bytes, binary=True), humanfriendly.format_size(bytes_total, binary=True),
                      humanfriendly.format_size(round(bytes / duration), binary=True), bytes_bar, bytes_percents, '%',
                      tasks_total - tasks_left, tasks_total, tasks_bar, tasks_percents, '%', round(duration)))
    sys.stdout.flush()
if __name__ == '__main__':
    ray.init(address='auto')
    all_ref_objs = []
    total_tasks = len(lines)
    for task_number, line in enumerate(lines, start=1):
        refobj = do_it.remote(line.strip(), sys.argv[1], sys.argv[2], sys.argv[3],
                              sys.argv[4], sys.argv[5], sys.argv[6])
        all_ref_objs.append(refobj)
        submission_progress(task_number, total_tasks)
    print("")

    total_volume = pd.read_csv("object_2_size.csv")["size"].apply(humanfriendly.parse_size).sum()
    start_seconds = time.time()
    result_ids = all_ref_objs
    transferred_volume = 0
    while len(result_ids) > 0:
        done_ids, result_ids = ray.wait(result_ids)
        for done_id in done_ids:
            # scale the parsed volume by an empirical correction factor
            # carried over from the original script
            transferred_volume += 1.048913043478261 * humanfriendly.parse_size(ray.get(done_id))
        duration = time.time() - start_seconds
        execution_progress(transferred_volume, total_volume, len(result_ids), len(all_ref_objs), duration)

    total_time = time.time() - start_seconds
    print("")
    print("\033[96m(Main loop)\033[0m Total time (s): ", total_time)
    print("\033[96m(Main loop)\033[0m Total average Bytes/s: ", humanfriendly.format_size(total_volume / total_time, binary=True))
| 49.15 | 247 | 0.6706 | 685 | 4,915 | 4.576642 | 0.224818 | 0.009569 | 0.010526 | 0.010207 | 0.376396 | 0.289952 | 0.239872 | 0.171611 | 0.072727 | 0.045933 | 0 | 0.033848 | 0.182503 | 4,915 | 99 | 248 | 49.646465 | 0.746391 | 0.165005 | 0 | 0.078947 | 0 | 0.026316 | 0.124389 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.184211 | 0.092105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f939c67c0b1fb6b0b4b98b5ce191bb5f2e9777 | 3,673 | py | Python | src/satproc/console/generalize.py | dymaxionlabs/satproc | 002c945eb378619659e85dddb73477b253899630 | [
"Apache-2.0"
] | 22 | 2021-05-19T13:45:55.000Z | 2022-02-17T22:40:28.000Z | src/satproc/console/generalize.py | dymaxionlabs/satproc | 002c945eb378619659e85dddb73477b253899630 | [
"Apache-2.0"
] | 35 | 2021-06-28T17:36:21.000Z | 2022-03-08T19:54:35.000Z | src/satproc/console/generalize.py | dymaxionlabs/satproc | 002c945eb378619659e85dddb73477b253899630 | [
"Apache-2.0"
] | 6 | 2021-05-20T22:49:37.000Z | 2022-03-31T14:30:32.000Z | # -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
from glob import glob

from satproc import __version__
from satproc.postprocess.generalize import generalize

__author__ = "Damián Silvani"
__copyright__ = "Dymaxion Labs"

_logger = logging.getLogger(__name__)


def parse_args(args):
    """Parse command line parameters

    Args:
        args ([str]): command line parameters as list of strings

    Returns:
        :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Generalize vector files by simplifying and/or smoothing polygons",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument("input_vector", nargs="*", help="input vector file")
    parser.add_argument("--input-dir", help="directory containing input vector files")
    parser.add_argument("-o", "--output-dir", help="output directory")
    parser.add_argument(
        "-tcrs",
        "--target-crs",
        default=None,
        help="reproject to another CRS before processing (EPSG code)",
    )
    parser.add_argument(
        "--simplify",
        choices=["douglas"],
        default="douglas",
        help="simplification method",
    )
    parser.add_argument(
        "--douglas-tolerance",
        type=float,
        default=0.1,
        help="Douglas-Peucker tolerance",
    )
    parser.add_argument(
        "--smooth",
        choices=["chaikin"],
        default=None,
        help="smoothing method",
    )
    parser.add_argument(
        "--chaikin-refinements",
        default=5,
        type=int,
        help="number of Chaikin refinements",
    )
    parser.add_argument(
        "--version", action="version", version="satproc {ver}".format(ver=__version__)
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="loglevel",
        help="set loglevel to INFO",
        action="store_const",
        const=logging.INFO,
    )
    parser.add_argument(
        "-vv",
        "--very-verbose",
        dest="loglevel",
        help="set loglevel to DEBUG",
        action="store_const",
        const=logging.DEBUG,
    )

    return parser.parse_args(args), parser


def setup_logging(loglevel):
    """Setup basic logging

    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format=logformat,
        datefmt="%Y-%m-%d %H:%M:%S",
    )


def main(args):
    """Main entry point allowing external calls

    Args:
        args ([str]): command line parameter list
    """
    args, parser = parse_args(args)
    setup_logging(args.loglevel)

    input_files = []
    if args.input_vector:
        input_files.extend(args.input_vector)
    if args.input_dir:
        files = list(glob(os.path.join(args.input_dir, "*")))
        input_files.extend(files)
    if not input_files:
        raise RuntimeError(
            (
                "No input files found. "
                "You should pass individual input_vector paths, or use --input-dir."
            )
        )
    _logger.info("Num. input files: %d", len(input_files))

    generalize(
        input_files=input_files,
        output_dir=args.output_dir,
        target_crs=args.target_crs,
        simplify=args.simplify,
        douglas_tolerance=args.douglas_tolerance,
        smooth=args.smooth,
        chaikins_refinements=args.chaikin_refinements,
    )


def run():
    """Entry point for console_scripts"""
    main(sys.argv[1:])


if __name__ == "__main__":
    run()
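
# Example invocation (illustrative; assumes a console script named
# `satproc_generalize` is registered for `run`):
#   satproc_generalize input.gpkg -o out/ --smooth chaikin --chaikin-refinements 3 -v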
| 24.817568 | 87 | 0.613395 | 399 | 3,673 | 5.468672 | 0.390977 | 0.045371 | 0.085701 | 0.016499 | 0.078827 | 0.032997 | 0.032997 | 0 | 0 | 0 | 0 | 0.001848 | 0.263545 | 3,673 | 147 | 88 | 24.986395 | 0.804806 | 0.108631 | 0 | 0.132075 | 0 | 0 | 0.235496 | 0.017155 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0.009434 | 0.066038 | 0 | 0.113208 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84f95fb1a9783e21e2199ef7dbdb747eb20c4617 | 4,932 | py | Python | piri/mapper.py | ryanzhoucoupa/piri | de8ed240492d836305c94decbede3933ea982db0 | [
"MIT"
] | 23 | 2020-08-25T13:24:22.000Z | 2022-03-09T14:15:23.000Z | piri/mapper.py | ryanzhoucoupa/piri | de8ed240492d836305c94decbede3933ea982db0 | [
"MIT"
] | 85 | 2020-08-25T12:04:16.000Z | 2021-10-04T08:08:14.000Z | piri/mapper.py | ryanzhoucoupa/piri | de8ed240492d836305c94decbede3933ea982db0 | [
"MIT"
] | 16 | 2020-10-06T10:26:37.000Z | 2021-11-18T00:07:39.000Z | import decimal
from typing import Any, Dict, List, Optional, Union

from returns.curry import partial
from returns.maybe import maybe
from returns.pipeline import is_successful
from returns.result import safe

from piri.collection_handlers import iterable_data_handler
from piri.constants import (
    ARRAY,
    ATTRIBUTES,
    BRANCHING_ATTRIBUTES,
    BRANCHING_OBJECTS,
    ITERABLES,
    NAME,
    OBJECTS,
)
from piri.handlers import handle_attribute

decimal.getcontext().rounding = decimal.ROUND_HALF_UP

MappedDict = Dict[str, Any]


@safe
def map_data(
    input_data,
    configuration,
) -> Union[list, dict]:
    """Map entrypoint.

    Try to get iterable data.
    If that fails, just run map_object normally, wrapping the result in an
    array if `array` is true.

    If we had iterable data, iterate that data and run map_object with the
    current iteration data added to the root of the input_data dictionary.
    """
    iterate_data = iterable_data_handler(
        input_data, configuration.get(ITERABLES, []),
    )
    if not is_successful(iterate_data):
        return map_object(input_data, configuration).map(
            partial(set_array, array=configuration[ARRAY]),
        ).unwrap()

    mapped_objects: List[dict] = []

    # TODO: find a `returns` helper that works with iterables
    for iteration in iterate_data.unwrap():
        map_object(
            iteration,
            configuration,
        ).map(
            mapped_objects.append,
        )

    return mapped_objects


def set_array(input_data, array):
    """Return data wrapped in an array if array=True."""
    if array:
        return [input_data]
    return input_data
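
# Illustrative shape of a configuration consumed by map_data (keys are shown
# via the imported constants; the actual string values live in piri.constants):
#   {
#       NAME: 'person',
#       ARRAY: False,
#       ITERABLES: [],
#       ATTRIBUTES: [...],
#       OBJECTS: [...],
#       BRANCHING_OBJECTS: [...],
#   }
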
@maybe
def map_object(input_data, configuration) -> Optional[MappedDict]:
    """Map one object.

    One object has collections of:
        Attribute mappings,
        Nested object mappings,
        Branching object mappings.

    All functions we call return a dictionary with the mapped values,
    so all we have to do is call update on a shared object_data dict.

    Return example:
        {
            'attrib': 'val',
            'object1': {'attrib1': 'val'},
            'branching_object1': [{'attrib1': 'val'}],
        }
    """
    object_data: MappedDict = {}

    map_attributes(
        input_data, configuration.get(ATTRIBUTES, []),
    ).map(object_data.update)

    map_objects(
        input_data, configuration.get(OBJECTS, []),
    ).map(object_data.update)

    map_branching_objects(
        input_data, configuration.get(BRANCHING_OBJECTS, []),
    ).map(object_data.update)

    # need this as long as an empty dict is not seen as None by returns.maybe
    return object_data or None


@maybe
def map_attributes(input_data, configuration) -> Optional[MappedDict]:
    """For all attributes, map attribute.

    The name of each attribute should be set:
        {
            'attribute1': 'value',
            'attribute2': 'value2',
        }
    """
    attributes: MappedDict = {}

    for attribute_cfg in configuration:
        attribute_value = handle_attribute(input_data, attribute_cfg)
        if is_successful(attribute_value):
            attributes[attribute_cfg[NAME]] = attribute_value.unwrap()

    return attributes or None


@maybe
def map_objects(
    input_data,
    configuration,
) -> Optional[MappedDict]:
    """For all objects, map object.

    The name of each object should be set:
        {
            'name1': object,
            'name2': object2,
        }
    """
    mapped_objects: MappedDict = {}

    for object_cfg in configuration:
        object_value = map_data(input_data, object_cfg)
        if is_successful(object_value):
            mapped_objects[object_cfg[NAME]] = object_value.unwrap()

    return mapped_objects or None


@maybe
def map_branching_attributes(
    input_data,
    b_attributes,
) -> Optional[List[MappedDict]]:
    """Map branching attributes.

    Branching attributes are a list of attribute mappings that will be
    mapped to the same name in the branching object.
    """
    mapped_attributes: List[MappedDict] = []

    for sub_cfg in b_attributes:
        map_attributes(
            input_data, sub_cfg,
        ).map(
            mapped_attributes.append,
        )

    # need this as long as an empty list is not seen as None by returns.maybe
    return mapped_attributes or None


@maybe
def map_branching_objects(
    input_data,
    configuration,
) -> Optional[MappedDict]:
    """Map branching object.

    A branching object is a case where we want to create the same object
    multiple times, but we want to find the data in different places.
    """
    mapped_objects: MappedDict = {}

    for b_object in configuration:
        mapped = map_branching_attributes(
            input_data, b_object[BRANCHING_ATTRIBUTES],
        )
        if is_successful(mapped):
            mapped_objects[b_object[NAME]] = mapped.unwrap()

    # need this as long as an empty dict is not seen as None by returns.maybe
    return mapped_objects or None
| 25.035533 | 79 | 0.672749 | 609 | 4,932 | 5.284072 | 0.229885 | 0.053139 | 0.068365 | 0.031075 | 0.251398 | 0.175886 | 0.088254 | 0.059664 | 0.059664 | 0.059664 | 0 | 0.002693 | 0.247161 | 4,932 | 196 | 80 | 25.163265 | 0.863991 | 0.302311 | 0 | 0.254902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068627 | false | 0 | 0.088235 | 0 | 0.245098 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84fa6491306c7f4088496f353dbf7cbc5ef44491 | 2,955 | py | Python | digby/motors.py | gilbertmike/digby | cb0f762fb703cf0e8b0f0ac7387e8b89f67eaf8e | [
"MIT"
] | null | null | null | digby/motors.py | gilbertmike/digby | cb0f762fb703cf0e8b0f0ac7387e8b89f67eaf8e | [
"MIT"
] | null | null | null | digby/motors.py | gilbertmike/digby | cb0f762fb703cf0e8b0f0ac7387e8b89f67eaf8e | [
"MIT"
] | null | null | null | from gpiozero import OutputDevice, PWMOutputDevice
class MotorController:
    def __init__(self, front_left_pwm, front_left_dir, front_right_pwm,
                 front_right_dir, back_left_pwm, back_left_dir, back_right_pwm,
                 back_right_dir, max_speed=0.5, reverse_fl=False,
                 reverse_bl=False, reverse_fr=False, reverse_br=False):
        # Motor pins
        self.fl_pwm = PWMOutputDevice(front_left_pwm, initial_value=False)
        self.fl_dir = OutputDevice(front_left_dir,
                                   active_high=reverse_fl,
                                   initial_value=False)
        self.fr_pwm = PWMOutputDevice(front_right_pwm, initial_value=False)
        self.fr_dir = OutputDevice(front_right_dir,
                                   active_high=reverse_fr,
                                   initial_value=False)
        self.bl_pwm = PWMOutputDevice(back_left_pwm, initial_value=False)
        self.bl_dir = OutputDevice(back_left_dir,
                                   active_high=reverse_bl,
                                   initial_value=False)
        self.br_pwm = PWMOutputDevice(back_right_pwm, initial_value=False)
        self.br_dir = OutputDevice(back_right_dir,
                                   active_high=reverse_br,
                                   initial_value=False)

        # Motor velocity
        self.max_speed = max_speed
        self.vl = 0
        self.vr = 0

    def steer(self, angle, vel):
        """Steer the motors.

        The value `angle` is the smallest angle between the steering
        vector and the Y-axis. The value is between -1 (positive X) and
        1 (negative X).

        The value `vel` is the speed. The value is between -1 (negative Y)
        and 1 (positive Y).
        """
        vel *= self.max_speed
        if angle > 0:
            self.vl = vel * (1 - angle)
            self.vr = vel
        else:
            self.vl = vel
            self.vr = vel * (1 - abs(angle))
        self._update()

    def _update(self):
        self.fl_pwm.value = abs(self.vl)
        self.bl_pwm.value = abs(self.vl)
        if self.vl > 0:
            self.fl_dir.on()
            self.bl_dir.on()
        else:
            self.fl_dir.off()
            self.bl_dir.off()

        self.fr_pwm.value = abs(self.vr)
        self.br_pwm.value = abs(self.vr)
        if self.vr > 0:
            self.fr_dir.on()
            self.br_dir.on()
        else:
            self.fr_dir.off()
            self.br_dir.off()

    def get_telemetry(self):
        return {
            'fl_pwm': self.fl_pwm.value,
            'fl_dir': self.fl_dir.value,
            'fr_pwm': self.fr_pwm.value,
            'fr_dir': self.fr_dir.value,
            'bl_pwm': self.bl_pwm.value,
            'bl_dir': self.bl_dir.value,
            'br_pwm': self.br_pwm.value,
            'br_dir': self.br_dir.value,
            'vl': self.vl,
            'vr': self.vr
        }
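
# Illustrative wiring (GPIO pin numbers are assumptions, not from the source):
#   mc = MotorController(12, 5, 13, 6, 18, 23, 19, 24)
#   mc.steer(0.5, 1.0)       # arc steer at the configured max speed
#   print(mc.get_telemetry())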
| 35.60241 | 79 | 0.535702 | 370 | 2,955 | 4.010811 | 0.164865 | 0.06469 | 0.091644 | 0.099057 | 0.239218 | 0.076819 | 0 | 0 | 0 | 0 | 0 | 0.006997 | 0.371235 | 2,955 | 82 | 80 | 36.036585 | 0.791712 | 0.091709 | 0 | 0.109375 | 0 | 0 | 0.019787 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.015625 | 0.015625 | 0.109375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84fb8930b6c5b3170e2aacdf327ea9bfa1e892aa | 828 | py | Python | jcasts/episodes/emails.py | danjac/jcasts | 04f5ef1f536d51962c0433d092817c0153acb6af | [
"MIT"
] | 13 | 2021-09-17T07:41:00.000Z | 2022-02-10T10:00:48.000Z | jcasts/episodes/emails.py | danjac/jcasts | 04f5ef1f536d51962c0433d092817c0153acb6af | [
"MIT"
] | 167 | 2021-07-17T09:41:38.000Z | 2021-08-31T06:03:34.000Z | jcasts/episodes/emails.py | danjac/jcasts | 04f5ef1f536d51962c0433d092817c0153acb6af | [
"MIT"
] | null | null | null | from __future__ import annotations
from datetime import timedelta

from django_rq import job

from jcasts.episodes.models import Episode
from jcasts.shared.typedefs import User
from jcasts.users.emails import send_user_notification_email


@job("mail")
def send_new_episodes_email(user: User, since: timedelta) -> None:
    """Sends email with new episodes added to user's collection."""

    episodes = (
        Episode.objects.recommended(user, since).select_related("podcast").order_by("?")
    )[:6]

    if len(episodes) < 3:
        return

    send_user_notification_email(
        user,
        f"Hi {user.username}, here are some new episodes from your collection.",
        "episodes/emails/new_episodes.txt",
        "episodes/emails/new_episodes.html",
        {
            "episodes": episodes,
        },
    )
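
# Because of the @job decorator this can be queued on the "mail" queue
# via django_rq (illustrative call):
#   send_new_episodes_email.delay(user, timedelta(days=7))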
| 25.875 | 88 | 0.683575 | 102 | 828 | 5.372549 | 0.519608 | 0.100365 | 0.072993 | 0.091241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003082 | 0.216184 | 828 | 31 | 89 | 26.709677 | 0.841294 | 0.068841 | 0 | 0 | 0 | 0 | 0.2 | 0.084967 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.272727 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84fcf8179ffbbe188cded099879ea1fb89a61d64 | 1,439 | py | Python | src/pricehist/outputs/ledger.py | chrisberkhout/pricehist | a54da85a6fae15e2f771e8612aed089407ec5c22 | [
"MIT"
] | 4 | 2021-09-15T03:23:10.000Z | 2022-02-08T23:31:10.000Z | src/pricehist/outputs/ledger.py | chrisberkhout/pricehist | a54da85a6fae15e2f771e8612aed089407ec5c22 | [
"MIT"
] | null | null | null | src/pricehist/outputs/ledger.py | chrisberkhout/pricehist | a54da85a6fae15e2f771e8612aed089407ec5c22 | [
"MIT"
] | null | null | null | """
Ledger output

Supports both `Ledger <https://www.ledger-cli.org/>`_ and
`hledger <https://hledger.org/>`_ plain text accounting formats.

By default the output should be valid for Ledger, but can be customized for
hledger or other variants via formatting options. Invalid variants are
possible, so the user should be familiar with the requirements of the target
format.

Relevant sections of the Ledger manual:

* `Commodities and Currencies
  <https://www.ledger-cli.org/3.0/doc/ledger3.html#Commodities-and-Currencies>`_
* `Commoditized Amounts
  <https://www.ledger-cli.org/3.0/doc/ledger3.html#Commoditized-Amounts>`_

Relevant sections of the hledger manual:

* `Declaring market prices <https://hledger.org/hledger.html#declaring-market-prices>`_
* `Declaring commodities <https://hledger.org/hledger.html#declaring-commodities>`_

Classes:

    Ledger

"""

from pricehist.format import Format

from .baseoutput import BaseOutput


class Ledger(BaseOutput):
    def format(self, series, source=None, fmt=Format()):
        output = ""
        for price in series.prices:
            date = fmt.format_date(price.date)
            base = fmt.base or series.base
            quote = fmt.quote or series.quote
            quote_amount = fmt.format_quote_amount(quote, price.amount)
            timesep = " " if fmt.time else ""
            output += f"P {date}{timesep}{fmt.time} {base} {quote_amount}\n"
        return output
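
# Example emitted line with the default format and no time component
# (values are illustrative):
#   P 2021-01-01 BTC 24139.4648 EUR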
| 31.282609 | 88 | 0.707436 | 192 | 1,439 | 5.244792 | 0.427083 | 0.023833 | 0.041708 | 0.050645 | 0.160874 | 0.141013 | 0.0715 | 0.0715 | 0.0715 | 0.0715 | 0 | 0.005102 | 0.182766 | 1,439 | 45 | 89 | 31.977778 | 0.85119 | 0.599027 | 0 | 0 | 0 | 0 | 0.091549 | 0.044014 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84fcfab32088c0c654dbeb7d0fe1fb3a7f8528f9 | 450 | py | Python | services/bill_service.py | Priyeshpandey/splitwise | db1af921a1ebf3f4034a6672cd3a2515aaeeecd5 | [
"MIT"
] | null | null | null | services/bill_service.py | Priyeshpandey/splitwise | db1af921a1ebf3f4034a6672cd3a2515aaeeecd5 | [
"MIT"
] | null | null | null | services/bill_service.py | Priyeshpandey/splitwise | db1af921a1ebf3f4034a6672cd3a2515aaeeecd5 | [
"MIT"
] | null | null | null | from services.bill_service_interface import BillServiceInterface
from models.bill import Bill


class BillService(BillServiceInterface):
    # class-level store shared by all instances of the service
    billDetails = {}

    def addBill(self, id, groupId, amount, contri, paid):
        bill = Bill()
        bill.setId(id)
        bill.setGroupId(groupId)
        bill.setAmount(amount)
        bill.setContri(contri)
        bill.setPaid(paid)
        self.__class__.billDetails[id] = bill
        return bill
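
# Illustrative usage (the Bill model is assumed to expose the setters above;
# the id/group/amount values are made up):
#   service = BillService()
#   service.addBill("b1", "g1", 300, {"u2": 150, "u3": 150}, {"u1": 300})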
| 28.125 | 64 | 0.673333 | 48 | 450 | 6.1875 | 0.520833 | 0.053872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.24 | 450 | 15 | 65 | 30 | 0.868421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1701456722d2f336d3cdea0f8c3d767de89aa227 | 1,943 | py | Python | setup.py | pyecore/pyecoregen | 01988422afb7e534eff66d6ba3472a97e087ad4a | [
"BSD-3-Clause"
] | 18 | 2017-05-20T08:08:33.000Z | 2022-03-09T17:22:34.000Z | setup.py | pyecore/pyecoregen | 01988422afb7e534eff66d6ba3472a97e087ad4a | [
"BSD-3-Clause"
] | 30 | 2017-05-27T11:07:11.000Z | 2022-03-23T22:38:20.000Z | setup.py | pyecore/pyecoregen | 01988422afb7e534eff66d6ba3472a97e087ad4a | [
"BSD-3-Clause"
] | 5 | 2017-09-17T12:33:53.000Z | 2019-09-26T12:21:46.000Z | #!/usr/bin/env python
import sys

from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand

if sys.version_info < (3, 5):
    sys.exit('Sorry, Python < 3.5 is not supported')


class PyTest(TestCommand):
    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import pytest
        args = self.pytest_args if isinstance(self.pytest_args, list) else [self.pytest_args]
        errno = pytest.main(args)
        sys.exit(errno)


setup(
    name="pyecoregen",
    version='0.5.1',
    description="Model to text framework for PyEcore, including the Ecore to Python generator",
    long_description=open('README.rst').read(),
    keywords="model metamodel EMF Ecore code generator",
    url="https://github.com/pyecore/pyecoregen",
    author="Mike Pagel",
    author_email="mike@mpagel.de",
    packages=find_packages(exclude=['tests']),
    package_data={'': ['README.rst', 'LICENSE'],
                  'pyecoregen': ['templates/*']},
    include_package_data=True,
    install_requires=['pyecore', 'pymultigen', 'jinja2', 'autopep8'],
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
    entry_points={'console_scripts': ['pyecoregen = pyecoregen.cli:main']},
    license='BSD 3-Clause',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "License :: OSI Approved :: BSD License",
    ]
)
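
# Typical developer workflow (illustrative; the custom PyTest command above
# accepts the declared --pytest-args option):
#   pip install -e .
#   python setup.py test --pytest-args="-x"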
| 31.852459 | 95 | 0.646423 | 221 | 1,943 | 5.574661 | 0.547511 | 0.048701 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007874 | 0.215646 | 1,943 | 60 | 96 | 32.383333 | 0.800525 | 0.010293 | 0 | 0 | 0 | 0 | 0.363684 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.020833 | 0.083333 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17036a282cb2f46b504f52189d2bf26ddabdebf9 | 2,230 | py | Python | tests/conftest.py | emaballarin/stockroom | e2e098c4731a4ad0d4952a99c972cb4816bb1abe | [
"Apache-2.0"
] | 67 | 2019-12-02T17:18:07.000Z | 2022-03-03T08:32:19.000Z | tests/conftest.py | emaballarin/stockroom | e2e098c4731a4ad0d4952a99c972cb4816bb1abe | [
"Apache-2.0"
] | 27 | 2019-12-11T07:53:41.000Z | 2021-03-30T00:58:32.000Z | tests/conftest.py | emaballarin/stockroom | e2e098c4731a4ad0d4952a99c972cb4816bb1abe | [
"Apache-2.0"
] | 8 | 2019-12-02T15:22:49.000Z | 2022-03-03T08:32:20.000Z | import inspect
import shutil
from pathlib import Path

import hangar
import numpy as np
import pytest
import torchvision_mocks as torchvision
from stockroom import StockRoom, keeper
from torchvision import datasets


@pytest.fixture()
def managed_tmpdir(monkeypatch, tmp_path):
    monkeypatch.setitem(hangar.constants.LMDB_SETTINGS, "map_size", 2_000_000)
    monkeypatch.setattr(hangar.backends.hdf5_00, "COLLECTION_COUNT", 10)
    monkeypatch.setattr(hangar.backends.hdf5_00, "COLLECTION_SIZE", 50)
    monkeypatch.setattr(hangar.backends.hdf5_01, "COLLECTION_COUNT", 10)
    monkeypatch.setattr(hangar.backends.hdf5_01, "COLLECTION_SIZE", 50)
    monkeypatch.setattr(hangar.backends.numpy_10, "COLLECTION_SIZE", 50)
    yield tmp_path
    shutil.rmtree(tmp_path)


@pytest.fixture()
def repo(monkeypatch, managed_tmpdir):
    cwd = Path(managed_tmpdir)
    monkeypatch.setattr(Path, "cwd", lambda: cwd)
    cwd.joinpath(".git").mkdir()
    cwd.joinpath(".gitignore").touch()
    keeper.init_repo("s", "a@b.c", overwrite=True)
    yield None


@pytest.fixture()
def repo_with_col(repo):
    repo = hangar.Repository(Path.cwd())
    co = repo.checkout(write=True)
    arr = np.arange(20).reshape(4, 5)
    co.add_ndarray_column("ndcol", prototype=arr)
    co.commit("init column")
    co.close()
    yield None
    repo._env._close_environments()


@pytest.fixture()
def writer_stock(repo_with_col):
    stock_obj = StockRoom(enable_write=True)
    yield stock_obj
    stock_obj._repo._env._close_environments()


@pytest.fixture()
def reader_stock(writer_stock):
    arr = np.arange(20).reshape(4, 5)
    col = writer_stock.data["ndcol"]
    col[1] = arr
    writer_stock.commit("added first data point")
    writer_stock.close()
    stock_obj = StockRoom()
    yield stock_obj
    stock_obj._repo._env._close_environments()


def is_valid(x):
    return (
        inspect.isclass(x)
        and issubclass(x, torchvision.Torchvision)
        and x != torchvision.Torchvision
    )


@pytest.fixture()
def torchvision_datasets(monkeypatch):
    for n, cls in inspect.getmembers(torchvision, is_valid):
        monkeypatch.setattr(datasets, n, cls)
| 28.227848 | 78 | 0.727354 | 295 | 2,230 | 5.298305 | 0.338983 | 0.080614 | 0.06142 | 0.102367 | 0.357006 | 0.357006 | 0.357006 | 0.207933 | 0.140115 | 0.082534 | 0 | 0.024987 | 0.156502 | 2,230 | 78 | 79 | 28.589744 | 0.805954 | 0 | 0 | 0.253968 | 0 | 0 | 0.0713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.142857 | 0.015873 | 0.269841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17042212e5fa71f184651ff1dc382b9b9ed9f1a9 | 1,278 | py | Python | research/cv/MaskedFaceRecognition/config.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/MaskedFaceRecognition/config.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/MaskedFaceRecognition/config.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed
config = ed({
"class_num": 10572,
"batch_size": 128,
"learning_rate": 0.01,
"lr_decay_epochs": [40, 80, 100],
"lr_decay_factor": 0.1,
"lr_warmup_epochs": 20,
"p": 16,
"k": 8,
"loss_scale": 1024,
"momentum": 0.9,
"weight_decay": 1e-4,
"epoch_size": 120,
"buffer_size": 10000,
"image_height": 128,
"image_width": 128,
"save_checkpoint": True,
"save_checkpoint_steps": 195,
"keep_checkpoint_max": 2,
"save_checkpoint_path": "./"
})
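if __name__ == '__main__':
    # Quick sanity check of the settings above (illustrative only; train.py
    # and eval.py import `config` directly, as noted in the module docstring).
    print(config.class_num, config.batch_size, config.learning_rate)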
| 31.170732 | 78 | 0.643975 | 177 | 1,278 | 4.525424 | 0.700565 | 0.074906 | 0.032459 | 0.03995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056731 | 0.186228 | 1,278 | 40 | 79 | 31.95 | 0.713462 | 0.547731 | 0 | 0 | 0 | 0 | 0.414722 | 0.037702 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17061bab51406bbd5e3c868633cb54d4c996d97c | 321 | py | Python | backend/source/similaridade.py | AlissonSantos17/Sistema-de-Recomendacao | d276d14bea65bcf721a39f774fd9038ec775a063 | [
"Apache-2.0"
] | null | null | null | backend/source/similaridade.py | AlissonSantos17/Sistema-de-Recomendacao | d276d14bea65bcf721a39f774fd9038ec775a063 | [
"Apache-2.0"
] | null | null | null | backend/source/similaridade.py | AlissonSantos17/Sistema-de-Recomendacao | d276d14bea65bcf721a39f774fd9038ec775a063 | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
from recomendacao import distanciaEuclidiana
def getSimilares(base, usuario):
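    """Return up to 26 (score, other_user) pairs, sorted best first.

    Assumes distanciaEuclidiana returns a similarity score where higher
    means more similar, so sort + reverse puts the closest users first.
    """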
similaridade = [
(distanciaEuclidiana(base, usuario, outro), outro)
for outro in base if outro != usuario
]
similaridade.sort()
similaridade.reverse()
return similaridade[0:26] | 26.75 | 54 | 0.713396 | 36 | 321 | 6.361111 | 0.694444 | 0.09607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018657 | 0.165109 | 321 | 12 | 55 | 26.75 | 0.835821 | 0.133956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1708b50e51dd839f95011d63cd587ab6817e6ebc | 12,540 | py | Python | program/compare.py | rabo452/python-gui | ab0be8626d148c746c29cd362d342485c14c09ae | [
"MIT"
] | null | null | null | program/compare.py | rabo452/python-gui | ab0be8626d148c746c29cd362d342485c14c09ae | [
"MIT"
] | null | null | null | program/compare.py | rabo452/python-gui | ab0be8626d148c746c29cd362d342485c14c09ae | [
"MIT"
] | null | null | null | import requests
import pytube
import os
from bs4 import BeautifulSoup as bs
from moviepy.video.io.VideoFileClip import VideoFileClip
from youtube_transcript_api import YouTubeTranscriptApi
this_module = __import__(__name__)
# Compare two or more videos to find matching parts and download them.
# Note: a single main video can have more than one source video.
class Compare_video_class:
    # This method does all the comparison work and returns nothing.
    # videos - list of (main_video_id, [source_video_ids]) pairs;
    # every entry must be a YouTube video with a transcript available.
    def main(self, videos=None):  # None instead of a mutable default argument
if not videos: return
        # all generated files are stored under ./files
if os.path.exists('./files') == False:
os.makedirs('./files')
if os.path.exists('./files/videos') == False:
os.makedirs('./files/videos')
if os.path.exists('./files/transcripts') == False:
os.makedirs('./files/transcripts')
main_videos_ids = []
sources_videos_ids = []
transcript_compare_main_videos = [] # save transcript into this arr for compare function
transcript_compare_sources_videos = [] # save transcript into this arr for compare function
#get main and sources videos
for main_video, sources_videos in videos:
main_videos_ids.append(main_video)
sources_videos_ids.append(sources_videos)
        # fetch the transcript text for each main video and its source videos
for i in range(len(main_videos_ids)):
if not main_videos_ids[i] or not sources_videos_ids[i]:
continue
            transcript_compare_sources_videos.append([])  # one transcript list per source video
transcript_compare_main_videos.append([])
video_title = self.delete_need_symbols(self.get_youtube_video_title(main_videos_ids[i]))
transcript_text_string_time = "{} \n".format(main_videos_ids[i])
transcript_text_string_without_time = "{} \n".format(main_videos_ids[i])
for phrase_obj in YouTubeTranscriptApi.get_transcript(main_videos_ids[i]):
transcript_text_string_time += '{} {} \n'.format(phrase_obj['text'].replace('\n', ''), self.secs_to_time(int(phrase_obj['start'])))
transcript_text_string_without_time += '{} \n'.format(phrase_obj['text'].replace('\n', ''))
for word in [word for word in phrase_obj['text'].lower().replace('!', '').replace('?', '').replace('\n', '').replace('.', '').replace(',', '').split(' ') if word != '']:
                    transcript_compare_main_videos[i].append(word)  # for the main video only the words matter, not the timing
self.save_transcript_text(video_title, transcript_text_string_time)
self.save_transcript_text(video_title + '_', transcript_text_string_without_time)
for source_video_id in sources_videos_ids[i]:
video_title = self.delete_need_symbols(self.get_youtube_video_title(source_video_id))
transcript_text_string_time = "{} \n".format(source_video_id)
transcript_text_string_without_time = "{} \n".format(source_video_id)
transcript_source_video = []
for phrase_obj in YouTubeTranscriptApi.get_transcript(source_video_id):
transcript_text_string_time += '{} {} \n'.format(phrase_obj['text'].replace('\n', ''), self.secs_to_time(int(phrase_obj['start'])))
transcript_text_string_without_time += '{} \n'.format(phrase_obj['text'].replace('\n', ''))
for arr_word in [[word, int(phrase_obj['start'])] for word in phrase_obj['text'].lower().replace('!', '').replace('?', '').replace('\n', '').replace('.', '').replace(',', '').split(' ') if word != '']:
                        transcript_source_video.append(arr_word)  # append each word with the phrase start time
transcript_compare_sources_videos[i].append(transcript_source_video)
self.save_transcript_text(video_title, transcript_text_string_time)
self.save_transcript_text(video_title + '_', transcript_text_string_without_time)
        # compare transcripts and collect the time ranges where the main and source videos match
for i in range(len(transcript_compare_main_videos)):
transcript_main = transcript_compare_main_videos[i]
transcripts_sources = transcript_compare_sources_videos[i]
try:
self.cut_and_download_youtube_video(sources_videos_ids[i], self.compare_video_transcripts(transcript_main,transcripts_sources))
            except Exception: continue  # skip shows whose comparison fails
def cut_and_download_youtube_video(self, sources_videos_ids, cut_moments):
if not cut_moments: return
for i in range(len(sources_videos_ids)):
source_video_id = sources_videos_ids[i]
video_cut_moments = cut_moments[i] #cut timestamp for this source video
video_title = self.delete_need_symbols(self.get_youtube_video_title(source_video_id))
download_path = './files/videos/{}/'.format(video_title)
if os.path.exists(download_path) == False:
os.makedirs(download_path)
youtube = pytube.YouTube('https://youtu.be/' + source_video_id)
video = youtube.streams.get_highest_resolution()
video.download(download_path, filename = video_title)
f = open(download_path + 'cut points video.txt', 'w+', encoding='utf-8')
cut_point_string = ''
            count_of_video = 1  # running index used to name each cut clip
transcripts = YouTubeTranscriptApi.get_transcript(source_video_id)
for cut_start_point, cut_end_point in video_cut_moments: # single_cut_moment[0] - start point, single_cut_moment[1] - end point
video_path = os.path.join(download_path, 'video {}/'.format(count_of_video))
if os.path.exists(video_path) == False: os.makedirs(video_path)
self.write_cutted_transcript(cut_start_point,cut_end_point, source_video_id, count_of_video, video_title, video_path, transcripts)
cut_point_string += " start point: {}, end point: {}, for {} video \n".format(self.secs_to_time(cut_start_point),self.secs_to_time(cut_end_point),count_of_video)
clip = VideoFileClip(download_path + video_title + '.mp4').subclip(cut_start_point, cut_end_point + 2)
clip.write_videofile(video_path + video_title + str(count_of_video) + '.mp4')
clip.close()
count_of_video += 1
f.write(cut_point_string)
f.close()
def write_cutted_transcript(self, start_point, end_point, video_id, count_of_video, video_title, path, transcripts):
f = open(os.path.join(path,f'transcript for video {count_of_video}.txt'), 'w+',encoding='utf-8')
cutted_transcript = ''
for phrase_obj in transcripts:
phrase_obj['start'] = int(phrase_obj['start'])
if phrase_obj['start'] > start_point and phrase_obj['start'] < end_point:
cutted_transcript += phrase_obj['text'] + ' {} \n'.format(self.secs_to_time(phrase_obj['start']))
f.write(cutted_transcript)
f.close()
    # Compare the main transcript against each source transcript.
    # Return, per source video, the time ranges that match the main video.
def compare_video_transcripts(self, main_transcript_words, sources_transcripts):
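        # Returns: result[i] is a list of [start_seconds, end_seconds]
        # pairs for sources_transcripts[i].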
result = []
for i in range(len(sources_transcripts)):
source_transcript_video = sources_transcripts[i]
result.append([])
source_index = -1
while source_index < len(source_transcript_video) - 1:
source_index += 1
source_word, souce_timing = source_transcript_video[source_index]
main_index = -1
while main_index < len(main_transcript_words) - 1:
main_index += 1
main_word = main_transcript_words[main_index]
if source_word == main_word:
                        # check whether the next 10 words also match
part_of_video = True
for y in range(11):
if (main_index + y) >= len(main_transcript_words) - 1 or source_index + y >= (len(source_transcript_video) - 1):
part_of_video = False
break
main_word = main_transcript_words[main_index + y]
source_word, souce_timing = source_transcript_video[source_index + y]
if source_word != main_word:
part_of_video = False
break
if not part_of_video: continue
start_point = source_transcript_video[source_index][1]
source_index += 10
main_index += 10
while True:
if (main_index) >= (len(main_transcript_words) - 1) or (source_index) >= (len(source_transcript_video) - 1):
end_point = source_transcript_video[source_index][1]
result[i].append([start_point, end_point])
break
main_word = main_transcript_words[main_index]
source_word, souce_timing = source_transcript_video[source_index]
if main_word != source_word:
                                # check whether one of the next 30 words matches (a single word may be missing)
the_same_part = False
for y in range(30):
if (main_index + y) >= len(main_transcript_words) - 1 or (source_index + y) >= (len(source_transcript_video) - 1):
the_same_part = False
break
main_word = main_transcript_words[main_index + y]
source_word, souce_timing = source_transcript_video[source_index + y]
if source_word == main_word:
the_same_part = True
main_index += y
source_index += y
break
if not the_same_part:
end_point = source_transcript_video[source_index][1]
result[i].append([start_point, end_point])
break
main_index += 1
source_index += 1
return result
#support functions
def delete_need_symbols(self,string):
replacements = ['?', '/', '\\', '*', '>', '<', '"', ':', '|', '\'']
for i in replacements:
string = string.replace(i, '')
return string
def secs_to_time(self,secs):
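        """Format seconds as MM:SS, e.g. secs_to_time(125) -> "02:05"."""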
        minutes, secs = str(round(secs / 60, 2)).split('.')  # whole minutes and fractional part
        secs = str(round(float(secs) / 10 * 6))  # hundredths of a minute -> seconds
if int(minutes) <= 9:
minutes = '0' + minutes
if int(secs) <= 9:
secs = '0' + secs
return "{}:{}".format(minutes,secs)
#save transcript text into file
def save_transcript_text(self, file_name, text):
if os.path.exists('./files/transcripts/{}'.format(file_name)) == False:
os.makedirs('./files/transcripts/{}'.format(file_name))
f = open("./files/transcripts/{}/{}.txt".format(file_name, file_name), 'w+', encoding='utf-8')
f.write(text)
f.close()
def get_youtube_video_title(self, id):
r = requests.get(f'https://www.youtube.com/watch?v={id}')
return bs(r.text,'lxml').select('title')[0].text.replace(' - YouTube', '')
if __name__ == '__main__':
Compare_video_class().main([['qj49b929tgk', ['BmYv8XGl-YU', '7f97tMnV_TU']]])
| 54.051724 | 222 | 0.58134 | 1,443 | 12,540 | 4.740818 | 0.128898 | 0.038883 | 0.035083 | 0.027628 | 0.44818 | 0.385031 | 0.333577 | 0.265166 | 0.255226 | 0.218974 | 0 | 0.007373 | 0.318581 | 12,540 | 231 | 223 | 54.285714 | 0.793212 | 0.09697 | 0 | 0.238372 | 0 | 0 | 0.054572 | 0.006596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.040698 | 0 | 0.116279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca02d3451b5a0539ec40cd600dd5837fcf09dd42 | 35,162 | py | Python | tracker/tracker.py | matthewmckenna/tvst | 0c673a118c09231839d0960c59e566cb7963e0d6 | [
"MIT"
] | null | null | null | tracker/tracker.py | matthewmckenna/tvst | 0c673a118c09231839d0960c59e566cb7963e0d6 | [
"MIT"
] | null | null | null | tracker/tracker.py | matthewmckenna/tvst | 0c673a118c09231839d0960c59e566cb7963e0d6 | [
"MIT"
] | null | null | null | #!/usr/env/bin python3
"""
Utility to keep track of TV shows
"""
import argparse
import collections
# import datetime
import json
import logging
import os
# import re
from queue import Queue  # the module is Queue (capitalised) in Python 2
import sys
import threading
import requests
from .exceptions import (
APIRequestError,
DatabaseError,
EpisodeOutOfBoundsError,
FoundFilmError, # API request related
InvalidUsageError,
SeasonOutOfBoundsError,
ShowAlreadyTrackedError,
ShortCodeAlreadyAssignedError,
ShowDatabaseNotFoundError,
ShowNotFoundError, # API request related
ShowNotTrackedError,
TrackerError,
TrackerDatabaseNotFoundError,
WatchlistError,
)
from .utils import (
check_for_databases,
check_for_season_episode_code,
Deserializer,
extract_season_episode_from_str,
EncodeShow,
extract_episode_details,
get_show_database_entry,
logging_init,
lunderize,
ProcessWatchlist,
RegisteredSerializable,
sanitize_title,
season_episode_str_from_show,
tabulator,
# titleize,
)
logger = logging.getLogger(__name__)
# TODO: Retrieve IGN ratings
# TODO: Retrieve episode synopsis
class Database(RegisteredSerializable):
"""Provide base method for different types of databases"""
def __init__(
self,
database_dir=None,
_shows=None,
):
if database_dir is None:
database_dir = os.path.join(os.path.expanduser('~'), '.showtracker')
self.database_dir = database_dir
self._shows = {} if _shows is None else _shows
def create_db_from_watchlist(self, watchlist_path):
"""Create a database from a watchlist"""
logger.info('Create show database from watchlist=%r', watchlist_path)
watchlist = ProcessWatchlist(watchlist_path)
# TODO: Could multithread here
for show in watchlist:
self.add_show(show, from_watchlist=True)
def add_show(self, show):
raise NotImplementedError
def write_db(self, indent=None):
"""Write database to disk"""
try:
os.mkdir(self.database_dir)
except OSError:
logger.debug('os.mkdir failed: directory=%r already exists', self.database_dir)
with open(self.path_to_db, 'w', encoding='utf-8') as f:
json.dump(self, f, cls=EncodeShow, indent=indent, sort_keys=True)
def __iter__(self):
return iter(self._shows)
class ShowDatabase(Database):
def __init__(
self,
database_dir=None,
path_to_db=None,
# showdb_name=None,
_shows=None,
):
super().__init__(database_dir, _shows)
# self.showdb_name = '.showdb.json' if showdb_name is None else showdb_name
showdb_name = '.showdb.json'
if path_to_db is None:
self.path_to_db = os.path.join(self.database_dir, showdb_name)
else:
self.path_to_db = path_to_db
# if not os.path.exists(self.path_to_showdb):
# self.create_database()
def add_show(self, show_details, from_watchlist=False):
"""Add a show to the database.
Args:
show_details: namedtuple with the following fields:
show_title
next_episode
notes
Example show_details:
'Game of Thrones'
'S01E01'
'Pilot episode'
"""
title = show_details.show_title
# Create a Show() object
show = Show(title)
# FIXME: Hidden IO
try:
show.populate_seasons()
except ShowNotFoundError as e:
if not from_watchlist:
raise
# If we know we're adding multiple shows (i.e., from a
# watchlist) then we should not raise again.
logger.info(e)
else:
logger.info('Add show=%r to showdb', show.ltitle)
self._shows[show.ltitle] = show
def __contains__(self, key):
return key in self._shows
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.path_to_db)
class TrackerDatabase(Database):
"""Provided methods to read current tracker information.
Available methods:
next_episode:
"""
def __init__(
self,
database_dir=None,
path_to_db=None,
# tracker_name=None,
# showdb_name=None,
_shows=None,
):
super().__init__(database_dir, _shows)
# TODO: Do tracker_name and showdb_name need to be instance attributes?
# self.tracker_name = '.tracker.json' if tracker_name is None else tracker_name
tracker_name = '.tracker.json'
if path_to_db is None:
self.path_to_db = os.path.join(self.database_dir, tracker_name)
else:
self.path_to_db = path_to_db
# self.path_to_tracker = os.path.join(self.database_dir, self.tracker_name)
# self.showdb_name = '.showdb.json' if showdb_name is None else showdb_name
# self.path_to_showdb = os.path.join(self.database_dir, self.showdb_name)
# if not os.path.exists(self.path_to_tracker):
# self.create_database()
# show_db = load_database(self.path_to_showdb)
# self._add_next_prev_episode(show_db)
def create_tracker_from_watchlist(self, watchlist_path, showdb=None):
"""Create a tracker database from a watchlist"""
logger.info('Create tracker database from watchlist=%r', watchlist_path)
watchlist = ProcessWatchlist(watchlist_path)
for show in watchlist:
self.add_tracked_show(show, showdb)
def update_tracker_from_watchlist(self, watchlist_path, showdb=None):
"""Update an existing tracker using a watchlist"""
logger.info('Update existing tracker from watchlist=%r', watchlist_path)
watchlist = ProcessWatchlist(watchlist_path)
for show in watchlist:
ltitle = lunderize(show.show_title)
if ltitle in self._shows:
# Only want to adjust the next_episode and notes fields
if show.notes:
logger.debug(
'Update note for show=%r. Was %r, now %r',
ltitle,
self._shows[ltitle].notes,
show.notes,
)
self._shows[ltitle].notes = show.notes
logger.debug(
'Update next_episode field for show=%r. Was %r, now %r.',
ltitle,
self._shows[ltitle]._next_episode,
show.next_episode,
)
self._shows[ltitle]._next_episode = show.next_episode
# Update the next and prev attributes
self._shows[ltitle]._set_next_prev(showdb)
else:
self.add_tracked_show(show, showdb)
def add_tracked_show(self, show_details, showdb=None):
"""Add a show to the trackerdb"""
if showdb is None:
# Attempt to load the ShowDatabase from the common database
# directory
path_to_showdb = os.path.join(os.path.dirname(self.path_to_db), '.showdb.json')
try:
showdb = load_database(path_to_showdb)
except FileNotFoundError:
raise DatabaseError(
'Could not find show database={}'.format(path_to_showdb)
)
show = TrackedShow(
title=show_details.show_title,
_next_episode=show_details.next_episode,
notes=show_details.notes,
)
logger.info('Add show=%r to the tracker database.', show.ltitle)
self._shows[show.ltitle] = show
# Set the tracked show .title attribute to the 'official' show title
# retrieved from the API request
logger.info(
'Update tracked show title from <%r> to <%r>.',
self._shows[show.ltitle].title,
showdb._shows[show.ltitle].title,
)
self._shows[show.ltitle].title = showdb._shows[show.ltitle].title
# Set the next and prev episode attributes
self._shows[show.ltitle]._set_next_prev(showdb)
def _short_codes(self):
for s in self._shows:
yield self._shows[s].short_code
def __contains__(self, key):
full_title = key in self._shows
short_code = key.upper() in self._short_codes()
return full_title or short_code
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.path_to_db)
class Season(RegisteredSerializable):
"""Represent a season of a TV show"""
def __init__(self, episodes_this_season=0, _episodes=None):
self._episodes = [] if _episodes is None else _episodes
self.episodes_this_season = 0 if episodes_this_season is None else len(self._episodes)
def add_episode(self, episode):
"""Add an episode object to self._episodes"""
self._episodes.append(episode)
def construct_episode(self, episode_details):
"""Return an Episode instance."""
return Episode(**episode_details)
def build_season(self, details):
"""Build a season of episodes.
Extract the response details which we are interested in, and add
Episode instances to this Season instance.
"""
season = int(details['Season'])
for episode in details['Episodes']:
episode_details = extract_episode_details(season, episode)
self.add_episode(self.construct_episode(episode_details))
# Update the number of episodes this season
self.episodes_this_season = len(self._episodes)
def __getitem__(self, index):
return self._episodes[index]
def __iter__(self):
return iter(self._episodes)
def __len__(self):
return len(self._episodes)
class Episode(RegisteredSerializable):
"""Small class to represent an Episode of a TV show."""
def __init__(self, episode, season, title, ratings):
self.episode = episode
self.season = season
self.title = title
self.ratings = ratings
def __eq__(self, other):
for k in self.__dict__:
if self.__dict__[k] != other.__dict__[k]:
return False
return True
def __ne__(self, other):
for k in self.__dict__:
if self.__dict__[k] != other.__dict__[k]:
return True
return False
def __repr__(self):
return '{self.title} (S{self.season:02d}E{self.episode:02d})'.format(
self=self
)
class ShowDetails(RegisteredSerializable):
"""Provide basic information about a show.
Provide access to various title formats and the short_code of
a Show.
"""
def __init__(self, title=None, short_code=None):
self.title = title
self.request_title = sanitize_title(title)
self.ltitle = lunderize(title)
self.short_code = short_code
def __lt__(self, other):
return self.ltitle < other.ltitle
def __gt__(self, other):
return self.ltitle > other.ltitle
def __eq__(self, other):
return self.ltitle == other.ltitle
def __ne__(self, other):
return self.ltitle != other.ltitle
def __repr__(self):
return 'ShowDetails(title={!r})'.format(self.title)
class TrackedShow(ShowDetails):
"""Keep track of next and previous episodes of a tracked show.
Available methods:
inc_episode
"""
def __init__(
self,
title=None,
ltitle=None,
request_title=None,
_next_episode='S01E01',
notes=None,
short_code=None,
_next=None,
_prev=None,
):
super().__init__(title, short_code)
self._next = _next
self.notes = notes
self._prev = _prev
self._next_episode = _next_episode
def _set_next_prev(self, show_database):
"""Set up the next and previous episodes of a TrackedShow.
Raises:
ShowNotFoundError: if show not found in the showdb
OutOfBoundsError: if season or episode is not found in the showdb
entry for the show, then this exception will be raised.
"""
# We don't need the entire showdb, so just get the entry for the
# show we're interested in.
showdb_entry = get_show_database_entry(show_database, title=self.ltitle)
# If an invalid season-episode code is found, this function returns
# season=1, episode=1
season, episode = extract_season_episode_from_str(self._next_episode)
# Account for zero-indexing.
season, episode = season-1, episode-1
self._validate_season_episode(showdb_entry, season, episode)
self._next = showdb_entry._seasons[season]._episodes[episode]
if season > 0:
if episode == 0:
season -= 1
episode = showdb_entry._seasons[season].episodes_this_season - 1
else:
episode -= 1
# If season or episode is non-zero, then a previous episode exists
if season or episode:
self._prev = showdb_entry._seasons[season]._episodes[episode]
else:
self._prev = None
def inc_dec_episode(self, show_database, inc=False, dec=False, by=1):
"""Increment or decrement the next episode for a show.
Raises:
ShowNotFoundError: if show not found in the showdb
"""
if inc and dec:
raise InvalidUsageError('Both inc and dec commands were passed.')
if not (inc or dec):
raise InvalidUsageError('Neither inc nor dec commands were passed.')
showdb_entry = get_show_database_entry(show_database, title=self.ltitle)
season, episode = self._adjust_season_episode(inc, dec)
if inc:
self.inc_episode(showdb_entry, season, episode, by)
else:
self.dec_episode(showdb_entry, season, episode, by)
def _adjust_season_episode(self, inc, dec):
"""Return a zero-index adjusted season and episode"""
if inc:
return self._next.season-1, self._next.episode-1
else:
try:
season, episode = self._prev.season-1, self._prev.episode-1
except AttributeError:
# self.prev is None, meaning we are dealing with the first
# episode of a show (S01E01).
season, episode = 0, 0
return season, episode
def _validate_season_episode(self, showdb, season, episode):
"""Check that the season and episode passed are valid."""
try:
_ = showdb._seasons[season]
except IndexError:
raise SeasonOutOfBoundsError('Season={!r} is out of bounds.'.format(season))
try:
_ = showdb._seasons[season]._episodes[episode]
except IndexError:
raise EpisodeOutOfBoundsError('Episode={!r} is out of bounds.'.format(episode))
def inc_episode(self, showdb, season, episode, by=1):
"""Advance the next episode for a tracked show.
Args:
showdb: a ShowDatabase entry for the current show
by: How many episodes to increment from the current
episode. Default is to advance by one episode.
Raises:
SeasonOutOfBoundsError: invalid season
EpisodeOutOfBoundsError: invalid episode
"""
for inc in range(by):
# Check if the current ('old') next_episode is the season finale
# If so, the 'new' next_episode will be the next season premiere.
if self._next.episode == showdb._seasons[season].episodes_this_season:
season += 1
episode = 0
else:
episode += 1
self._validate_season_episode(showdb, season, episode)
self._prev = self._next
self._next = showdb._seasons[season]._episodes[episode]
def dec_episode(self, showdb, season, episode, by=1):
"""Decrement the next episode for a tracked show.
Args:
showdb: a ShowDatabase entry for the current show
by: How many episodes to decrement from the current
episode. Default is to decrement by one episode.
"""
for dec in range(by):
if season == 0 and episode == 0:
self._next = showdb._seasons[season]._episodes[episode]
break # TODO: Perhaps raise something
# Decrement over a season boundary, for season > 0.
# Set the episode to the finale of the previous season
elif episode == 0:
season -= 1
episode = showdb._seasons[season].episodes_this_season-1
else:
episode -= 1
self._validate_season_episode(showdb, season, episode)
self._next = self._prev
self._prev = showdb._seasons[season]._episodes[episode]
def __eq__(self, other):
for k in self.__dict__:
if self.__dict__[k] != other.__dict__[k]:
return False
return True
def __ne__(self, other):
for k in self.__dict__:
if self.__dict__[k] != other.__dict__[k]:
return True
return False
def __repr__(self):
return (
'TrackedShow(title={self.title!r}, _next_episode={self._next_episode!r}, '
'notes={self.notes!r}, short_code={self.short_code!r})'.format(self=self)
)
class Show(ShowDetails):
"""Represent various details of a show.
Available attributes:
next
"""
def __init__(
self,
title=None,
ltitle=None,
request_title=None,
imdb_id=None,
short_code=None,
_seasons=None
):
super().__init__(title, short_code)
self._seasons = [] if _seasons is None else _seasons
self.imdb_id = imdb_id
def request_show_info(self, q, season=None, search=False):
"""Make API request with season information"""
if season:
payload = {'i': self.imdb_id, 'season': season}
elif search:
payload = {'s': self.request_title}
else:
payload = {'i': self.imdb_id}
logger.debug('Make API request with payload=%r', payload)
with requests.Session() as s:
response = s.get('http://www.omdbapi.com', params=payload)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
logger.exception(e)
# return response.json()
q.put(response.json())
def populate_seasons(self):
q = Queue()
# Make initial API request to search for the show we're interested in.
        self.request_show_info(q, search=True)
response = q.get()
# Could not find the show in the external database (OMDbAPI)
if response['Response'] == 'False':
raise ShowNotFoundError(
'Could not find title={} in external database.'.format(self.request_title)
)
for title in response['Search']:
if title['Type'] == 'series':
self.imdb_id = title['imdbID']
break
# The search for title returned only films
if not self.imdb_id:
raise ShowNotFoundError(
'Could not find show with title={}'.format(self.request_title)
)
        self.request_show_info(q)
show_details = q.get()
logger.debug(show_details)
total_seasons = int(show_details['totalSeasons'])
logger.debug('Total seasons for show <%r>: %r', self.request_title, total_seasons)
threads = []
# Make *total_seasons* API requests and pass responses to add_season
# to be stored.
for season in range(1, total_seasons+1):
t = threading.Thread(
target=self.request_show_info,
args=(q,),
kwargs={'season': season},
)
t.start()
threads.append(t)
# Wait for all threads to finish adding responses to the queue.
for t in threads:
t.join()
while not q.empty():
self.add_season(q.get())
# No guarantee that the seasons will be ordered correctly if we
# thread above
        # Episode stores the season number as an attribute, not a mapping key
        self._seasons.sort(key=lambda k: k._episodes[0].season)
# Update the show title
logger.info(
'Update show title from <%r> to "official" title retrieved '
'from external database <%r>.',
self.title,
show_details['Title'],
)
self.title = show_details['Title']
def add_season(self, season_details):
"""Create a Season instance and store API response."""
s = Season()
s.build_season(season_details)
self._seasons.append(s)
def load_database(path_to_database):
"""Return an existing database"""
try:
with open(path_to_database, 'r') as db:
deserialized_data = json.load(db)
except FileNotFoundError:
raise DatabaseError('Could not find database={}'.format(path_to_database))
else:
deserializer = Deserializer(deserialized_data)
database = deserializer.deserialize()
return database
def load_all_dbs(database_dir):
"""Load and return a ShowDB and TrackerDB.
Returns:
showdb: ShowDatabse instance
tracker: TrackerDatabase instance
"""
showdb = load_database(os.path.join(database_dir, '.showdb.json'))
tracker = load_database(os.path.join(database_dir, '.tracker.json'))
return showdb, tracker
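# Illustrative call (the default database directory is ~/.showtracker):
#     showdb, trackerdb = load_all_dbs(os.path.join(os.path.expanduser('~'), '.showtracker'))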
def update_database():
"""Update an existing ShowDatabase.
"""
# TODO: Locate the database
# TODO: Rename the database
# TODO: Get list of existing shows in database
# TODO: Read watchlist
# TODO: Combine two lists of shows
# TODO: Create database
# TODO: Test database is valid
# TODO: Write database, and exit
# def update(self, from_file=True):
# if from_file:
# self.next['season'], self.next['episode'] = show_update()
# game_of_thrones = Show('Game of Thrones')
# query = 'game of thrones'
# show = getattr(sys.modules[__name__], query)
def test_update_database():
shows = ['Game of Thrones', 'Silicon Valley']
show_db = ShowDatabase()
for show in shows:
show_db.add_show(Show(show))
with open('db_test.json', 'w') as f:
json.dump(show_db, f, cls=EncodeShow, indent=2, sort_keys=True)
# def create_tracker(path_to_file):
# """Create a Tracker object."""
# tracker = TrackerDatabase()
# for show in tracked_shows:
# tracker.add(
# TrackedShow(
# title=show.title,
# next_episode=show.next_episode,
# notes=show.notes,
# )
# )
# return tracker
def update_tracker_title(tracker, database):
"""Update the title attribute in the tracker with those from
the ShowDatabase.
"""
for show in tracker:
        show.title = database._shows[show.ltitle].title
def process_args():
"""Process command line arguments."""
parser = argparse.ArgumentParser(
description='Utility to facilitate the tracking of TV shows',
prefix_chars='-+',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
# add_help=False,
)
show_kwargs = {
'help': 'title of show',
}
parser.add_argument(
'-l',
'--list',
help='list tracked shows',
action='store_true',
# default=5,
# metavar='N',
)
parser.add_argument(
'-w',
'--watchlist',
help='read a watchlist',
nargs='?',
const='watchlist.txt',
)
parser.add_argument(
'--database-dir',
help='directory where databases are located',
# nargs='?',
# const='watchlist.txt',
default=os.path.join(os.path.expanduser('~'), '.showtracker'),
)
parser.add_argument(
'-v',
'--verbose',
help='enable logging to file',
action='store_true',
)
subparsers = parser.add_subparsers(help='sub-commands', dest='sub_command')
parser_add = subparsers.add_parser(
'add',
help='add info to an existing show',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_dec = subparsers.add_parser(
'dec',
help='decrement the next episode of a show',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_inc = subparsers.add_parser(
'inc',
help='increment the next episode of a show',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser_rm = subparsers.add_parser(
'rm',
help='remove info from an existing tracked show',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser_add.add_argument('show', **show_kwargs)
parser_add.set_defaults(func=command_add)
parser_dec.add_argument('show', **show_kwargs)
parser_dec.set_defaults(func=command_inc_dec)
parser_inc.add_argument('show', **show_kwargs)
parser_inc.set_defaults(func=command_inc_dec)
parser_rm.add_argument('show', **show_kwargs)
parser_rm.set_defaults(func=command_rm)
parser_add.add_argument(
# '-n',
'--note',
help='add a note to the show',
# nargs='*',
)
parser_add.add_argument(
'-c',
'--short-code',
help='add a short_code to the show',
# metavar='SHORT_CODE',
)
parser_dec.add_argument(
'--by',
help='decrement the currently tracked episode by B',
default=1,
metavar='B',
type=int,
)
parser_inc.add_argument(
'--by',
help='increment the currently tracked episode by B',
default=1,
metavar='B',
type=int,
)
parser_rm.add_argument(
'--note',
help='add a note to the show',
action='store_true',
)
parser_rm.add_argument(
'-c',
'--short-code',
help='add a short_code to the show',
action='store_true',
)
return parser # .parse_args()
def handle_watchlist(args, showdb, trackerdb):
"""Handle watchlist processing"""
if not (showdb._shows and trackerdb._shows):
# Both showdb and trackerdb are empty
showdb.create_db_from_watchlist(args.watchlist)
logger.info('Write show database to disk.')
showdb.write_db()
trackerdb.create_tracker_from_watchlist(args.watchlist)
else:
# Get a list of shows currently in the showdb
shows = set([showdb._shows[s].request_title for s in showdb])
# Get a list of the shows in the watchlist
watchlist = ProcessWatchlist(args.watchlist)
wshows = [sanitize_title(s.show_title) for s in watchlist]
# Find shows which are in the watchlist, but don't exist in showdb
new_shows = [s for s in wshows if s not in shows]
logger.debug('Shows in watchlist, not in show database: %r', new_shows)
# TODO: Could multithread here
for s in new_shows:
add_show_to_showdb(s, showdb, from_watchlist=True)
logger.info('Write show database to disk.')
showdb.write_db()
trackerdb.update_tracker_from_watchlist(args.watchlist, showdb)
def add_show_to_showdb(title, showdb, from_watchlist=False):
"""Attempt to add a show to the showdb"""
    Show = collections.namedtuple('Show', ('show_title',))  # trailing comma: a one-field tuple, not a bare string
try:
showdb.add_show(Show(title), from_watchlist)
except ShowNotFoundError as e:
raise
except FoundFilmError as f:
raise
def command_add(args, showdb, trackerdb):
"""Add a show or a detail to a show"""
# Is show in the showdb?
if args.ltitle not in showdb:
add_show_to_showdb(args.show, showdb)
logger.info('Write show database to disk.')
showdb.write_db()
if args.ltitle in trackerdb:
if not args.note and not args.short_code:
raise ShowAlreadyTrackedError('<{!r}> is already tracked'.format(args.show))
else:
# Show is not in the tracker
NextEpisode = collections.namedtuple(
'NextEpisode',
('show_title', 'next_episode', 'notes'),
)
show = NextEpisode(args.show, args.next_episode, args.note)
logger.debug('Create NextEpisode namedtuple=%r', show)
trackerdb.add_tracked_show(show, showdb)
if args.note:
logger.info('Add note=%r to show=%r.', args.note, args.ltitle)
trackerdb._shows[args.ltitle].notes = args.note
if args.short_code:
upper_sc = args.short_code.upper()
if upper_sc in trackerdb._short_codes():
raise ShortCodeAlreadyAssignedError(
'Short-code <{}> is already in use'.format(upper_sc)
)
logger.info('Add short-code=%r to show=%r.', upper_sc, args.ltitle)
trackerdb._shows[args.ltitle].short_code = upper_sc
def command_inc_dec(args, showdb, trackerdb):
"""Increment or decrement the next episode for a show."""
inc = False
dec = False
if args.ltitle not in trackerdb:
raise ShowNotTrackedError('<{!r}> is not currently tracked.'.format(args.ltitle))
show = trackerdb._shows[args.ltitle]
if args.sub_command == 'inc':
inc = True
else:
dec = True
logger.info('%s. show=%r by %r episodes', args.sub_command, args.ltitle, args.by)
show.inc_dec_episode(showdb, inc=inc, dec=dec, by=args.by)
next_episode = season_episode_str_from_show(show)
logger.debug(
'Update _next_episode attribute for show=%r. Was %r, now %r.',
args.ltitle,
show._next_episode,
next_episode,
)
# TODO: Set this in the correct location
show._next_episode = next_episode
def command_rm(args, showdb, trackerdb):
"""Remove a show, or remove a detail from a show"""
if args.ltitle not in trackerdb:
raise ShowNotTrackedError('<{!r}> is not currently tracked.'.format(args.ltitle))
if args.note:
logger.info(
'Remove note for show=<%r>. Previous note=%r.',
args.ltitle,
trackerdb._shows[args.ltitle].notes,
)
trackerdb._shows[args.ltitle].notes = None
if args.short_code:
logger.info(
'Remove short-code for show=<%r>. Previous short-code=%r.',
args.ltitle,
trackerdb._shows[args.ltitle].short_code,
)
trackerdb._shows[args.ltitle].short_code = None
if not (args.note or args.short_code):
# If neither a note nor short_code were passed then remove the show
logger.info('Remove show=<%r> from tracker database.', args.ltitle)
del trackerdb._shows[args.ltitle]
def tracker(args):
"""Main body of code for application"""
# For most of the actions, we will be modifying the tracker, and we
# should save any changes made
save = True
db_check = check_for_databases(args.database_dir)
logger.debug(db_check)
if db_check.showdb_exists and db_check.tracker_exists:
showdb, trackerdb = load_all_dbs(args.database_dir)
logger.info(
'Successfully loaded show database and tracker database '
'from database_dir=%r', args.database_dir
)
elif db_check.showdb_exists and not db_check.tracker_exists:
raise TrackerDatabaseNotFoundError('Tracker database not found.')
elif not db_check.showdb_exists and db_check.tracker_exists:
raise ShowDatabaseNotFoundError('Show database not found.')
else:
# Neither database present
# Only correct usage at this point is to add a show
if not (args.watchlist or args.sub_command == 'add'):
raise InvalidUsageError('No databases found, and no attempt to add a show.')
# Handles case where databases are present, but a non-functional option
# was passed, such as --database-dir
if not (args.list or args.watchlist or args.sub_command):
raise InvalidUsageError('Databases present, but no other valid commands passed')
if not (db_check.showdb_exists and db_check.tracker_exists):
showdb = ShowDatabase(args.database_dir)
trackerdb = TrackerDatabase(args.database_dir)
if args.list:
# We haven't modified the tracker, so we shouldn't write to it
save = False
tabulator([trackerdb._shows[key] for key in trackerdb])
elif args.watchlist:
handle_watchlist(args, showdb, trackerdb)
else:
# Check if there is a season-episode code passed in the show
# field, e.g., 'game of thrones s06e10'
if check_for_season_episode_code(args.show):
show_split = args.show.split()
args.show = ' '.join(show_split[:-1])
args.next_episode = show_split[-1].upper()
logger.debug(
'Extracted season-episode code=%r from show field. '
'Show field now contains %r.', args.next_episode, args.show
)
else:
# If no next episode was passed in the show field, then default
# to the show premiere, i.e., 'S01E01'
args.next_episode = 'S01E01'
# Save an uppercase version of the show
ushow = args.show.upper()
# Check to see if the show field is really a short_code
# TODO: Perhaps a mapping dict would be more suitable for this
if ushow in trackerdb._short_codes():
for ltitle, s in trackerdb._shows.items():
if s.short_code == ushow:
args.show = s.title
args.ltitle = lunderize(args.show)
args.func(args, showdb, trackerdb)
if save:
logger.info('Write tracker database to disk.')
trackerdb.write_db()
def main():
"""Main entry point for this utility"""
parser = process_args()
args = parser.parse_args()
# Pass console=True to enable console log
logging_init(os.path.basename(__file__), debug=args.verbose)
# We don't need to see DEBUG or INFO messages from urllib3
logging.getLogger("urllib3").setLevel(logging.WARNING)
    logger.info('Starting tracker utility')
logger.debug(args)
try:
tracker(args)
    # TODO: Will these errors supersede any of the others?
except (DatabaseError, WatchlistError, TrackerError, APIRequestError) as e:
print('ERROR: {}'.format(e))
logger.exception(e)
parser.print_help()
except InvalidUsageError as e:
logger.exception(e)
parser.print_help()
if __name__ == '__main__':
sys.exit(main())
| 32.082117 | 94 | 0.615608 | 4,253 | 35,162 | 4.887374 | 0.112626 | 0.021697 | 0.005388 | 0.004618 | 0.339892 | 0.266044 | 0.222265 | 0.164534 | 0.140142 | 0.125132 | 0 | 0.002925 | 0.290257 | 35,162 | 1,095 | 95 | 32.111416 | 0.829981 | 0.211649 | 0 | 0.301028 | 0 | 0.001468 | 0.108376 | 0.006801 | 0 | 0 | 0 | 0.006393 | 0 | 1 | 0.088106 | false | 0.004405 | 0.016153 | 0.020558 | 0.15859 | 0.004405 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca02dcd616277e3b385bdfc3b02db8570bac7008 | 3,573 | py | Python | pycsp/common/toolkit.py | Erhannis/pycsp | 7b06b4d90a73ed84f60d4321ae66fbbf4ebab039 | [
"MIT"
] | 38 | 2016-04-18T08:39:23.000Z | 2021-12-23T01:49:09.000Z | pycsp/common/toolkit.py | Erhannis/pycsp | 7b06b4d90a73ed84f60d4321ae66fbbf4ebab039 | [
"MIT"
] | 8 | 2016-04-05T14:28:43.000Z | 2021-01-21T02:15:19.000Z | pycsp/common/toolkit.py | Erhannis/pycsp | 7b06b4d90a73ed84f60d4321ae66fbbf4ebab039 | [
"MIT"
] | 10 | 2015-12-11T14:43:11.000Z | 2022-01-04T08:48:35.000Z | """
Toolkit module
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>.
See LICENSE.txt for licensing details (MIT License).
"""
from pycsp.common.six import string_types
import pycsp.current as pycsp
if pycsp.trace:
from pycsp.common import trace as pycsp
import subprocess
import types
def which(cmd):
P = subprocess.Popen(args=('which', cmd), stdin=None, stdout=subprocess.PIPE)
(stdout, _) = P.communicate()
return stdout.strip()
@pycsp.process
def file_r(cout, file, retire_on_eof=True, sep="\n"):
if isinstance(file, string_types):
file = open(file, 'r')
try:
buf = []
line = file.readline()
while (line):
buf.append(line)
if buf[-1].find(sep) > -1:
cout(buf)
buf = []
line = file.readline()
if buf:
cout(buf)
    except Exception:
        pass  # read errors or channel retirement simply end the reader
file.close()
if retire_on_eof:
pycsp.retire(cout)
@pycsp.process
def file_w(cin, file):
if isinstance(file, string_types):
file = open(file, 'w')
try:
while True:
data = cin()
if type(data) == list:
file.write(''.join(data))
else:
file.write(data)
file.flush()
    except Exception:
        file.close()  # channel retired: stop and close the file
@pycsp.process
def runner(cin):
while True:
command, stdinChEnd, stdoutChEnd, stderrChEnd = cin()
pycsp.Sequence(
execute(command, stdinChEnd, stdoutChEnd, stderrChEnd)
)
@pycsp.process
def execute(command, stdinChEnd=None, stdoutChEnd=None, stderrChEnd=None, retire_on_eof=True):
stdin, stdout, stderr = [None]*3
if stdinChEnd: stdin = subprocess.PIPE
if stdoutChEnd: stdout = subprocess.PIPE
if stderrChEnd: stderr = subprocess.PIPE
P = subprocess.Popen(args=command,
stdin=stdin,
stdout=stdout,
stderr=stderr)
@pycsp.choice
def handle_stdin(channel_input, stdin):
stdin.write(channel_input)
stdin.flush()
@pycsp.choice
def forwarder(channel_input, cout):
cout(channel_input)
altList = []
if stdinChEnd:
altList.append((stdinChEnd, handle_stdin(stdin=P.stdin)))
if stdoutChEnd:
C1 = pycsp.Channel()
C1in = C1.reader()
pycsp.Spawn(file_r(C1.writer(), P.stdout))
altList.append((C1in, forwarder(cout=stdoutChEnd)))
if stderrChEnd:
C2 = pycsp.Channel()
C2in = C2.reader()
pycsp.Spawn(file_r(C2.writer(), P.stderr))
altList.append((C2in, forwarder(cout=stderrChEnd)))
if altList:
alt = pycsp.Alternation(altList)
try:
while True:
alt.execute()
except pycsp.ChannelRetireException:
# stdout has reached eof
if stdoutChEnd:
pycsp.retire(C1in)
if stderrChEnd:
pycsp.retire(C2in)
if retire_on_eof:
if stdoutChEnd:
pycsp.retire(stdoutChEnd)
if stderrChEnd:
pycsp.retire(stderrChEnd)
else:
P.wait()
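if __name__ == '__main__':
    # Minimal usage sketch (an illustration, not part of the toolkit): pipe the
    # stdout of a command into a file via channels. Assumes a POSIX 'ls' binary
    # on PATH and that pycsp.Parallel is available in the selected pycsp
    # implementation.
    out = pycsp.Channel()
    pycsp.Parallel(
        execute(('ls', '-l'), stdoutChEnd=out.writer()),
        file_w(out.reader(), 'listing.txt'),
    )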
| 25.161972 | 94 | 0.52365 | 365 | 3,573 | 5.065753 | 0.309589 | 0.029746 | 0.03245 | 0.021633 | 0.094105 | 0.042185 | 0.042185 | 0.042185 | 0 | 0 | 0 | 0.00852 | 0.375875 | 3,573 | 141 | 95 | 25.340426 | 0.820628 | 0.065211 | 0 | 0.346939 | 0 | 0 | 0.002702 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.010204 | 0.05102 | 0 | 0.132653 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca04ff4c503789e9653ebd021754a4374c36aaf9 | 22,428 | py | Python | api/backend.py | DataCloud-project/DIS-PIPE-Discovery---Visualization | 60fce163da330f09f90ca031f153ed07bb80f87a | [
"Apache-2.0"
] | 2 | 2022-02-28T01:56:03.000Z | 2022-03-02T21:06:35.000Z | api/backend.py | DataCloud-project/DIS-PIPE-Discovery---Visualization | 60fce163da330f09f90ca031f153ed07bb80f87a | [
"Apache-2.0"
] | null | null | null | api/backend.py | DataCloud-project/DIS-PIPE-Discovery---Visualization | 60fce163da330f09f90ca031f153ed07bb80f87a | [
"Apache-2.0"
] | 1 | 2022-02-22T10:30:21.000Z | 2022-02-22T10:30:21.000Z | import flask
from flask import request, jsonify, render_template
import os
import requests
IMAGE_FOLDER = os.path.join('static', 'images')
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.config['UPLOAD_FOLDER'] = IMAGE_FOLDER
from graphviz import Digraph
#import file xes
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.algo.discovery.dfg import algorithm as dfg_discovery
from pm4py.visualization.dfg import visualizer as dfg_visualization
import pm4py
from pm4py.objects.dfg.filtering import dfg_filtering
from pm4py.algo.discovery.inductive import algorithm as inductive_miner
from pm4py.visualization.petri_net import visualizer as pn_visualizer
from pm4py.objects.conversion.process_tree import converter
from pm4py.algo.filtering.log.start_activities import start_activities_filter
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
from pm4py.objects.log.util import interval_lifecycle
from pm4py.algo.filtering.log.variants import variants_filter
from pm4py.util import constants
#log = xes_importer.apply(log_path)
with open('../properties.txt') as f:
lines = f.readlines()
backend=lines[1]
backend = backend.split(': ')
backend = backend[1]
backend = backend.split('//')
path = backend[1].split(':')[0]
port_n = backend[1].split(':')[1]
port_n = port_n.split('/')[0]
#print(path)
#print(port_n)
f.close()
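# The parsing above assumes properties.txt carries the backend URL on its
# second line, e.g. (illustrative): "backend: http://127.0.0.1:5000/"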
import platform
if platform.system() == "Windows":
log_path = 'event logs\\running-example.xes'
if platform.system() == "Linux":
log_path = 'event logs/running-example.xes'
def createGraphF(log):
log = interval_lifecycle.assign_lead_cycle_time(log, parameters={
constants.PARAMETER_CONSTANT_START_TIMESTAMP_KEY: "start_timestamp",
constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: "time:timestamp"})
#DFG - process discovery
#dfg_freq = dfg_discovery.apply(log)
dfg, start_activities, end_activities = pm4py.discover_dfg(log)
parameters = dfg_visualization.Variants.FREQUENCY.value.Parameters
#visualize DFG - frequency
#gviz_freq = dfg_visualization.apply(dfg_freq, log=log, variant=dfg_visualization.Variants.FREQUENCY)
gviz_freq = dfg_visualization.apply(dfg, log=log, variant=dfg_visualization.Variants.FREQUENCY,
parameters={parameters.FORMAT: "svg", parameters.START_ACTIVITIES: start_activities,
parameters.END_ACTIVITIES: end_activities})
#dfg_visualization.save(gviz_freq, "static/images/frequency.png")
#freq_img = os.path.join(app.config['UPLOAD_FOLDER'], 'frequency.png')
return gviz_freq
def createGraphP(log):
log = interval_lifecycle.assign_lead_cycle_time(log, parameters={
constants.PARAMETER_CONSTANT_START_TIMESTAMP_KEY: "start_timestamp",
constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: "time:timestamp"})
#DFG - process discovery
#dfg_perf = dfg_discovery.apply(log, variant=dfg_discovery.Variants.PERFORMANCE)
#parameters = {dfg_visualization.Variants.PERFORMANCE.value.Parameters.FORMAT: "svg"}
dfg, start_activities, end_activities = pm4py.discover_dfg(log)
parameters = dfg_visualization.Variants.PERFORMANCE.value.Parameters
#visualize DFG - performance
gviz_perf = dfg_visualization.apply(dfg, log=log, variant=dfg_visualization.Variants.PERFORMANCE,
parameters={parameters.FORMAT: "svg", parameters.START_ACTIVITIES: start_activities,
parameters.END_ACTIVITIES: end_activities})
#gviz_perf = dfg_visualization.apply(dfg_perf, log=log, variant=dfg_visualization.Variants.PERFORMANCE)
#dfg_visualization.view(gviz)
#dfg_visualization.save(gviz_perf, "static/images/performance.png")
#perf_img = os.path.join(app.config['UPLOAD_FOLDER'], 'performance.png')
#return render_template("index.html", img_freq = freq_img, img_perf = perf_img, string = str(gviz_freq))
#string_html = render_template("string.html", string = str(gviz_freq))
#frequency = render_template("img_freq.html", img_freq = freq_img)
#performance = render_template("img_perf.html", img_perf = perf_img)
return gviz_perf
def createGraphFReduced(log):
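    """Frequency DFG filtered to the top myActF% of activities and myPathF% of paths (query params, default 100)."""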
log = interval_lifecycle.assign_lead_cycle_time(log, parameters={
constants.PARAMETER_CONSTANT_START_TIMESTAMP_KEY: "start_timestamp",
constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: "time:timestamp"})
#print(type(request.args.get('myPahtF')))
# GET
#print(type(request.args.get('myPathF')))
#x = request.args.get('myPathF')
    if request.args.get('myActF') is None:
        act = 100
    else:
        act = int(request.args.get('myActF'))
    if request.args.get('myPathF') is None:
        path = 100
    else:
        path = int(request.args.get('myPathF'))
#print("Freq: "+str(act)+" "+str(path))
dfg_f, sa_f, ea_f = pm4py.discover_directly_follows_graph(log)
parameters = dfg_visualization.Variants.FREQUENCY.value.Parameters
activities_count_f = pm4py.get_event_attribute_values(log, "concept:name")
dfg_f, sa_f, ea_f, activities_count_f = dfg_filtering.filter_dfg_on_activities_percentage(dfg_f, sa_f, ea_f, activities_count_f, act/100)
dfg_f, sa_f, ea_f, activities_count_f = dfg_filtering.filter_dfg_on_paths_percentage(dfg_f, sa_f, ea_f, activities_count_f, path/100)
gviz_f = dfg_visualization.apply(dfg_f, log=log, variant=dfg_visualization.Variants.FREQUENCY,
parameters={parameters.FORMAT: "svg", parameters.START_ACTIVITIES: sa_f,
parameters.END_ACTIVITIES: ea_f})
return gviz_f
def createGraphPReduced(log):
log = interval_lifecycle.assign_lead_cycle_time(log, parameters={
constants.PARAMETER_CONSTANT_START_TIMESTAMP_KEY: "start_timestamp",
constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: "time:timestamp"})
#print(type(request.args.get('myPahtF')))
# GET
#print(type(request.args.get('myPathF')))
#x = request.args.get('myPathF')
    if request.args.get('myActP') is None:
        act = 100
    else:
        act = int(request.args.get('myActP'))
    if request.args.get('myPathP') is None:
        path = 100
    else:
        path = int(request.args.get('myPathP'))
#print("Perf: "+str(act)+" "+str(path))
dfg_p, sa_p, ea_p = pm4py.discover_directly_follows_graph(log)
parameters = dfg_visualization.Variants.PERFORMANCE.value.Parameters
activities_count_p = pm4py.get_event_attribute_values(log, "concept:name")
dfg_p, sa_p, ea_p, activities_count_p = dfg_filtering.filter_dfg_on_activities_percentage(dfg_p, sa_p, ea_p, activities_count_p, act/100)
dfg_p, sa_p, ea_p, activities_count_p = dfg_filtering.filter_dfg_on_paths_percentage(dfg_p, sa_p, ea_p, activities_count_p, path/100)
gviz_f = dfg_visualization.apply(dfg_p, log=log, variant=dfg_visualization.Variants.PERFORMANCE,
parameters={parameters.FORMAT: "svg", parameters.START_ACTIVITIES: sa_p,
parameters.END_ACTIVITIES: ea_p})
return gviz_f
@app.route('/dfgFrequency', methods=['GET'])
def dfgFrequency():
log = xes_importer.apply(log_path)
return str(createGraphF(log))
@app.route('/dfgPerformance', methods=['GET'])
def dfgPerformance():
log = xes_importer.apply(log_path)
return str(createGraphP(log))
@app.route('/variants', methods=['GET'])
def variants():
log = xes_importer.apply(log_path)
variants = variants_filter.get_variants(log)
variantsDict = '{'
j=0
for var, trace in variants.items():
cases = len(trace)
info = (list(variants.values())[j][0])
info = info.__getattribute__('attributes')
        # open this variant's entry
if("variant-index" in info):
variantsDict = variantsDict + '"' + str(info['variant-index']) + '": ['
else:
variantsDict = variantsDict + '"' + str(j) + '": ['
for i in range(0, cases):
info = (list(variants.values())[j][i])
info = info.__getattribute__('attributes')
caseName = info['concept:name']
variantsDict = variantsDict + '{"'+str(caseName)+'":['
for x in trace[i]:
timestamp = x['time:timestamp']
x['time:timestamp'] = str(timestamp)
stringX = str(x).replace("'",'"')
variantsDict = variantsDict + '' + stringX #+', '
            variantsDict = variantsDict + ']}'  # close this case
        variantsDict = variantsDict + ']'  # close this variant
        j = j + 1
    variantsDict = variantsDict + '}'  # close the whole JSON object
variantsDict = variantsDict.replace("][","],[")
variantsDict = variantsDict.replace("}{","},{")
variantsDict = variantsDict.replace(']"','],"')
variantsDict = variantsDict.replace('True','"True"')
variantsDict = variantsDict.replace('False','"False"')
return variantsDict
@app.route('/dfgFreqReduced', methods=['GET', 'POST'])
def dfgFreqReduced():
log = xes_importer.apply(log_path)
return str(createGraphFReduced(log))
@app.route('/dfgPerfReduced', methods=['GET', 'POST'])
def dfgPerfReduced():
log = xes_importer.apply(log_path)
return str(createGraphPReduced(log))
@app.route('/filter', methods=['GET', 'POST'])
def filter():
log = xes_importer.apply(log_path)
variants = variants_filter.get_variants(log)
from pm4py.algo.filtering.log.timestamp import timestamp_filter
from pm4py.algo.filtering.log.cases import case_filter
# GET
    if request.args.get('start') is None:
        start = 0
    else:
        start = request.args.get('start')
    if request.args.get('end') is None:
        end = 0
    else:
        end = request.args.get('end')
    if request.args.get('min') is None:
        min_sec = 0
    else:
        min_sec = float(request.args.get('min'))
    if request.args.get('max') is None:
        max_sec = 100 * 3.154e+7  # default cap: roughly 100 years in seconds
    else:
        max_sec = float(request.args.get('max'))
if request.args.get('filterTime') == "true":
if request.args.get('timeframe') == 'contained':
filtered_log = timestamp_filter.filter_traces_contained(log, start, end)
log = filtered_log
elif request.args.get('timeframe') == 'intersecting':
filtered_log = timestamp_filter.filter_traces_intersecting(log, start, end)
log = filtered_log
else:
filtered_log = log
if request.args.get('filterPerf') == "true":
filtered_log = case_filter.filter_case_performance(log, min_sec, max_sec)
log = filtered_log
else:
filtered_log = log
f = createGraphFReduced(filtered_log)
p = createGraphPReduced(filtered_log)
'''
variantsDict = '{'
cases = len(filtered_log)
print(cases)
j=0
for i in range(0, cases):
j=j+1
info = filtered_log[i].__getattribute__('attributes')
caseName = info['concept:name']
if ("variant-index" in info):
varIndex = info['variant-index']
if (i == 0):
variantsDict = variantsDict + '"' + str(varIndex) + '": ['
else:
variantsDict = variantsDict + '"' + str(j) + '": ['
variantsDict = variantsDict + '{"' + str(caseName) + '":['
for x in filtered_log[i]:
timestamp = x['time:timestamp']
x['time:timestamp'] = str(timestamp)
stringX = str(x).replace("'", '"')
variantsDict = variantsDict + '' + stringX # +', '
variantsDict = variantsDict + ']}' # chiude ogni caso
if ("variant-index" not in info):
variantsDict = variantsDict + ']' # chiude ogni variante
if ("variant-index" in info):
variantsDict = variantsDict + ']' # chiude ogni variante
variantsDict = variantsDict + '}' # chiude tutto
variantsDict = variantsDict.replace("][","],[")
variantsDict = variantsDict.replace("}{","},{")
variantsDict = variantsDict.replace(']"','],"')
'''
variantsDict = '{'
j=-1
for var, trace in variants.items():
j =j+1
cases = len(trace)
#print("Numero cases var "+str(j)+": "+str(cases))
varEmpty = True
for i in range(0, cases):
info = (list(variants.values())[j][i])
info = info.__getattribute__('attributes')
#print("info: "+str(info))
caseName = info['concept:name']
#print(trace[i])
inFilter = False
for k in range(0,len(filtered_log)):
infoFiltered = filtered_log[k].__getattribute__('attributes')
filteredCaseName = infoFiltered['concept:name']
if(filteredCaseName == caseName):
inFilter = True
break
if(inFilter == False):
break
if(i==0):
varEmpty = False
if("variant-index" in info):
variantsDict = variantsDict + '"' + str(info['variant-index']) + '": ['
else:
variantsDict = variantsDict + '"' + str(j) + '": ['
#variantsDict = variantsDict + '"' + str(j) + '": ['
variantsDict = variantsDict + '{"'+str(caseName)+'":['
#print("Trac i len: "+str(len(trace[i])))
for x in trace[i]:
timestamp = x['time:timestamp']
x['time:timestamp'] = str(timestamp)
stringX = str(x).replace("'",'"')
variantsDict = variantsDict + '' + stringX #+', '
variantsDict = variantsDict + ']}' # chiude ogni caso
if(varEmpty == False):
variantsDict = variantsDict + ']' # chiude ogni variante
variantsDict = variantsDict + '}' # chiude tutto
variantsDict = variantsDict.replace("][","],[")
variantsDict = variantsDict.replace("}{","},{")
variantsDict = variantsDict.replace(']"','],"')
#variantsDict = variantsDict.replace('}"','},"')
variantsDict = variantsDict.replace('True','"True"')
variantsDict = variantsDict.replace('False','"False"')
result = str(f)+"|||"+str(p)+"|||"+str(variantsDict)
return result
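
# NOTE: everything between the triple-quote below and the matching one just
# before app.run() sits inside a module-level string literal, so the endpoints
# in that span are currently disabled (dead code kept for reference).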
'''
@app.route('/petriNetFreq', methods=['GET'])
def petriNetFreq():
    log = xes_importer.apply(log_path)
    net, initial_marking, final_marking = inductive_miner.apply(log)
    parameters = {pn_visualizer.Variants.FREQUENCY.value.Parameters.FORMAT: "png"}
    gviz_pnf = pn_visualizer.apply(net, initial_marking, final_marking, parameters=parameters, variant=pn_visualizer.Variants.FREQUENCY, log=log)
    return str(gviz_pnf)


@app.route('/petriNetPerf', methods=['GET'])
def petriNetPerf():
    log = xes_importer.apply(log_path)
    net, initial_marking, final_marking = inductive_miner.apply(log)
    parameters = {pn_visualizer.Variants.PERFORMANCE.value.Parameters.FORMAT: "png"}
    gviz_pnp = pn_visualizer.apply(net, initial_marking, final_marking, parameters=parameters, variant=pn_visualizer.Variants.PERFORMANCE, log=log)
    return str(gviz_pnp)


@app.route('/bpmn', methods=['GET'])
def bpmn():
    log = xes_importer.apply(log_path)
    tree = pm4py.discover_process_tree_inductive(log)
    bpmn_graph = converter.apply(tree, variant=converter.Variants.TO_BPMN)
    gviz_bpmn = pm4py.visualization.bpmn.visualizer.apply(bpmn_graph)
    return str(gviz_bpmn)


@app.route('/start', methods=['GET'])
def start():
    log = xes_importer.apply(log_path)
    log_start = start_activities_filter.get_start_activities(log)
    return log_start


@app.route('/end', methods=['GET'])
def end():
    log = xes_importer.apply(log_path)
    end_activities = end_activities_filter.get_end_activities(log)
    return end_activities

@app.route('/median', methods=['GET', 'POST'])
def median():
    # MEDIAN CASE
    from pm4py.statistics.traces.generic.log import case_statistics
    import time
    log = xes_importer.apply(log_path)
    median_case_duration = case_statistics.get_median_case_duration(log, parameters={
        case_statistics.Parameters.TIMESTAMP_KEY: "time:timestamp"
    })
    return str(median_case_duration)


@app.route('/total', methods=['GET', 'POST'])
def total():
    # ALL CASES
    from pm4py.statistics.traces.generic.log import case_statistics
    import time
    log = xes_importer.apply(log_path)
    all_case_durations = case_statistics.get_all_case_durations(log, parameters={
        case_statistics.Parameters.TIMESTAMP_KEY: "time:timestamp"})
    total = 0
    for i in range(0, len(all_case_durations)):
        total = total + all_case_durations[i]
    return str(total)

@app.route('/filterPerformance', methods=['GET', 'POST'])
def filterPerformance():
    log = xes_importer.apply(log_path)
    from pm4py.algo.filtering.log.cases import case_filter
    # GET
    if request.args.get('min') == None:
        min_sec = 0
    else:
        min_sec = float(request.args.get('min'))
    if request.args.get('max') == None:
        max_sec = 100 * 3.154e+7  # default upper bound: roughly 100 years in seconds
    else:
        max_sec = float(request.args.get('max'))
    filtered_log = case_filter.filter_case_performance(log, min_sec, max_sec)
    f = createGraphFReduced(filtered_log)
    p = createGraphPReduced(filtered_log)
    variantsDict = '{'
    cases = len(filtered_log)
    j = 0
    for i in range(0, cases):
        info = filtered_log[i].__getattribute__('attributes')
        caseName = info['concept:name']
        if ("variant-index" in info):
            varIndex = info['variant-index']
            if (i == 0):
                variantsDict = variantsDict + '"' + str(varIndex) + '": ['
        else:
            variantsDict = variantsDict + '"' + str(j + 1) + '": ['
        variantsDict = variantsDict + '{"' + str(caseName) + '":['
        for x in filtered_log[i]:
            timestamp = x['time:timestamp']
            x['time:timestamp'] = str(timestamp)
            stringX = str(x).replace("'", '"')
            variantsDict = variantsDict + '' + stringX  # +', '
        j = j + 1
        variantsDict = variantsDict + ']}'  # closes each case
        if ("variant-index" not in info):
            if (i == cases - 1):  # last case
                variantsDict = variantsDict + ']'  # closes each variant
        if ("variant-index" in info):
            variantsDict = variantsDict + ']'  # closes each variant
    variantsDict = variantsDict + '}'  # closes everything
    variantsDict = variantsDict.replace("][", "],[")
    variantsDict = variantsDict.replace("}{", "},{")
    variantsDict = variantsDict.replace(']"', '],"')
    result = str(f) + "|||" + str(p) + "|||" + str(variantsDict)  # same payload shape as /filter
    return result

@app.route('/filterTimeframe', methods=['GET', 'POST'])
def filterTimeframe():
    log = xes_importer.apply(log_path)
    from pm4py.algo.filtering.log.timestamp import timestamp_filter
    # GET
    if request.args.get('start') == None:
        start = 0
    else:
        start = request.args.get('start')
    if request.args.get('end') == None:
        end = 0
    else:
        end = request.args.get('end')
    if request.args.get('timeframe') == 'contained':
        filtered_log = timestamp_filter.filter_traces_contained(log, start, end)
    elif request.args.get('timeframe') == 'intersecting':
        filtered_log = timestamp_filter.filter_traces_intersecting(log, start, end)
    else:
        filtered_log = log  # fallback so filtered_log is always defined
    f = createGraphFReduced(filtered_log)
    p = createGraphPReduced(filtered_log)
    variantsDict = '{'
    cases = len(filtered_log)
    j = 0
    for i in range(0, cases):
        info = filtered_log[i].__getattribute__('attributes')
        caseName = info['concept:name']
        if ("variant-index" in info):
            varIndex = info['variant-index']
            if (i == 0):
                variantsDict = variantsDict + '"' + str(varIndex) + '": ['
        else:
            variantsDict = variantsDict + '"' + str(j + 1) + '": ['
        variantsDict = variantsDict + '{"' + str(caseName) + '":['
        for x in filtered_log[i]:
            timestamp = x['time:timestamp']
            x['time:timestamp'] = str(timestamp)
            stringX = str(x).replace("'", '"')
            variantsDict = variantsDict + '' + stringX  # +', '
        j = j + 1
        variantsDict = variantsDict + ']}'  # closes each case
        if ("variant-index" not in info):
            if (i == cases - 1):  # last case
                variantsDict = variantsDict + ']'  # closes each variant
        if ("variant-index" in info):
            variantsDict = variantsDict + ']'  # closes each variant
    variantsDict = variantsDict + '}'  # closes everything
    variantsDict = variantsDict.replace("][", "],[")
    variantsDict = variantsDict.replace("}{", "},{")
    variantsDict = variantsDict.replace(']"', '],"')
    result = str(f) + "|||" + str(p) + "|||" + str(variantsDict)
    return result
'''
app.run(host=path, port=int(port_n))
# ===========================================================================
# File: veroviz/_params.py (repo: INFORMSJoC/2020.0340, license: MIT)
# ===========================================================================
# Copyright (c) 2021 Lan Peng and Chase Murray
# Licensed under the MIT License. See LICENSING for details.
# Constants for distance unit conversion
VRV_CONST_METERS_PER_KILOMETER = 1000.0
VRV_CONST_METERS_PER_MILE = 1609.34
VRV_CONST_METERS_PER_YARD = 0.9144
VRV_CONST_METERS_PER_FEET = 0.3048
VRV_CONST_METERS_PER_NAUTICAL_MILE = 1852.0
# Constants for speed unit conversion
VRV_CONST_MPS_TO_KPH = 3.6
VRV_CONST_MPS_TO_MPH = 2.23694
# Constants for time unit conversion
VRV_CONST_SECONDS_PER_HOUR = 3600.0
VRV_CONST_SECONDS_PER_MINUTE = 60.0
# Area conversions
VRV_CONST_SQKM_PER_SQMETER = 1e-6
VRV_CONST_SQMILES_PER_SQMETER = 3.861e-7
VRV_CONST_SQFT_PER_SQMETER = 10.7639
# Standard const
VRV_CONST_RADIUS_OF_EARTH = 6378100.0 # [meters]
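
# Example conversions using the constants above (illustrative only):
#   dist_miles = dist_meters / VRV_CONST_METERS_PER_MILE
#   speed_mph = speed_mps * VRV_CONST_MPS_TO_MPH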
# # Default error tolerance of distance between origin/destin to snapped loc
# VRV_DEFAULT_DISTANCE_ERROR_TOLERANCE = 10 # [meters]
# # Defaults for adding Leaflet objects (e.g., circles, polygons, text)
# VRV_DEFAULT_LEAFLET_OBJECT_COLOR_LINE = 'red'
# VRV_DEFAULT_LEAFLET_OBJECT_COLOR_FILL = 'red'
# VRV_DEFAULT_LEAFLET_FONTSIZE = 24 # pt
# VRV_DEFAULT_LEAFLET_FONTCOLOR = 'orange'
# VRV_DEFAULT_LEAFLET_MAPTILES = 'CartoDB positron'
# # Default Setting for leaflet
# VRV_DEFAULT_LEAFLETICONPREFIX = 'glyphicon'
# VRV_DEFAULT_LEAFLETICONTYPE = 'info-sign'
# VRV_DEFAULT_LEAFLETICONCOLOR = 'blue'
# VRV_DEFAULT_LEAFLETARCWEIGHT = 3
# VRV_DEFAULT_LEAFLETARCSTYLE = 'solid'
# VRV_DEFAULT_LEAFLETARCOPACITY = 0.8
# VRV_DEFAULT_LEAFLETARCCOLOR = 'orange'
# VRV_DEFAULT_LEAFLETBOUNDINGWEIGHT = 3
# VRV_DEFAULT_LEAFLETBOUNDINGOPACITY = 0.6
# VRV_DEFAULT_LEAFLETBOUNDINGSTYLE = 'dashed'
# VRV_DEFAULT_LEAFLETBOUNDINGCOLOR = 'brown'
# VRV_DEFAULT_ARCCURVETYPE = 'straight'
# VRV_DEFAULT_ARCCURVATURE = 0
# # Default Setting for Cesium
# VRV_DEFAULT_CESIUMMODELSCALE = 100 # 100%
# VRV_DEFAULT_CESIUMMODELMINPXSIZE = 75 # px
# VRV_DEFAULT_CESIUMICONTYPE = 'pin'
# VRV_DEFAULT_CESIUMICONSIZE = 40
# VRV_DEFAULT_CESIUMICONCOLOR = 'blue'
# VRV_DEFAULT_CESIUMPATHCOLOR = 'orange'
# VRV_DEFAULT_CESIUMPATHWEIGHT = 3
# VRV_DEFAULT_CESIUMPATHSTYLE = 'solid'
# VRV_DEFAULT_CESIUMPATHOPACITY = 0.8
# VRV_DEFAULT_LEAFLET_ARROWSIZE = 6
# # Default Settings for Gantt Chart
# VRV_DEFAULT_GANTTCOLOR = 'darkgray'
# VRV_DEFAULT_GANTTCOLORSERVICE = 'lightgray'
# VRV_DEFAULT_GANTTCOLORLOITER = 'lightgray'
# # Global Setting
# VRV_SETTING_PGROUTING_USERNAME = 'user'
# VRV_SETTING_PGROUTING_HOST = 'localhost'
# VRV_SETTING_PGROUTING_PASSWORD = ''
# VRV_SETTING_SHOWOUTPUTMESSAGE = True
# VRV_SETTING_SHOWWARNINGMESSAGE = True
config = {
"VRV_DEFAULT_DISTANCE_ERROR_TOLERANCE" : 10,
"VRV_DEFAULT_LEAFLET_OBJECT_COLOR_LINE" : 'red',
"VRV_DEFAULT_LEAFLET_OBJECT_COLOR_FILL" : 'red',
"VRV_DEFAULT_LEAFLET_FONTSIZE" : 24,
"VRV_DEFAULT_LEAFLET_FONTCOLOR" : 'orange',
"VRV_DEFAULT_LEAFLET_MAPTILES" : 'CartoDB positron',
"VRV_DEFAULT_LEAFLETICONPREFIX" : 'glyphicon',
"VRV_DEFAULT_LEAFLETICONTYPE" : 'info-sign',
"VRV_DEFAULT_LEAFLETICONCOLOR" : 'blue',
"VRV_DEFAULT_LEAFLETARCWEIGHT" : 3,
"VRV_DEFAULT_LEAFLETARCSTYLE" : 'solid',
"VRV_DEFAULT_LEAFLETARCOPACITY" : 0.8,
"VRV_DEFAULT_LEAFLETARCCOLOR" : 'orange',
"VRV_DEFAULT_LEAFLETBOUNDINGWEIGHT" : 3,
"VRV_DEFAULT_LEAFLETBOUNDINGOPACITY" : 0.6,
"VRV_DEFAULT_LEAFLETBOUNDINGSTYLE" : 'dashed',
"VRV_DEFAULT_LEAFLETBOUNDINGCOLOR" : 'brown',
"VRV_DEFAULT_ARCCURVETYPE" : 'straight',
"VRV_DEFAULT_ARCCURVATURE" : 0,
"VRV_DEFAULT_CESIUMMODELSCALE" : 100,
"VRV_DEFAULT_CESIUMMODELMINPXSIZE" : 75,
"VRV_DEFAULT_CESIUMICONTYPE" : 'pin',
"VRV_DEFAULT_CESIUMICONSIZE" : 40,
"VRV_DEFAULT_CESIUMICONCOLOR" : 'blue',
"VRV_DEFAULT_CESIUMPATHCOLOR" : 'orange',
"VRV_DEFAULT_CESIUMPATHWEIGHT" : 3,
"VRV_DEFAULT_CESIUMPATHSTYLE" : 'solid',
"VRV_DEFAULT_CESIUMPATHOPACITY" : 0.8,
"VRV_DEFAULT_LEAFLET_ARROWSIZE" : 6,
"VRV_DEFAULT_GANTTCOLOR" : 'darkgray',
"VRV_DEFAULT_GANTTCOLORSERVICE" : 'lightgray',
"VRV_DEFAULT_GANTTCOLORLOITER" : 'lightgray',
"VRV_SETTING_PGROUTING_USERNAME" : 'user',
"VRV_SETTING_PGROUTING_HOST" : 'localhost',
"VRV_SETTING_PGROUTING_PASSWORD" : '',
"VRV_SETTING_SHOWOUTPUTMESSAGE" : True,
"VRV_SETTING_SHOWWARNINGMESSAGE" : True
}
# For validation
nodesColumnList = [
'id',
'lat',
'lon',
'altMeters',
'nodeName',
'nodeType',
'popupText',
'leafletIconPrefix',
'leafletIconType',
'leafletColor',
'leafletIconText',
'cesiumIconType',
'cesiumColor',
'cesiumIconText',
'elevMeters'
]
arcsColumnList = [
'odID',
'objectID',
'startLat',
'startLon',
'endLat',
'endLon',
'leafletColor',
'leafletWeight',
'leafletStyle',
'leafletOpacity',
'leafletCurveType',
'leafletCurvature',
'useArrows',
'cesiumColor',
'cesiumWeight',
'cesiumStyle',
'cesiumOpacity',
'popupText',
'startElevMeters',
'endElevMeters',
]
assignmentsColumnList = [
'odID',
'objectID',
'modelFile',
'modelScale',
'modelMinPxSize',
'startTimeSec',
'startLat',
'startLon',
'startAltMeters',
'endTimeSec',
'endLat',
'endLon',
'endAltMeters',
'leafletColor',
'leafletWeight',
'leafletStyle',
'leafletOpacity',
'leafletCurveType',
'leafletCurvature',
'useArrows',
'cesiumColor',
'cesiumWeight',
'cesiumStyle',
'cesiumOpacity',
'ganttColor',
'popupText',
'startElevMeters',
'endElevMeters',
'wayname',
'waycategory',
'surface',
'waytype',
'steepness',
'tollway'
]
timeUnitsDictionary = {
'seconds': 's',
'second': 's',
'sec': 's',
's': 's',
'minutes': 'min',
'minute': 'min',
'mins': 'min',
'min': 'min',
'hours': 'h',
'hour': 'h',
'hrs': 'h',
'hr': 'h',
'h': 'h',
}
distanceUnitsDictionary = {
'meters': 'm',
'm': 'm',
'kilometers': 'km',
'km': 'km',
'miles': 'mi',
'mi': 'mi',
'yard': 'yard',
'feet': 'ft',
'ft': 'ft',
'nautical miles': 'nmi',
'nmi': 'nmi',
'nm': 'nmi'
}
areaUnitsDictionary = {
'sf': 'sqft',
'sqft': 'sqft',
'sqfeet': 'sqft',
'smi': 'sqmi',
'sqmi': 'sqmi',
'sqmiles': 'sqmi',
'sm': 'sqm',
'sqm': 'sqm',
'sqmeters': 'sqm',
'skm': 'sqkm',
'sqkm': 'sqkm',
'sqkilometers': 'sqkm'
}
mapBackgroundList = {
'cartodb positron',
'cartodb dark_matter',
'openstreetmap',
'stamen terrain',
'stamen toner',
'stamen watercolor',
'arcgis aerial',
'arcgis gray',
'arcgis ocean',
'arcgis roadmap',
'arcgis shaded relief',
'arcgis topo',
'open topo'
}
weatherMapList = {
'clouds',
'precip',
'pressure',
'wind',
'temp'
}
horizAlignList = {
'left',
'right',
'center'
}
dataframeList = {
'nodes',
'arcs',
'assignments'
}
loiterPositionList = [
'beforetakeoff',
'takeoffatalt',
'arrivalatalt',
'afterland'
]
routeType2DList = [
'euclidean2d',
'manhattan',
'fastest',
'shortest',
'pedestrian',
'cycling',
'truck',
'wheelchair'
]
routeType3DList = [
'square',
'triangular',
'trapezoidal',
'straight'
]
isoTravelModeList = [
'driving-car',
'driving-hgv',
'cycling-regular',
'cycling-road',
'cycling-mountain',
'cycling-electric',
'foot-walking',
'foot-hiking',
'wheelchair'
]
dataProviderDictionary = {
'mapquest': 'mapquest',
'mq': 'mapquest',
'pgrouting': 'pgrouting',
'pgr': 'pgrouting',
'osrm-online': 'osrm-online',
'osrm-ol': 'osrm-online',
'openrouteservice-online': 'ors-online',
'openrouteservice-ol': 'ors-online',
'ors-online': 'ors-online',
'ors-ol': 'ors-online',
'openrouteservice-local': 'ors-local',
'openrouteservice-l': 'ors-local',
'ors-local': 'ors-local',
'ors-l': 'ors-local'
}
# NOTE: The only valid dataProviders for the geocode()/reverseGeocode() functions are:
# None, 'ors-online', and 'mapquest'
geoDataProviderDictionary = {
'mapquest': 'mapquest',
'mq': 'mapquest',
'openrouteservice-online': 'ors-online',
'openrouteservice-ol': 'ors-online',
'ors-online': 'ors-online',
'ors-ol': 'ors-online'
}
# NOTE: The only valid dataProvider options for isochrones functions are:
# ors-online and ors-local
isoDataProviderDictionary = {
'openrouteservice-online': 'ors-online',
'openrouteservice-ol': 'ors-online',
'ors-online': 'ors-online',
'ors-ol': 'ors-online',
'openrouteservice-local': 'ors-local',
'openrouteservice-l': 'ors-local',
'ors-local': 'ors-local',
'ors-l': 'ors-local'
}
# NOTE: The only valid dataProvider options for elevation functions are:
# ors-online
# usgs
# elevAPI
elevDataProviderDictionary = {
'openrouteservice-online': 'ors-online',
'openrouteservice-ol': 'ors-online',
'ors-online': 'ors-online',
'ors-ol': 'ors-online',
'usgs': 'usgs',
'elevapi': 'elevapi',
'elev-api': 'elevapi',
'elevation-api': 'elevapi',
'elevationapi': 'elevapi'
}
weatherDataProviderDictionary = {
'openweather': 'openweather',
'openweathermap': 'openweather',
'openweatherapi': 'openweather',
'ow': 'openweather',
'owm': 'openweather'
}
matrixTypeList = [
'all2all',
'one2many',
'many2one'
]
nodeDistribList = [
"uniformBB",
"normalBB",
"normal",
"unifRoadBasedBB"
]
cesiumIconTypeList = [
'pin'
]
cesiumColorList = [
"Cesium.Color.ALICEBLUE",
"Cesium.Color.ANTIQUEWHITE",
"Cesium.Color.AQUA",
"Cesium.Color.AQUAMARINE",
"Cesium.Color.AZURE",
"Cesium.Color.BEIGE",
"Cesium.Color.BISQUE",
"Cesium.Color.BLACK",
"Cesium.Color.BLANCHEDALMOND",
"Cesium.Color.BLUE",
"Cesium.Color.BLUEVIOLET",
"Cesium.Color.BROWN",
"Cesium.Color.BURLYWOOD",
"Cesium.Color.CADETBLUE",
"Cesium.Color.CHARTREUSE",
"Cesium.Color.CHOCOLATE",
"Cesium.Color.CORAL",
"Cesium.Color.CORNFLOWERBLUE",
"Cesium.Color.CORNSILK",
"Cesium.Color.CRIMSON",
"Cesium.Color.CYAN",
"Cesium.Color.DARKBLUE",
"Cesium.Color.DARKCYAN",
"Cesium.Color.DARKGOLDENROD",
"Cesium.Color.DARKGRAY",
"Cesium.Color.DARKGREEN",
"Cesium.Color.DARKGREY",
"Cesium.Color.DARKKHAKI",
"Cesium.Color.DARKMAGENTA",
"Cesium.Color.DARKOLIVEGREEN",
"Cesium.Color.DARKORANGE",
"Cesium.Color.DARKORCHID",
"Cesium.Color.DARKRED",
"Cesium.Color.DARKSALMON",
"Cesium.Color.DARKSEAGREEN",
"Cesium.Color.DARKSLATEBLUE",
"Cesium.Color.DARKSLATEGRAY",
"Cesium.Color.DARKSLATEGREY",
"Cesium.Color.DARKTURQUOISE",
"Cesium.Color.DARKVIOLET",
"Cesium.Color.DEEPPINK",
"Cesium.Color.DEEPSKYBLUE",
"Cesium.Color.DIMGRAY",
"Cesium.Color.DIMGREY",
"Cesium.Color.DODGERBLUE",
"Cesium.Color.FIREBRICK",
"Cesium.Color.FLORALWHITE",
"Cesium.Color.FORESTGREEN",
"Cesium.Color.FUCHSIA",
"Cesium.Color.GAINSBORO",
"Cesium.Color.GHOSTWHITE",
"Cesium.Color.GOLD",
"Cesium.Color.GOLDENROD",
"Cesium.Color.GRAY",
"Cesium.Color.GREEN",
"Cesium.Color.GREENYELLOW",
"Cesium.Color.GREY",
"Cesium.Color.HONEYDEW",
"Cesium.Color.HOTPINK",
"Cesium.Color.INDIANRED",
"Cesium.Color.INDIGO",
"Cesium.Color.IVORY",
"Cesium.Color.KHAKI",
"Cesium.Color.LAVENDAR_BLUSH",
"Cesium.Color.LAVENDER",
"Cesium.Color.LAWNGREEN",
"Cesium.Color.LEMONCHIFFON",
"Cesium.Color.LIGHTBLUE",
"Cesium.Color.LIGHTCORAL",
"Cesium.Color.LIGHTCYAN",
"Cesium.Color.LIGHTGOLDENRODYELLOW",
"Cesium.Color.LIGHTGRAY",
"Cesium.Color.LIGHTGREEN",
"Cesium.Color.LIGHTGREY",
"Cesium.Color.LIGHTPINK",
"Cesium.Color.LIGHTSEAGREEN",
"Cesium.Color.LIGHTSKYBLUE",
"Cesium.Color.LIGHTSLATEGRAY",
"Cesium.Color.LIGHTSLATEGREY",
"Cesium.Color.LIGHTSTEELBLUE",
"Cesium.Color.LIGHTYELLOW",
"Cesium.Color.LIME",
"Cesium.Color.LIMEGREEN",
"Cesium.Color.LINEN",
"Cesium.Color.MAGENTA",
"Cesium.Color.MAROON",
"Cesium.Color.MEDIUMAQUAMARINE",
"Cesium.Color.MEDIUMBLUE",
"Cesium.Color.MEDIUMORCHID",
"Cesium.Color.MEDIUMPURPLE",
"Cesium.Color.MEDIUMSEAGREEN",
"Cesium.Color.MEDIUMSLATEBLUE",
"Cesium.Color.MEDIUMSPRINGGREEN",
"Cesium.Color.MEDIUMTURQUOISE",
"Cesium.Color.MEDIUMVIOLETRED",
"Cesium.Color.MIDNIGHTBLUE",
"Cesium.Color.MINTCREAM",
"Cesium.Color.MISTYROSE",
"Cesium.Color.MOCCASIN",
"Cesium.Color.NAVAJOWHITE",
"Cesium.Color.NAVY",
"Cesium.Color.OLDLACE",
"Cesium.Color.OLIVE",
"Cesium.Color.OLIVEDRAB",
"Cesium.Color.ORANGE",
"Cesium.Color.ORANGERED",
"Cesium.Color.ORCHID",
"Cesium.Color.PALEGOLDENROD",
"Cesium.Color.PALEGREEN",
"Cesium.Color.PALETURQUOISE",
"Cesium.Color.PALEVIOLETRED",
"Cesium.Color.PAPAYAWHIP",
"Cesium.Color.PEACHPUFF",
"Cesium.Color.PERU",
"Cesium.Color.PINK",
"Cesium.Color.PLUM",
"Cesium.Color.POWDERBLUE",
"Cesium.Color.PURPLE",
"Cesium.Color.RED",
"Cesium.Color.ROSYBROWN",
"Cesium.Color.ROYALBLUE",
"Cesium.Color.SADDLEBROWN",
"Cesium.Color.SALMON",
"Cesium.Color.SANDYBROWN",
"Cesium.Color.SEAGREEN",
"Cesium.Color.SEASHELL",
"Cesium.Color.SIENNA",
"Cesium.Color.SILVER",
"Cesium.Color.SKYBLUE",
"Cesium.Color.SLATEBLUE",
"Cesium.Color.SLATEGRAY",
"Cesium.Color.SLATEGREY",
"Cesium.Color.SNOW",
"Cesium.Color.SPRINGGREEN",
"Cesium.Color.STEELBLUE",
"Cesium.Color.TAN",
"Cesium.Color.TEAL",
"Cesium.Color.THISTLE",
"Cesium.Color.TOMATO",
"Cesium.Color.TRANSPARENT",
"Cesium.Color.TURQUOISE",
"Cesium.Color.VIOLET",
"Cesium.Color.WHEAT",
"Cesium.Color.WHITE",
"Cesium.Color.WHITESMOKE",
"Cesium.Color.YELLOW",
"Cesium.Color.YELLOWGREEN"
]
cesiumStyleList = [
"dashed",
"dotted",
"solid"
]
matplotlibColorDict = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'
}
leafletColorList = [
'red',
'blue',
'gray',
'darkred',
'lightred',
'orange',
'beige',
'green',
'darkgreen',
'lightgreen',
'darkblue',
'lightblue',
'purple',
'darkpurple',
'pink',
'cadetblue',
'lightgray',
'black',
'white',
'brown'
]
leafletIconPrefixList = [
"glyphicon",
"fa",
"custom"
]
leafletIconGlyphicon = [
'info-sign',
'home',
'glass',
'flag',
'star',
'bookmark',
'user',
'cloud'
]
leafletIconFa = [
'ambulance',
'bicycle',
'bus',
'car',
'flag',
'heartbeat',
'home',
'motorcycle',
'plane',
'question',
'ship',
'shopping-bag',
'shopping-basket',
'shopping-cart',
'star',
'subway',
'taxi',
'truck',
'university',
'user',
'users'
]
leafletStyleList = [
"dashed",
"dotted",
"solid"
]
# Mapping from ORS ID numbers to more descriptive explanations.
# See https://github.com/GIScience/openrouteservice-docs#routing-response for more info
orsWaycategoryDict = {
0: 'No category',
1: 'Highway',
2: 'Steps',
4: 'Ferry',
8: 'Unpaved road',
16: 'Track',
32: 'Tunnel',
64: 'Paved road',
128: 'Ford'
}
# https://github.com/GIScience/openrouteservice-docs#surface
orsSurfaceDict = {
0: 'Unknown',
1: 'Paved',
2: 'Unpaved',
3: 'Asphalt',
4: 'Concrete',
5: 'Cobblestone',
6: 'Metal',
7: 'Wood',
8: 'Compacted Gravel',
9: 'Fine Gravel',
10: 'Gravel',
11: 'Dirt',
12: 'Ground',
13: 'Ice',
14: 'Paving Stones',
15: 'Sand',
16: 'Woodchips',
17: 'Grass',
18: 'Grass Paver'
}
orsWaytypeDict = {
0: 'Unknown',
1: 'State Road',
2: 'Road',
3: 'Street',
4: 'Path',
5: 'Track',
6: 'Cycleway',
7: 'Footway',
8: 'Steps',
9: 'Ferry',
10: 'Construction'
}
# https://github.com/GIScience/openrouteservice-docs#steepness
orsSteepnessDict = {
-5: '>16%',
-4: '12-15%',
-3: '7-11%',
-2: '4-6%',
-1: '1-3%',
0: '0%',
1: '1-3%',
2: '4-6%',
3: '7-11%',
4: '12-15%',
5: '>16%'
}
# ===========================================================================
# File: adaptation.py (repo: yxtj/VideoServing, license: MIT)
# ===========================================================================
# -*- coding: utf-8 -*-
from common import Configuration
import numpy as np
import torch
import torch.nn as nn
#Configuration = namedtuple('Configuration', ['fps', 'rsl'])

def acc2index(accuracies, thresholds):
    return np.digitize(accuracies, thresholds)

class Adapter():
    def __init__(self, dim_feat=None, dim_hidden=None,
                 fps_list=None, rsl_list=None, acc_list=None,
                 unit_rsl_time=None, model_file=None, **kwargs):
        if model_file is not None:
            self.load_model(model_file)
            return
        self.dim_feat = dim_feat
        self.dim_hidden = dim_hidden
        assert fps_list == sorted(fps_list)
        assert rsl_list == sorted(rsl_list)
        self.fps_list = np.array(fps_list)
        self.nfps = len(fps_list)
        self.rsl_list = np.array(rsl_list)
        self.nrsl = len(rsl_list)
        self.acc_list = np.array(acc_list)
        self.nacc = len(acc_list) + 1
        # network
        self.network = AdaptationModel(self.dim_feat, (self.nfps, self.nrsl),
                                       self.nacc, self.dim_hidden)
        # selection
        self.unit_rsl_time = unit_rsl_time
        assert len(unit_rsl_time) == self.nrsl
        self.time_matrix = np.outer(fps_list, unit_rsl_time)

    def load_model(self, model_file):
        state = torch.load(model_file)
        self.fps_list = state['fps_list']
        self.nfps = len(self.fps_list)
        self.rsl_list = state['rsl_list']
        self.nrsl = len(self.rsl_list)
        self.acc_list = state['acc_list']
        # +1 to mirror __init__: np.digitize yields len(acc_list)+1 bins, and
        # the saved network heads were sized with that value
        self.nacc = len(self.acc_list) + 1
        self.dim_feat = state['dim_feat']
        self.dim_hidden = state['dim_hidden']
        self.network = AdaptationModel(self.dim_feat, (self.nfps, self.nrsl),
                                       self.nacc, self.dim_hidden)
        self.network.load_state_dict(state['model_state'])
        self.unit_rsl_time = state['unit_rsl_time']
        self.time_matrix = np.outer(self.fps_list, self.unit_rsl_time)
    def save_model(self, model_file):
        state = {'model_state': self.network.state_dict(),
                 'fps_list': self.fps_list,
                 'rsl_list': self.rsl_list,
                 'acc_list': self.acc_list,
                 'dim_feat': self.dim_feat,
                 'dim_hidden': self.dim_hidden,
                 'unit_rsl_time': self.unit_rsl_time}
        torch.save(state, model_file)

    def get(self, feature, acc_requires):
        '''
        Get the fastest configuration that satisfies the requirement.
        If none satisfies even the lowest requirement, return the most accurate one.
        Params:
            feature: feature to use.
            acc_requires: a list of accuracy requirements in DESCENDING order.
        '''
        with torch.no_grad():
            pred = self.predict(feature).numpy()
        conf_index = self.pick_config(pred, acc_requires)
        fps = self.trans_fps_index(conf_index[0])
        rsl = self.trans_rsl_index(conf_index[1])
        return Configuration(fps, rsl)

    def train_epoch(self):
        self.network.train()

    # internal functions

    def predict(self, feature):
        o = self.network(feature)
        return o

    def pick_config(self, predicted_acc, acc_bounds):
        '''
        Params:
            predicted_acc : matrix of predicted accuracies of all configurations.
            acc_bounds : list of accuracy requirements. Must be in DESCENDING order.
        Returns the index of fps and resolution.
        '''
        for bound in acc_bounds:
            m = predicted_acc < bound  # mask out those less than the bound
            if not m.all():
                # pick the fastest one
                ind = np.ma.MaskedArray(self.time_matrix, m).argmin()
                x, y = divmod(ind, self.time_matrix.shape[1])
                return x, y
        # nothing satisfies any accuracy bound, so return the best one
        ind = predicted_acc.argmax()
        x, y = divmod(ind, self.time_matrix.shape[1])
        return x, y

    def trans_fps_index(self, fps_index):
        return self.fps_list[fps_index]

    def trans_rsl_index(self, rsl_index):
        return self.rsl_list[rsl_index]

    def trans_config_index(self, fps_index, rsl_index):
        return self.fps_list[fps_index], self.rsl_list[rsl_index]
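
# Illustrative usage (names and values here are hypothetical, not from the
# original project; `feat` is assumed to be a tensor of shape (1, dim_feat)):
#   adapter = Adapter(model_file='adapter.pt')             # restore trained state
#   cfg = adapter.get(feat, acc_requires=[0.9, 0.8, 0.7])  # descending accuracies
#   print(cfg.fps, cfg.rsl)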
# %% model

class AdaptationModel(nn.Module):
    def __init__(self, dim_in, dim_outs, dim_range, dim_hidden=None):
        super().__init__()
        assert isinstance(dim_outs, (tuple, list))
        self.dim_in = dim_in
        self.dim_outs = dim_outs
        self.dim_range = dim_range
        self.dim_hidden = dim_hidden
        self.n_o = np.prod(dim_outs)
        if dim_hidden is None or dim_hidden == 0:
            self.hidden = nn.Identity()
        else:
            self.hidden = nn.Sequential(
                nn.Linear(dim_in, dim_hidden), nn.ReLU(),
            )
        # when no hidden layer is used, the heads read the raw feature vector
        head_in = dim_hidden if dim_hidden else dim_in
        self.heads = nn.ModuleList(
            [torch.nn.Linear(head_in, dim_range) for i in range(self.n_o)])

    def forward(self, x):
        n = len(x)
        shape = [n, *self.dim_outs, self.dim_range]
        h = self.hidden(x)
        # stack per-head outputs along a new dim so each sample keeps its own
        # predictions (cat along dim 0 would interleave samples across heads)
        h = torch.stack([self.heads[i](h) for i in range(self.n_o)], dim=1)
        h = h.view(shape)
        # h = torch.sigmoid(h)
        return h


class AdaptationLoss():
    def __init__(self):
        self.lf = nn.CrossEntropyLoss()

    def __call__(self, output, target):
        shape = output.shape
        n = shape[0]
        nc = np.prod(shape[1:-1])
        r = shape[-1]
        assert n == len(target)
        output = output.view(n, -1, r)
        l = torch.zeros(1)
        for i in range(nc):
            l += self.lf(output[:, i, :], target)
        return l
# %%

def train_epoch(loader, model, optimizer, loss_fn):
    lss = []
    for i, (bx, by) in enumerate(loader):
        optimizer.zero_grad()
        o = model(bx)
        l = loss_fn(o, by)
        l.backward()
        optimizer.step()
        lss.append(l.item())
    return lss

def evaluate(x, y, model):
    model.eval()
    with torch.no_grad():
        p = model(x)
        p = p.view((len(p), -1)).argmax(1)
        # minimal completion of the original "TODO: finish this"; it assumes
        # the labels y are encoded on the same flattened index space as p
        return (p == y).float().mean().item()
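
# Minimal wiring sketch for the helpers above (the loader, epoch count, and
# optimizer settings are placeholders, not part of the original code):
#   model = AdaptationModel(dim_feat, (nfps, nrsl), nacc, dim_hidden)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   loss_fn = AdaptationLoss()
#   for epoch in range(20):
#       losses = train_epoch(loader, model, optimizer, loss_fn)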
# ===========================================================================
# File: teslamateMqttToTelegram.py (repo: carloscuezva/Teslamate-Telegram, license: MIT)
# ===========================================================================
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import requests
import sys
from time import sleep

import config as conf
import logging
import logging.handlers

data = {
    "sent_resumen": 0,
    "display_name": "",
    "state": "",
    "software_current_version": "",
    "software_new_version": "",
    "battery_level": 100,
    "usable_battery_level": 100,
    "inside_temp": 22,
    "outside_temp": 22,
    "longitude": -5,
    "latitude": 42,
    "geofence": ""
}

botMessage = {
    "send": 1,
    "text": ""
}
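
# botMessage["send"] acts as a dispatch flag: handlers set it to 0 when a
# message is queued, and send_to_telegram() resets it to 1 after delivery.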

logger = logging.getLogger()
RESTART = 15
OPTIONS = conf.OPTIONS.split("|")


def setup_logging():
    if conf.DEBUG:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    handler = logging.handlers.RotatingFileHandler('teslamate2telegram.log', maxBytes=10000000, backupCount=5)
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(funcName)s:%(lineno)s - %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)

def on_connect(client, userdata, flags, rc):
    global botMessage
    if conf.DEBUG:
        logger.info("Connected to the TeslaMate MQTT")
    client.subscribe("teslamate/cars/" + conf.CAR_ID + "/#")
    botMessage = {
        "send": 0,
        "text": "🎉 Ahora estás conectado con _TeslaMate_ 🎉"
    }
    send_to_telegram()


def on_disconnect(client, userdata, rc=0):
    if conf.DEBUG:
        logger.debug("Disconnected: result code " + str(rc))
    client.loop_stop()
    sleep(RESTART)
    create_mqtt_connection()

def on_message(client, userdata, message):
    global botMessage
    global data
    try:
        topic = str(message.topic).split('/')[3]
        payload = str(message.payload.decode("utf-8"))
        text = ""
        if topic == "display_name":
            data["display_name"] = payload
        elif topic == "version":
            data["software_current_version"] = payload
        elif topic == "battery_level":
            data["battery_level"] = payload
        elif topic == "usable_battery_level":
            data["usable_battery_level"] = payload
        elif topic == "inside_temp":
            data["inside_temp"] = payload
        elif topic == "outside_temp":
            data["outside_temp"] = payload
        elif topic == "longitude":
            data["longitude"] = payload
        elif topic == "latitude":
            data["latitude"] = payload
        elif topic == "update_version":
            if payload != "" and payload != data["software_current_version"]:
                data["software_new_version"] = payload
                text = "🎁 Actualización disponible: _{}_".format(payload)
        elif topic == "state":
            if data["state"] != payload:
                if payload == "online":
                    text = "✨ Está despierto"
                elif payload == "asleep":
                    text = "💤 Está dormido"
                    data['sent_resumen'] = 0
                elif payload == "suspended":
                    text = "🛏️ Está durmiéndose"
                elif payload == "charging":
                    text = "🔌 Está cargando"
                elif payload == "offline":
                    text = "🛰️ No está conectado"
                elif payload == "start":
                    text = "🚀 Está arrancando"
                elif payload == "driving":
                    text = "🏁 Está conduciendo"
                elif payload == "updating":
                    text = "🔄 Está actualizándose"
                else:
                    text = "⭕ Estado desconocido"
                data["state"] = payload
        elif topic == "geofence":
            data["geofence"] = payload
        if conf.DEBUG:
            logger.debug(topic + ": " + payload)
        if text != "":
            botMessage = {
                "send": 0,
                "text": text
            }
        if topic in OPTIONS and botMessage['send'] == 0 and botMessage['text'] != "":
            send_to_telegram()
    except Exception:
        logger.error("Exception in on_message(): %s %s %s",
                     sys.exc_info()[0], message.topic, message.payload)

def get_formated_text():
    if data["display_name"] != "":
        txt = "*{}* {}".format(data["display_name"], botMessage['text'])
    else:
        txt = "{}".format(botMessage['text'])
    return txt


def send_resume():
    global botMessage, data
    if conf.DEBUG:
        logger.info("Send resume to Telegram")
    if data["usable_battery_level"] != data["battery_level"]:
        bat = "Usable {0}% (disponible {1}%)".format(data["usable_battery_level"], data["battery_level"])
    else:
        bat = "Disponible {0}%".format(data["usable_battery_level"])
    if data["geofence"] != "":
        geo = "Estás en [{0}](https://maps.google.com/maps?q=loc:{1},{2})".format(data["geofence"], data["latitude"], data["longitude"])
    else:
        geo = "[Lat: {0}, Long: {1}](https://maps.google.com/maps?q=loc:{0},{1})".format(data["latitude"], data["longitude"])
    text = " -> Resumen:" \
        + "\n\n 🔋 {}".format(bat) \
        + "\n 🌡️ Interior {0}ºC".format(data["inside_temp"]) \
        + "\n 🌡️ Exterior {0}ºC".format(data["outside_temp"]) \
        + "\n 🌎 {}".format(geo)
    botMessage = {
        "send": 0,
        "text": text
    }
    data['sent_resumen'] = 1
    send_to_telegram()


def send_to_telegram():
    global botMessage
    send_text = 'https://api.telegram.org/bot' + conf.BOT_TOKEN + '/sendMessage?chat_id=' + conf.BOT_CHAT_ID + '&parse_mode=Markdown&disable_web_page_preview=True&text=' + get_formated_text()
    response = requests.get(send_text)
    if conf.DEBUG:
        logger.debug(data)
        logger.debug(response.text)
    botMessage = {"send": 1, "text": ""}

def create_mqtt_connection():
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_disconnect = on_disconnect
    try:
        if conf.DEBUG:
            logger.info("Trying to connect to the MQTT")
        client.connect(conf.MQTT_SERVER, int(conf.MQTT_PORT), 30)
        client.loop_start()
    except (ValueError, Exception):
        if conf.DEBUG:
            logger.error("Error trying to connect to the MQTT")
        sleep(RESTART)
        create_mqtt_connection()


def main():
    setup_logging()
    create_mqtt_connection()
    while True:
        sleep(5)
        if data['state'] == "asleep" and conf.SEND_RESUME and data['sent_resumen'] == 0:
            send_resume()


if __name__ == '__main__':
    main()
# ===========================================================================
# File: examples/utils.py (repo: CarbonEdge2021/SBTi-finance-tool, license: MIT)
# ===========================================================================
import pandas as pd
import numpy as np
import copy as copy
import random

def print_aggregations(aggregations):
    aggregations = aggregations.dict()
    print("{:<10s} {:<10s} {}".format('Timeframe', 'Scope', 'Temp score'))
    for time_frame, time_frame_values in aggregations.items():
        if time_frame_values:
            for scope, scope_values in time_frame_values.items():
                if scope_values:
                    print("{:<10s} {:<10s} {:.2f}".format(time_frame, scope, scope_values["all"]["score"]))


def print_percentage_default_scores(aggregations):
    aggregations = aggregations.dict()
    print("{:<10s} {:<10s} {}".format('Timeframe', 'Scope', '% Default score'))
    for time_frame, time_frame_values in aggregations.items():
        if time_frame_values:
            for scope, scope_values in time_frame_values.items():
                if scope_values:
                    print("{:<10s} {:<10s} {:.2f}".format(time_frame, scope, scope_values['influence_percentage']))


def print_scenario_gain(actual_aggregations, scenario_aggregations):
    print("Actual portfolio temperature score")
    print_aggregations(actual_aggregations)
    print()
    print("Scenario portfolio temperature score")
    print_aggregations(scenario_aggregations)


def print_grouped_scores(aggregations):
    aggregations = aggregations.dict()
    for time_frame, time_frame_values in aggregations.items():
        if time_frame_values:
            for scope, scope_values in time_frame_values.items():
                if scope_values:
                    print()
                    print("{:<25s}{}".format('', 'Temp score'))
                    print("{} - {}".format(time_frame, scope))
                    for group, aggregation in scope_values["grouped"].items():
                        print("{:<25s}{t:.2f}".format(group, t=aggregation["score"]))

def collect_company_contributions(aggregated_portfolio, amended_portfolio, analysis_parameters):
    timeframe, scope, grouping = analysis_parameters
    scope = str(scope[0])
    timeframe = str(timeframe[0]).lower()
    company_names = []
    relative_contributions = []
    temperature_scores = []
    for contribution in aggregated_portfolio[timeframe][scope]['all']['contributions']:
        company_names.append(contribution.company_name)
        relative_contributions.append(contribution.contribution_relative)
        temperature_scores.append(contribution.temperature_score)
    company_contributions = pd.DataFrame(data={'company_name': company_names, 'contribution': relative_contributions, 'temperature_score': temperature_scores})
    additional_columns = ['company_name', 'company_id', 'company_market_cap', 'investment_value'] + grouping
    company_contributions = company_contributions.merge(right=amended_portfolio[additional_columns], how='left', on='company_name')
    company_contributions['portfolio_percentage'] = 100 * company_contributions['investment_value'] / company_contributions['investment_value'].sum()
    company_contributions['ownership_percentage'] = 100 * company_contributions['investment_value'] / company_contributions['company_market_cap']
    company_contributions = company_contributions.sort_values(by='contribution', ascending=False)
    return company_contributions
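
# Illustrative call (argument values here are hypothetical):
#   contributions = collect_company_contributions(
#       aggregated_portfolio, amended_portfolio,
#       (['mid'], ['S1S2S3'], ['sector']))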

def plot_grouped_statistics(aggregated_portfolio, company_contributions, analysis_parameters):
    import matplotlib.pyplot as plt

    timeframe, scope, grouping = analysis_parameters
    scope = str(scope[0])
    timeframe = str(timeframe[0]).lower()
    sector_investments = company_contributions.groupby(grouping).investment_value.sum().values
    sector_contributions = company_contributions.groupby(grouping).contribution.sum().values
    sector_names = company_contributions.groupby(grouping).contribution.sum().keys()
    sector_temp_scores = [aggregation.score for aggregation in aggregated_portfolio[timeframe][scope]['grouped'].values()]
    sector_temp_scores, sector_names, sector_contributions, sector_investments = \
        zip(*sorted(zip(sector_temp_scores, sector_names, sector_contributions, sector_investments), reverse=True))
    fig = plt.figure(figsize=[10, 7.5])
    ax1 = fig.add_subplot(231)
    ax1.set_prop_cycle(plt.cycler("color", plt.cm.tab20.colors))
    ax1.pie(sector_investments, autopct='%1.0f%%', pctdistance=1.25, labeldistance=2)
    ax1.set_title("Investments", pad=15)
    ax2 = fig.add_subplot(232)
    ax2.set_prop_cycle(plt.cycler("color", plt.cm.tab20.colors))
    ax2.pie(sector_contributions, autopct='%1.0f%%', pctdistance=1.25, labeldistance=2)
    ax2.legend(labels=sector_names, bbox_to_anchor=(1.2, 1), loc='upper left')
    ax2.set_title("Contributions", pad=15)
    ax3 = fig.add_subplot(212)
    ax3.bar(sector_names, sector_temp_scores)
    ax3.set_title("Temperature scores per " + grouping[0])
    ax3.set_ylabel("Temperature score")
    for label in ax3.get_xticklabels():
        label.set_rotation(45)
        label.set_ha('right')
    ax3.axhline(y=1.5, linestyle='--', color='k')

def anonymize(portfolio, provider):
    portfolio_companies = portfolio['company_name'].unique()
    for index, company_name in enumerate(portfolio_companies):
        portfolio.loc[portfolio['company_name'] == company_name, 'company_id'] = 'C' + str(index + 1)
        portfolio.loc[portfolio['company_name'] == company_name, 'company_isin'] = 'C' + str(index + 1)
        provider.data['fundamental_data'].loc[provider.data['fundamental_data']['company_name'] == company_name, 'company_id'] = 'C' + str(index + 1)
        provider.data['fundamental_data'].loc[provider.data['fundamental_data']['company_name'] == company_name, 'company_isic'] = 'C' + str(index + 1)
        provider.data['target_data'].loc[provider.data['target_data']['company_name'] == company_name, 'company_id'] = 'C' + str(index + 1)
        portfolio.loc[portfolio['company_name'] == company_name, 'company_name'] = 'Company' + str(index + 1)
        provider.data['fundamental_data'].loc[provider.data['fundamental_data']['company_name'] == company_name, 'company_name'] = 'Company' + str(index + 1)
        provider.data['target_data'].loc[provider.data['target_data']['company_name'] == company_name, 'company_name'] = 'Company' + str(index + 1)
    for index, company_name in enumerate(provider.data['fundamental_data']['company_name'].unique()):
        if company_name not in portfolio['company_name'].unique():
            provider.data['fundamental_data'].loc[provider.data['fundamental_data']['company_name'] == company_name, 'company_id'] = '_' + str(index + 1)
            provider.data['fundamental_data'].loc[provider.data['fundamental_data']['company_name'] == company_name, 'company_name'] = 'Company_' + str(index + 1)
    return portfolio, provider

def plot_grouped_heatmap(grouped_aggregations, analysis_parameters):
    import matplotlib.pyplot as plt
    import matplotlib

    timeframe, scope, grouping = analysis_parameters
    scope = str(scope[0])
    timeframe = str(timeframe[0]).lower()
    group_1, group_2 = grouping
    aggregations = grouped_aggregations[timeframe][scope].grouped
    combinations = list(aggregations.keys())
    groups = {group_1: [], group_2: []}
    for combination in combinations:
        item_group_1, item_group_2 = combination.split('-')
        if item_group_1 not in groups[group_1]:
            groups[group_1].append(item_group_1)
        if item_group_2 not in groups[group_2]:
            groups[group_2].append(item_group_2)
    groups[group_1] = sorted(groups[group_1])
    groups[group_2] = sorted(groups[group_2])
    grid = np.zeros((len(groups[group_2]), len(groups[group_1])))
    for i, item_group_2 in enumerate(groups[group_2]):
        for j, item_group_1 in enumerate(groups[group_1]):
            key = item_group_1 + '-' + item_group_2
            if key in combinations:
                grid[i, j] = aggregations[item_group_1 + '-' + item_group_2].score
            else:
                grid[i, j] = np.nan
    current_cmap = copy.copy(matplotlib.cm.get_cmap('OrRd'))
    current_cmap.set_bad(color='grey', alpha=0.4)
    fig = plt.figure(figsize=[0.9 * len(groups[group_1]), 0.8 * len(groups[group_2])])
    ax = fig.add_subplot(111)
    im = ax.pcolormesh(grid, cmap=current_cmap)
    ax.set_xticks(0.5 + np.arange(0, len(groups[group_1])))
    ax.set_yticks(0.5 + np.arange(0, len(groups[group_2])))
    ax.set_yticklabels(groups[group_2])
    ax.set_xticklabels(groups[group_1])
    for label in ax.get_xticklabels():
        label.set_rotation(45)
        label.set_ha('right')
    fig.colorbar(im, ax=ax)
    ax.set_title("Temperature score per " + group_2 + " per " + group_1)

def get_contributions_per_group(aggregations, analysis_parameters, group):
    timeframe, scope, grouping = analysis_parameters
    scope = str(scope[0])
    timeframe = str(timeframe[0]).lower()
    aggregations = aggregations.dict()
    contributions = aggregations[timeframe][scope]['grouped'][group]['contributions']
    contributions = pd.DataFrame(contributions)
    columns = ['group'] + contributions.columns.tolist()
    contributions['group'] = group
    contributions = contributions[columns]
    contributions.drop(columns=['contribution'], inplace=True)
    return contributions
# ===========================================================================
# File: powerapi/database/socket_db.py (repo: EmileCadorel/powerapi, license: BSD-3-Clause)
# ===========================================================================
# Copyright (c) 2018, INRIA
# Copyright (c) 2018, University of Lille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from queue import Queue, Empty
from threading import Thread
from socket import socket
from . import IterDB, BaseDB
from powerapi.utils import JsonStream
BUFFER_SIZE = 4096
SOCKET_TIMEOUT = 0.5

class SocketDB(BaseDB):

    def __init__(self, port):
        BaseDB.__init__(self)
        self.asynchrone = True
        self.queue = None
        # self.loop = asyncio.get_event_loop()
        self.port = port
        self.server = None

    async def connect(self):
        self.queue = asyncio.Queue()
        # self.queue = Queue()
        self.server = await asyncio.start_server(self.gen_server_callback(), host='127.0.0.1', port=self.port)

    async def stop(self):
        self.server.close()
        await self.server.wait_closed()

    def iter(self, report_model, stream_mode):
        return IterSocketDB(report_model, stream_mode, self.queue)

    def gen_server_callback(self):
        async def callback(stream_reader, _):
            stream = JsonStream(stream_reader)
            while True:
                json_str = await stream.read_json_object()
                if json_str is None:
                    break
                await self.queue.put(json_str)
                # self.queue.put(json_str)
        return callback
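
# The callback above pushes raw JSON strings from each sensor connection onto
# the asyncio queue; IterSocketDB below pops and deserializes them on demand.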

class IterSocketDB(IterDB):
    """
    Iterator connected to a socket that receives reports from a sensor.
    """
    def __init__(self, report_model, stream_mode, queue):
        """
        """
        IterDB.__init__(self, None, report_model, stream_mode)
        self.queue = queue

    def __aiter__(self):
        """
        """
        return self

    async def __anext__(self):
        # def __next__(self):
        """
        """
        try:
            json = await asyncio.wait_for(self.queue.get(), 2)
            # json = self.queue.get_nowait()
            # self.queue.get()
            report = self.report_model.get_type().deserialize(self.report_model.from_json(json))
            return report
        # except Empty:
        except asyncio.TimeoutError:
            return None
# ===========================================================================
# File: src/data/collect_follow_list.py (repo: puevigreven/made_with_ml_incubator_project, license: MIT)
# ===========================================================================
import json
import logging
import os
import subprocess
import time
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from functools import partial
from os import listdir
from os.path import isfile, join
import nest_asyncio
import pandas as pd
import src.log_config as log_config
import twint
nest_asyncio.apply()
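
# twint drives its own asyncio event loop; nest_asyncio patches asyncio so a
# running loop can be re-entered (e.g. in notebooks or pool workers) safely.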
logging = log_config.get_logger()
logger = logging.getLogger(__name__)

class CollectFollowList:
    def __init__(self):
        self.data_folder_path = "../../data/raw/"
        self.follow_list_path = (
            "../../data/raw/follow_lists"
        )
        self.follow_list_dict = {}
        self.count = 0

    def get_follow_list(self, target_user):
        try:
            twint.output.clean_lists()
            logger.info("started: " + str(target_user))
            # global self.follow_list_dict
            # global self.count
            c = twint.Config()
            c.Username = str(target_user)
            c.Hide_output = True
            c.Store_object = True
            c.User_full = True
            c.Min_wait_time = 120
            twint.run.Following(c)
            follow_list = twint.output.follows_list
            self.follow_list_dict[str(target_user)] = follow_list
            twint.output.clean_lists()
            self.count = self.count + 1
            logger.info(
                "completed: "
                + str(target_user)
                + " follow list count: "
                + str(len(follow_list))
                + " count: "
                + str(self.count)
            )
        except Exception as e:
            logger.info(str(e))
            logger.info("Error occurred with follow list user: " + str(target_user))
            time.sleep(300)

    def process_main(self):
        # global self.follow_list_dict
        ts = time.time()
        with open(self.data_folder_path + "relevant_user.txt") as f:
            user_list = f.read().splitlines()
        user_list = user_list[:10]
        logger.info(len(user_list))
        with ProcessPoolExecutor() as executor:
            executor.map(self.get_follow_list, user_list)
        logger.info("Done")
        with open(self.data_folder_path + "result_follow_list.json", "w+") as fp:
            json.dump(self.follow_list_dict, fp)
        logger.info("Took " + str(time.time() - ts))
        # global self.follow_list_dict
        ts = time.time()
        with open(self.data_folder_path + "relevant_user.txt") as f:
            user_list = f.read().splitlines()
        user_list = user_list[:5]
        logger.info(len(user_list))
        with ThreadPoolExecutor(max_workers=20) as executor:
            executor.map(self.get_follow_list, user_list)
        logger.info("Done")
        with open(self.data_folder_path + "result_follow_list.json", "w+") as fp:
            json.dump(self.follow_list_dict, fp)
        logger.info("Took " + str(time.time() - ts))

    def check_if_file_present(self, username):
        # logger.info("check if file present")
        mypath = "/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/"
        onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
        downloaded_user = []
        for i in onlyfiles:
            fname = i.split(".")[0]
            downloaded_user.append(fname)
        if username in downloaded_user:
            return
        # raise an explicit error (a bare `raise` outside an except block only
        # produces a RuntimeError) so the retry loop below can catch it
        raise FileNotFoundError("follow list for " + username + " not downloaded")

    def subprocess_cmd(self, command):
        try:
            logger.info("start of the subprocess")
            process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
            self.proc_stdout = process.communicate()[0].strip()
            logger.info("end of the subprocess")
            logger.info(str(self.proc_stdout))
        except Exception as e:
            logger.error(str(e))
            logger.info("exception occurred")

    def get_follow_list_with_retry(self, username):
        count = 0
        while count < 5:
            logger.info("attempt: " + str(count) + " for user: " + username)
            count = count + 1
            try:
                command = (
                    "cd /home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/; twint -u "
                    + username
                    + " --following -o "
                    + username
                    + ".txt --csv"
                )
                # logger.info(command)
                self.subprocess_cmd(command)
                self.check_if_file_present(username)
            except Exception:
                logger.info("Sleeping for 1 min!")
                time.sleep(60)
                continue
            logger.info("Completed for user: " + username)
            break
def multiprocess_follow_list_with_retry(self):
onlyfiles = [
f
for f in listdir(self.follow_list_path)
if isfile(join(self.follow_list_path, f))
]
downloaded_user = []
for i in onlyfiles:
fname = i.split(".")[0]
downloaded_user.append(fname)
with open(self.data_folder_path + "refined_relevant_user_list.txt") as f:
user_list = f.read().splitlines()
logger.info("total users in refined list: " + str(len(user_list)))
logger.info("downloaded users in refined list: " + str(len(downloaded_user)))
user_list = list(set(user_list) - set(downloaded_user))
logger.info("to be downloaded: " + str(len(user_list)))
# user_list = user_list[:2]
# for i in user_list:
# self.get_follow_list_with_retry(i)
with ProcessPoolExecutor() as executor:
executor.map(self.get_follow_list_with_retry, user_list)
if __name__ == "__main__":
cfl = CollectFollowList()
# cfl.thread_main()
# list_of_user = [ "omarsar0", "lexfridman", "bhutanisanyam1", "drfeifei",]
cfl.multiprocess_follow_list_with_retry()
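
    # For reference, get_follow_list_with_retry() shells out to a command of
    # the form below (illustrative username taken from the sample list above):
    #   twint -u omarsar0 --following -o omarsar0.txt --csv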
| 34.682353 | 105 | 0.58192 | 694 | 5,896 | 4.723343 | 0.230548 | 0.076266 | 0.042709 | 0.038438 | 0.408786 | 0.344722 | 0.283405 | 0.283405 | 0.274558 | 0.274558 | 0 | 0.006173 | 0.313094 | 5,896 | 169 | 106 | 34.887574 | 0.80321 | 0.057836 | 0 | 0.279412 | 0 | 0 | 0.117085 | 0.043659 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051471 | false | 0 | 0.095588 | 0 | 0.161765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca112008ab3c6828677b5ed0441a31de80ccd33e | 3,434 | py | Python | app/blueprints/orders/schemas.py | CalebM1987/orders-sqlalchemy-example | 5a2210c0fce19cafa4267ae463841b3b057abac8 | [
"Apache-2.0"
] | null | null | null | app/blueprints/orders/schemas.py | CalebM1987/orders-sqlalchemy-example | 5a2210c0fce19cafa4267ae463841b3b057abac8 | [
"Apache-2.0"
] | null | null | null | app/blueprints/orders/schemas.py | CalebM1987/orders-sqlalchemy-example | 5a2210c0fce19cafa4267ae463841b3b057abac8 | [
"Apache-2.0"
] | null | null | null | from marshmallow import Schema, EXCLUDE, fields, post_load
import datetime
from .models import *
class SerializableDateTime(fields.DateTime):
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, datetime.datetime):
return value
return super()._deserialize(value, attr, data)
class OrderItemSchema(Schema):
class Meta:
unknown = EXCLUDE
OrderHeaderID = fields.Integer(description='order id', dump_only=True)
ProductName = fields.String(description='the product name')
Quantity = fields.Integer(description='the item quantity', missing=1)
UnitPrice = fields.Float(description='the price for the item', missing=0)
ItemTotal = fields.Float(description='the total base cost (unit cost * quantity)', missing=0, dump_only=True)
@post_load
def make_object(self, data, **kwargs):
return OrderItem(**data)
class OrderHeaderSchema(Schema):
class Meta:
unknown = EXCLUDE
OrderHeaderID = fields.Integer(description='the order id', dump_only=True)
CustomerID = fields.Integer(description='the customer id', dump_only=True)
CreationDate = SerializableDateTime(description='the order date')
Product = fields.String(description='the product label')
ItemTotal = fields.Float(description='the total base cost for order', dump_only=True)
TaxTotal = fields.Float(description='total tax for this order', dump_only=True)
ShippingTotal = fields.Float(description='the shipping total', missing=0)
GrandTotal = fields.Float(description='the grand total for order, includes tax and shipping costs', dump_only=True)
Items = fields.List(fields.Nested(OrderItemSchema), attribute='orderItems', many=True, missing=[])
    @post_load
    def make_object(self, data, **kwargs):
        # The nested OrderItemSchema's own post_load has already turned each
        # item into an OrderItem, keyed under the field's attribute name
        # ('orderItems'), so pop them out and append them as children.
        items = data.pop('orderItems', [])
        order = OrderHeader(**data)
        for item in items:
            # append child order item
            order.orderItems.append(item)
        return order
class CustomerSchema(Schema):
class Meta:
unknown = EXCLUDE
CustomerID = fields.Integer(description='the customer id', dump_only=True)
FirstName = fields.String(description='customer first name')
LastName = fields.String(description='customer last name')
FullName = fields.String(description='customer full name', dump_only=True)
ShipToState = fields.String(description='state for shipping')
Orders = fields.List(fields.Nested(OrderHeaderSchema), many=True, missing=[])
@post_load
def make_object(self, data, **kwargs):
customer = Customer(**{k: data.get(k) for k in data.keys() if k != 'Orders'})
return customer
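
# A minimal load/dump sketch (illustrative payload; assumes the models from
# .models can be instantiated outside a request/session context):
#
#   payload = {
#       "CreationDate": "2021-01-01T00:00:00",
#       "ShippingTotal": 5.0,
#       "Items": [{"ProductName": "widget", "Quantity": 2, "UnitPrice": 9.99}],
#   }
#   order = OrderHeaderSchema().load(payload)   # -> OrderHeader with OrderItems
#   OrderHeaderSchema().dump(order)             # -> plain dict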
class BaseResourceMutationSchema(Schema):
id = fields.Integer(description='the resource id', default=1)
status = fields.String(description='the operation status (success|error)', default="success")
class CreateResourceSchema(BaseResourceMutationSchema):
message = fields.String(description='the create message', default="Successfully Created Resource")
class UpdateResourceSchema(BaseResourceMutationSchema):
message = fields.String(description='the update message', default="Successfully Updated Resource")
class DeleteResourceSchema(BaseResourceMutationSchema):
message = fields.String(description='the delete message', default="Successfully Deleted Resource") | 41.373494 | 119 | 0.721899 | 388 | 3,434 | 6.342784 | 0.293814 | 0.096709 | 0.093458 | 0.063389 | 0.320601 | 0.266558 | 0.194636 | 0.194636 | 0.142219 | 0.088582 | 0 | 0.001755 | 0.170355 | 3,434 | 83 | 120 | 41.373494 | 0.862057 | 0.010483 | 0 | 0.233333 | 0 | 0 | 0.176678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.05 | 0.016667 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca11278032193161ac55c232cf25a8b385456e2f | 2,196 | py | Python | tests/unit/test_raspberry_pi_monitor.py | Kami/scalyr-agent-2-monitors | 367c57cd5489a4661ca6922c1c5ffa4c3b0969d6 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_raspberry_pi_monitor.py | Kami/scalyr-agent-2-monitors | 367c57cd5489a4661ca6922c1c5ffa4c3b0969d6 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_raspberry_pi_monitor.py | Kami/scalyr-agent-2-monitors | 367c57cd5489a4661ca6922c1c5ffa4c3b0969d6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Tomaz Muraus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from scalyr_agent.test_base import ScalyrTestCase
import mock
from custom_monitors.raspberry_pi_monitor import RaspberryPiMetricsMonitor
__all__ = ["RaspberryPiMetricsMonitor"]
BASE_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
FIXTURES_DIR = os.path.join(BASE_DIR, "../fixtures")
EXPECTED_VALUES = [
("rpi.status.throttled_state", "0"),
("rpi.soc.temperature", 49),
("rpi.arm.clock", 1800),
("rpi.core.clock", 500),
("rpi.h264.clock", 0),
("rpi.sd.clock", 250),
("rpi.vec.clock", 0),
("rpi.core.volts", 0.94),
("rpi.sdram_c.volts", 1.1),
("rpi.sdram_i.volts", 1.1),
("rpi.sdram_p.volts", 1.1),
]
class RaspberryPiMonitorTestCase(ScalyrTestCase):
def test_gather_sample(self):
monitor_config = {
"module": "raspberry_pi_monitor",
"vcgencmd_path": os.path.join(FIXTURES_DIR, "mock_vcgencmd")
}
mock_logger = mock.Mock()
monitor = RaspberryPiMetricsMonitor(monitor_config, mock_logger)
self.assertEqual(mock_logger.emit_value.call_count, 0)
monitor.gather_sample()
self.assertEqual(mock_logger.emit_value.call_count, 11)
        for index, (expected_metric_name, expected_metric_value) in enumerate(EXPECTED_VALUES):
            actual_metric_name = mock_logger.emit_value.call_args_list[index][0][0]
            actual_metric_value = mock_logger.emit_value.call_args_list[index][0][1]
            self.assertEqual(expected_metric_name, actual_metric_name)
            self.assertEqual(expected_metric_value, actual_metric_value)
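
# A minimal sketch for running this module directly; assumes ScalyrTestCase is
# unittest-compatible (it extends unittest.TestCase in scalyr-agent-2).
if __name__ == "__main__":
    import unittest
    unittest.main()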
| 33.784615 | 84 | 0.702186 | 295 | 2,196 | 5.010169 | 0.447458 | 0.040595 | 0.037889 | 0.051421 | 0.128552 | 0.108254 | 0.108254 | 0.108254 | 0.050068 | 0 | 0 | 0.024636 | 0.186703 | 2,196 | 64 | 85 | 34.3125 | 0.802912 | 0.25 | 0 | 0 | 0 | 0 | 0.162278 | 0.031231 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.026316 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca122ad15974c3c8846065ba5622e84a28e0998f | 5,271 | py | Python | rally_openstack/task/scenarios/nova/server_groups.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/nova/server_groups.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/nova/server_groups.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | 1 | 2021-08-10T03:11:51.000Z | 2021-08-10T03:11:51.000Z | # Copyright 2017: Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.nova import utils
LOG = logging.getLogger(__name__)
"""Scenarios for Nova Group servers."""
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaServerGroups.create_and_list_server_groups",
platform="openstack")
class CreateAndListServerGroups(utils.NovaScenario):
def run(self, policies=None, all_projects=False, kwargs=None):
"""Create a server group, then list all server groups.
Measure the "nova server-group-create" and "nova server-group-list"
command performance.
:param policies: Server group policy
:param all_projects: If True, display server groups from all
projects(Admin only)
:param kwargs: The server group specifications to add.
DEPRECATED, specify arguments explicitly.
"""
if kwargs is None:
kwargs = {
"policies": policies
}
else:
LOG.warning("The argument `kwargs` is deprecated since"
" Rally 0.10.0. Specify all arguments from it"
" explicitly.")
server_group = self._create_server_group(**kwargs)
msg = ("Server Groups isn't created")
self.assertTrue(server_group, err_msg=msg)
server_groups_list = self._list_server_groups(all_projects)
msg = ("Server Group not included into list of server groups\n"
"Created server group: {}\n"
"list of server groups: {}").format(server_group,
server_groups_list)
self.assertIn(server_group, server_groups_list, err_msg=msg)
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaServerGroups.create_and_get_server_group",
platform="openstack")
class CreateAndGetServerGroup(utils.NovaScenario):
def run(self, policies=None, kwargs=None):
"""Create a server group, then get its detailed information.
Measure the "nova server-group-create" and "nova server-group-get"
command performance.
:param policies: Server group policy
:param kwargs: The server group specifications to add.
DEPRECATED, specify arguments explicitly.
"""
if kwargs is None:
kwargs = {
"policies": policies
}
else:
LOG.warning("The argument `kwargs` is deprecated since"
" Rally 0.10.0. Specify all arguments from it"
" explicitly.")
server_group = self._create_server_group(**kwargs)
msg = ("Server Groups isn't created")
self.assertTrue(server_group, err_msg=msg)
server_group_info = self._get_server_group(server_group.id)
self.assertEqual(server_group.id, server_group_info.id)
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaServerGroups.create_and_delete_server_group",
platform="openstack")
class CreateAndDeleteServerGroup(utils.NovaScenario):
def run(self, policies=None, kwargs=None):
"""Create a server group, then delete it.
Measure the "nova server-group-create" and "nova server-group-delete"
command performance.
:param policies: Server group policy
:param kwargs: The server group specifications to add.
DEPRECATED, specify arguments explicitly.
"""
if kwargs is None:
kwargs = {
"policies": policies
}
else:
LOG.warning("The argument `kwargs` is deprecated since"
" Rally 0.10.0. Specify all arguments from it"
" explicitly.")
server_group = self._create_server_group(**kwargs)
msg = ("Server Group isn't created")
self.assertTrue(server_group, err_msg=msg)
self._delete_server_group(server_group.id)
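
# A minimal sketch of a rally task file that would drive one of these scenarios
# (illustrative values, not taken from this repository):
#
#   {
#       "NovaServerGroups.create_and_list_server_groups": [{
#           "args": {"policies": ["affinity"]},
#           "runner": {"type": "constant", "times": 10, "concurrency": 2}
#       }]
#   }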
| 39.931818 | 78 | 0.642003 | 596 | 5,271 | 5.558725 | 0.251678 | 0.12617 | 0.038032 | 0.02626 | 0.648053 | 0.597344 | 0.597344 | 0.575913 | 0.559915 | 0.559915 | 0 | 0.005175 | 0.266743 | 5,271 | 131 | 79 | 40.236641 | 0.852005 | 0.293682 | 0 | 0.623188 | 0 | 0 | 0.245132 | 0.039233 | 0 | 0 | 0 | 0 | 0.072464 | 1 | 0.043478 | false | 0 | 0.072464 | 0 | 0.15942 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca12dc98ed1d3df8ddbdad5e413c00b24eb6f56d | 2,696 | py | Python | 1_open3d/20_colored_point_cloud_registration.py | lyffly/Python-3DPointCloud-and-RGBD | b3b973ec96045a34d3540522a115c5a77f4de136 | [
"Apache-2.0"
] | 16 | 2019-10-28T01:17:09.000Z | 2022-01-20T08:26:06.000Z | 1_open3d/20_colored_point_cloud_registration.py | lyffly/Python-3DPointCloud-and-RGBD | b3b973ec96045a34d3540522a115c5a77f4de136 | [
"Apache-2.0"
] | null | null | null | 1_open3d/20_colored_point_cloud_registration.py | lyffly/Python-3DPointCloud-and-RGBD | b3b973ec96045a34d3540522a115c5a77f4de136 | [
"Apache-2.0"
] | 3 | 2020-04-07T08:48:28.000Z | 2021-09-22T14:47:42.000Z | # coding = utf-8
# coding by liuyunfei
# original code from the Open3D samples (GitHub)
import numpy as np
import open3d as op3
import matplotlib.pyplot as plt
import copy
import time
def draw_registration_result_origin_color(source,target,transformation):
source_temp = copy.deepcopy(source)
source_temp.transform(transformation)
op3.visualization.draw_geometries([source_temp,target])
if __name__ == "__main__":
    # 1. Read the two point clouds and show their initial poses
source = op3.io.read_point_cloud("demodata/ColoredICP/frag_115.ply")
target = op3.io.read_point_cloud("demodata/ColoredICP/frag_116.ply")
#op3.visualization.draw_geometries([source])
#op3.visualization.draw_geometries([target])
    # Draw the initial alignment
current_transformation = np.identity(4)
draw_registration_result_origin_color(source,target,current_transformation)
    # Initial transformation for point-to-plane ICP
current_transformation = np.identity(4)
    # 2. Point-to-plane ICP
result_icp = op3.registration.registration_icp(
source,
target,
0.02,
current_transformation,
op3.registration.TransformationEstimationPointToPlane()
)
print(result_icp)
draw_registration_result_origin_color(source,target,result_icp.transformation)
    # Parameters for colored point cloud registration
voxel_radius = [0.04,0.02,0.01]
max_iter=[50,30,14]
current_transformation = np.identity(4)
    # 3. Colored point cloud registration, run coarse-to-fine over three scales
for scale in range(3):
iter = max_iter[scale]
radius = voxel_radius[scale]
print([iter,radius,scale])
print("3.1 downsample with a voxel size of %.2f" % radius)
        # Downsample
source_down = source.voxel_down_sample(radius)
target_down = target.voxel_down_sample(radius)
print("3.2 Estimate normal.")
        # Estimate normals
source_down.estimate_normals(
op3.geometry.KDTreeSearchParamHybrid(radius=radius*2,max_nn=30)
)
target_down.estimate_normals(
op3.geometry.KDTreeSearchParamHybrid(radius=radius*2,max_nn=30)
)
print("3.3 Applying colored point cloud registration")
        # Run the colored ICP step
result_icp = op3.registration.registration_colored_icp(
source_down,
target_down,
radius,
current_transformation,
op3.registration.ICPConvergenceCriteria(
relative_fitness=1e-6,
relative_rmse=1e-6,
max_iteration=iter
)
)
current_transformation = result_icp.transformation
print(result_icp)
draw_registration_result_origin_color(source,target,result_icp.transformation)
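    # 4. Persist the aligned result; a minimal sketch, where "registered.ply"
    # is an illustrative output path and `+` concatenates Open3D point clouds.
    source_aligned = copy.deepcopy(source)
    source_aligned.transform(current_transformation)
    op3.io.write_point_cloud("registered.ply", source_aligned + target)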
| 28.989247 | 82 | 0.673961 | 305 | 2,696 | 5.714754 | 0.347541 | 0.084337 | 0.050488 | 0.064257 | 0.414228 | 0.276535 | 0.276535 | 0.2249 | 0.177854 | 0.177854 | 0 | 0.030852 | 0.242582 | 2,696 | 92 | 83 | 29.304348 | 0.822723 | 0.116469 | 0 | 0.196429 | 0 | 0 | 0.07481 | 0.02705 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.089286 | 0 | 0.107143 | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca14df6be2f407c519068e98b8fe3403b1f317be | 3,189 | py | Python | schevo/test/test_valid_values_resolve.py | Schevo/schevo | d57a41f8b7b514ed48dc0164dcd3412a89e9873b | [
"MIT"
] | 1 | 2020-09-05T00:47:50.000Z | 2020-09-05T00:47:50.000Z | schevo/test/test_valid_values_resolve.py | Schevo/schevo | d57a41f8b7b514ed48dc0164dcd3412a89e9873b | [
"MIT"
] | null | null | null | schevo/test/test_valid_values_resolve.py | Schevo/schevo | d57a41f8b7b514ed48dc0164dcd3412a89e9873b | [
"MIT"
] | null | null | null | """Tests for using tuples in valid_values"""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
from schevo.error import EntityDoesNotExist
from schevo.test import CreatesSchema, raises
class BaseValidValuesResolve(CreatesSchema):
body = '''
class Foo(E.Entity):
name = f.string()
_key(name)
_initial = [
('foo 1',),
('foo 2',),
]
class Bar(E.Entity):
length = f.integer()
_key(length)
_initial = [
dict(length=123),
dict(length=456),
]
class Baz(E.Entity):
foo = f.entity('Foo', required=False,
valid_values=[('foo 2',)])
bar = f.entity('Bar', required=False,
valid_values=[(123,)])
foo_or_bar = f.entity('Foo', 'Bar', required=False,
valid_values=[('Foo', ('foo 1',)),
('Bar', (456,)),
])
class Bad(E.Entity):
bar = f.entity('Bar', required=False,
valid_values=[(789,)])
'''
def test_resolvable(self):
foo_1 = db.Foo.findone(name='foo 1')
foo_2 = db.Foo.findone(name='foo 2')
bar_123 = db.Bar.findone(length=123)
bar_456 = db.Bar.findone(length=456)
# Check Create transaction.
tx = db.Baz.t.create()
assert set(tx.f.foo.valid_values) == set([foo_2])
assert set(tx.f.bar.valid_values) == set([bar_123])
assert set(tx.f.foo_or_bar.valid_values) == set([foo_1, bar_456])
tx.foo = foo_1
assert raises(ValueError, db.execute, tx)
tx.bar = bar_456
assert raises(ValueError, db.execute, tx)
tx.foo_or_bar = foo_2
assert raises(ValueError, db.execute, tx)
tx.foo_or_bar = bar_123
assert raises(ValueError, db.execute, tx)
tx.foo = foo_2
tx.bar = bar_123
tx.foo_or_bar = foo_1
baz = db.execute(tx)
# Check update transaction.
tx = baz.t.update()
assert set(tx.f.foo.valid_values) == set([foo_2])
assert set(tx.f.bar.valid_values) == set([bar_123])
assert set(tx.f.foo_or_bar.valid_values) == set([foo_1, bar_456])
tx.foo = foo_1
assert raises(ValueError, db.execute, tx)
tx.bar = bar_456
assert raises(ValueError, db.execute, tx)
tx.foo_or_bar = foo_2
assert raises(ValueError, db.execute, tx)
tx.foo_or_bar = bar_123
assert raises(ValueError, db.execute, tx)
tx.foo = foo_2
tx.bar = bar_123
tx.foo_or_bar = bar_456
db.execute(tx)
def test_unresolvable(self):
# Unresolvable at first.
assert raises(ValueError, db.Bad.t.create)
# Make it resolvable.
bar_789 = db.execute(db.Bar.t.create(length=789))
bad = db.execute(db.Bad.t.create(bar=bar_789))
# class TestValidValuesResolve1(BaseValidValuesResolve):
# include = True
# format = 1
class TestValidValuesResolve2(BaseValidValuesResolve):
include = True
format = 2
| 27.491379 | 73 | 0.555033 | 405 | 3,189 | 4.214815 | 0.182716 | 0.063269 | 0.064441 | 0.126538 | 0.482132 | 0.422964 | 0.422964 | 0.422964 | 0.379613 | 0.379613 | 0 | 0.046147 | 0.320477 | 3,189 | 115 | 74 | 27.730435 | 0.741578 | 0.090624 | 0 | 0.4 | 0 | 0 | 0.317394 | 0.022523 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.026667 | false | 0 | 0.026667 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca15c2826574dbb584e7daadacf5cc2051a092bd | 512 | py | Python | src/sentry/web/frontend/organization_home.py | jaysoffian/sentry | 80b4d3d89b8a51a0573d1aca9243255b34f0a852 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/organization_home.py | jaysoffian/sentry | 80b4d3d89b8a51a0573d1aca9243255b34f0a852 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/organization_home.py | jaysoffian/sentry | 80b4d3d89b8a51a0573d1aca9243255b34f0a852 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from sentry.models import Team
from sentry.web.frontend.base import OrganizationView
class OrganizationHomeView(OrganizationView):
def get(self, request, organization):
team_list = Team.objects.get_for_user(
organization=organization,
user=request.user,
with_projects=True,
)
context = {
'team_list': team_list,
}
return self.respond('sentry/organization-home.html', context)
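
# A minimal sketch of how this view is typically wired up (illustrative
# pattern; the real route lives in sentry's central urls.py):
#
#   url(r'^(?P<organization_slug>[\w_-]+)/$',
#       OrganizationHomeView.as_view(),
#       name='sentry-organization-home')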
| 25.6 | 69 | 0.666016 | 53 | 512 | 6.226415 | 0.566038 | 0.072727 | 0.072727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.255859 | 512 | 19 | 70 | 26.947368 | 0.866142 | 0 | 0 | 0 | 0 | 0 | 0.074219 | 0.056641 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca1a2d87e331ba4796b185ef1dd36eee4f8da2b8 | 28,671 | py | Python | girc/client.py | ekimekim/girc | 505ec0eb3c0a46adfad62383d9634fe971a430d5 | [
"MIT"
] | 1 | 2016-02-09T10:04:29.000Z | 2016-02-09T10:04:29.000Z | girc/client.py | ekimekim/girc | 505ec0eb3c0a46adfad62383d9634fe971a430d5 | [
"MIT"
] | null | null | null | girc/client.py | ekimekim/girc | 505ec0eb3c0a46adfad62383d9634fe971a430d5 | [
"MIT"
] | null | null | null |
import errno
import json
import logging
import random
import string
import time
import weakref
from base64 import b64encode, b64decode
import gevent.queue
import gevent.pool
import gevent.event
import gevent.lock
from gevent import socket
from gevent import ssl
from girc import message
from girc import replycodes
from girc.handler import Handler, BoundHandler
from girc.server_properties import ServerProperties
from girc.channel import Channel
from girc.chunkprioqueue import ChunkedPriorityQueue
from girc.common import send_fd, recv_fd
DEFAULT_PORT = 6667
class ConnectionClosed(Exception):
def __str__(self):
return "The connection was unexpectedly closed"
class Client(object):
_socket = None
started = False
# some insight into the state of _recv_loop to allow for smooth connection handoff
_recv_buf = ''
_kill_recv = False
_stopping = False
REGISTRATION_TIMEOUT = 5
WAIT_FOR_MESSAGES_TIMEOUT = 20
PING_IDLE_TIME = 60
PING_TIMEOUT = 30
def __init__(self, hostname, nick, port=DEFAULT_PORT, password=None, nickserv_password=None,
ident=None, real_name=None, stop_handler=[], logger=None, version='girc', time='local',
twitch=False, ssl=False):
"""Create a new IRC connection to given host and port.
ident and real_name are optional args that control how we report ourselves to the server
(they both default to nick).
Similarly, version and time control our response to interrogative commands. Either can be set
None to disable response. Time defaults to 'local' (use local time) but 'utc' is also an option.
nick is the initial nick we set, though of course that can be changed later.
password is the server password, ie. as set by a PASS command.
nickserv_password will be sent in a Privmsg to NickServ with "IDENTIFY" after connecting.
stop_handler is a callback that will be called upon the client exiting for any reason
The callback should take args (client, ex) where client is this client and ex is the fatal error,
or None for a clean disconnect.
You may alternatively pass in a list of multiple callbacks.
Note that after instantiation you can add/remove further disconnect callbacks
by manipulating the client.stop_handlers set.
twitch=True sets some special behaviour for better operation with twitch.tv's unique variant of IRC.
ssl=True will cause the socket to connect using SSL.
ssl='insecure' will connect using SSL, however no attempt will be made to make the connection secure!
"""
self.hostname = hostname
self.port = port
self.password = password
self.nickserv_password = nickserv_password
self.ident = ident or nick
self.real_name = real_name or nick
self.version = version
self.time = time
self.ssl = ssl
self._channels = {}
self._users = weakref.WeakValueDictionary()
self._recv_queue = gevent.queue.Queue()
self._send_queue = ChunkedPriorityQueue()
# Message priorities are used as follows:
# -2: Critical registration messages with strict ordering
# -1: PONGs sent in reply to PINGs, required to not get disconnected
# 0: Other high-priority tasks - changing NICK, sending idle PINGs, etc
# >0: User messages
self._group = gevent.pool.Group()
self._activity = gevent.event.Event() # set each time we send or recv, for idle watchdog
self._stopped = gevent.event.AsyncResult() # contains None if exited cleanly, else set with exception
self.message_handlers = set() # set of Handler objects
self.stop_handlers = set()
self.server_properties = ServerProperties()
# NOTE: An aside about nicks
		# When our nick is changing, race conditions mean we aren't sure if the server is expecting
# our old nick or our new nick. We have a few different attributes to address this:
# self.nick: a lock-protected lookup of self._nick (the lock is self._nick_lock),
# it represents what we should refer to ourself as (and blocks if it's ambiguous)
# self._nick: what we think the server thinks our nick is. when our nick is in the process
# of changing, this is the OLD nick.
# self._new_nick: None unless it is in the process of changing. When changing, the nick we
# are switching to.
# We can only attempt to change our nick when it is not in the middle of changing (we do this
# by holding self._nick_lock). If we get a forced nick change x -> y while changing, it changes
# self._nick if x is the old nick, or self._new_nick if x is the new nick.
# self.matches_nick() will always check both _nick and _new_nick.
self._nick = nick
self._nick_lock = gevent.lock.RLock()
self._new_nick = None
if not logger:
logger = logging.getLogger(__name__).getChild(type(self).__name__)
self.logger = logger
if callable(stop_handler):
self.stop_handlers.add(stop_handler)
else:
self.stop_handlers.update(stop_handler)
if twitch:
message.Message(self, 'CAP', 'REQ', 'twitch.tv/membership twitch.tv/commands twitch.tv/tags').send()
if self.nickserv_password:
self.msg('NickServ', 'IDENTIFY {}'.format(self.nickserv_password), priority=0)
# Register Handler methods
Handler.register_all(self, self)
@classmethod
def _from_handoff(cls, sock, recv_buf, channels, **init_args):
"""Alternate constructor that takes the args needed to resume a connection handed off
from another Client instance. Used to implement graceful restart without dropping connections.
sock is the connection to inherit.
recv_buf may contain data that was read from the socket but not processed (eg. a partial line)
channels is a list of joined channels
init args must match the ones from the handing off Client.
Handing off of an SSL socket is not supported.
"""
client = cls(**init_args)
client.logger.info("Initializing client from handoff args ({} channels)".format(len(channels)))
client._socket = sock
client._recv_buf = b64decode(recv_buf)
client.stop_handlers.add(lambda client: client._socket.close())
for name in channels:
# name may have come from a JSON object. Assume utf8.
if isinstance(name, unicode):
name = name.encode('utf8')
channel = client.channel(name)
channel._join() # note we don't send a JOIN
message.Message(client, 'NAMES', name).send() # re-sync user list
def handoff_start():
client.started = True
client.logger.debug("Starting using alternate handoff start")
client._start_greenlets()
client.start = handoff_start
return client
@classmethod
def from_instance_handoff(cls, client):
"""Takes a running client instance and creates a new Client instance without closing the connection."""
client._prepare_for_handoff()
new_client = cls._from_handoff(client._socket, **client._get_handoff_data())
client._finalize_handoff()
return new_client
@classmethod
def from_sock_handoff(cls, recv_sock, **init_args):
"""Takes a unix socket connection and uses it to receive a connection handoff.
Expects the remote process to send it a socket fd and handoff data - see client.handoff_to_sock()
While most init args are provided by handoff data, others (eg. logger) can be passed in as extra kwargs.
Note this method will block until the connection is closed.
"""
connection = None
try:
# receive fd from other process
fd = recv_fd(recv_sock)
connection = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
# receive other args as json
handoff_data = ''
s = True
while s: # loop until closed
s = recv_sock.recv(4096)
handoff_data += s
handoff_data = json.loads(handoff_data)
handoff_data = {k: v.encode('utf-8') if isinstance(v, unicode) else v
for k, v in handoff_data.items()}
handoff_data.update(init_args)
return cls._from_handoff(connection, **handoff_data)
except Exception:
if connection:
connection.close()
raise
@property
def stopped(self):
return self._stopped.ready()
@property
def nick(self):
"""Get our current nick. May block if it is in the middle of being changed."""
with self._nick_lock:
return self._nick
@nick.setter
def nick(self, new_nick):
"""Change the nick safely. Note this will block until the change is sent, acknowledged
and self.nick is updated. Will attempt to increment nick if it's already taken."""
for _ in range(6):
# Note we max out after 5 increments - if we're still getting conflicts with that search space (10^5),
# something weirder is going on (like the nick not being valid, which we don't have detection for).
if self.try_change_nick(new_nick):
return
new_nick = self.increment_nick(new_nick)
def try_change_nick(self, new_nick):
"""Change the nick safely. Note this will block until the change is sent, acknowledged
and self.nick is updated. Returns True if we managed to change to the desired nick,
False otherwise.
"""
with self._nick_lock:
self.logger.debug("Attempting to change nick {!r} -> {!r}".format(self._nick, new_nick))
try:
self._new_nick = new_nick
nick_msg = message.Nick(self, new_nick)
try:
nick_msg.send(priority=0)
# by waiting for messages, we force ourselves to wait until the Nick() has been processed
if not self.wait_for_messages(priority=0):
self.logger.warning((
"Timed out waiting for sync after sending NICK for change {!r}->{!r} - "
"assuming NICK change went through (this means we might have the wrong nick!)"
).format(self._nick, new_nick))
except Exception:
self.quit("Unrecoverable error while changing nick", priority=-1)
raise
self.logger.debug("Changed nick {!r} -> {!r}".format(self._nick, self._new_nick))
self._nick = self._new_nick # note that self._new_nick may not be new_nick, see forced_nick_change()
return self._nick == new_nick
finally:
# either we completed successfully or we aborted
# either way, we need to no longer be in the middle of changing nicks
self.logger.debug("Clearing _new_nick")
self._new_nick = None
def matches_nick(self, value):
"""A helper function for use in match args. It takes a value and returns whether that value
matches the client's current nick.
Note that you should use this, NOT self.nick, for checking incoming messages.
This function will continue working when self.nick is ambiguous, whereas the latter will block.
"""
return value in (self._nick, self._new_nick)
def increment_nick(self, nick):
"""For a given nick, return "incremented" nick by following rules:
If nick is of form /.*\|\d+/ (ie. ends in '|' then a number), add random digit to number
Otherwise, append | and a random digit.
This keeps nick length minimized while still performing well when 100s of clients are all
following the same algorithm for one constested nick.
"""
parts = nick.split('|')
if parts[-1] and not parts[-1].isdigit():
parts.append('')
parts[-1] += random.choice(string.digits)
return '|'.join(parts)
def handler(self, callback=None, **match_args):
"""Add callback to be called upon a matching message being received.
See geventirc.message.match() for match_args.
Callback should take args (client, message) and may return True to de-register itself.
If callback is not given, returns a decorator.
ie.
def foo(client, message):
...
client.handler(foo, **match_args)
is identical to
@client.handler(**match_args)
def foo(client, message):
...
For more detail, see handler.Handler()
This function simply creates a Handler() and immediately registers it with this client.
"""
return Handler(client=self, callback=callback, **match_args)
def _send(self, message, callback, priority):
"""A low level interface to send a message. You normally want to use Message.send() instead.
Callback is called after message is sent, and takes args (client, message).
Callback may be None.
"""
self.logger.debug("Queuing message {} at prio {}".format(message, priority))
if self._stopping:
self.logger.debug("Dropping message as we are stopping")
return
self._send_queue.put((priority, (message, callback)))
def _start_greenlets(self):
"""Start standard greenlets that should always run, and put them in a dict indexed by their name,
to allow special operations to refer to them specifically."""
self._named_greenlets = {
name: self._group.spawn(getattr(self, name))
for name in ('_send_loop', '_recv_loop', '_idle_watchdog')
}
def start(self):
if self.stopped:
self.logger.info("Ignoring start() - already stopped (please create a new Client instead)")
return
if self.started:
self.logger.info("Ignoring start() - already started")
return
self.started = True
self.logger.info("Starting client for {self.nick} on {self.hostname}:{self.port}".format(self=self))
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.ssl:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if self.ssl == 'insecure' else ssl.create_default_context()
self._socket = context.wrap_socket(self._socket, server_hostname=self.hostname)
self.stop_handlers.add(lambda self: self._socket.close())
self._socket.connect((self.hostname, self.port))
except Exception as ex:
self.logger.exception("Error while connecting client")
self.stop(ex)
raise
# registration is a delicate dance...
with self._nick_lock, self._send_queue.limit_to(-1):
# by limiting to 0, we block all messages except pongs and registration
reg_done = gevent.event.Event()
reg_handlers = set()
@self.handler(command=replycodes.replies.WELCOME, sync=True)
def reg_got_welcome(client, msg):
reg_done.set()
for handler in reg_handlers:
handler.unregister(self)
reg_handlers.add(reg_got_welcome)
# Some anal servers require sending registration messages in a precise order
# and/or can't handle PINGs being sent during registration. This makes the standard
# nick-setting behaviour unsuitable. We're pretty sure we won't get a NICK
# forced change from the server during registration, so we only need to special-case
# handle a NICKNAMEINUSE message, and send the Nick() message manually.
@self.handler(command=replycodes.errors.NICKNAMEINUSE, sync=True)
def reg_nick_in_use(client, msg):
self._nick = self.increment_nick(self._nick)
message.Nick(self, self._nick).send(priority=-2)
reg_handlers.add(reg_nick_in_use)
if self.password:
message.Message(self, 'PASS', self.password).send(priority=-2)
message.Nick(self, self._nick).send(priority=-2)
message.User(self, self.ident, self.real_name).send(priority=-2)
self._start_greenlets()
if not reg_done.wait(self.REGISTRATION_TIMEOUT):
ex = Exception("Registration timeout")
self.stop(ex)
raise ex
self.logger.debug("Registration complete")
def _idle_watchdog(self):
"""Sends a ping if no activity for PING_IDLE_TIME seconds.
Disconnect if there is no response within PING_TIMEOUT seconds."""
try:
while True:
if self._activity.wait(self.PING_IDLE_TIME):
self._activity.clear()
continue
self.logger.info("No activity for {}s, sending PING".format(self.PING_IDLE_TIME))
if not self.wait_for_messages(self.PING_TIMEOUT, priority=0):
self.logger.error("No response to watchdog PING after {}s".format(self.PING_TIMEOUT))
self.stop(ConnectionClosed())
return
except Exception as ex:
self.logger.exception("error in _idle_watchdog")
self.stop(ex)
def _recv_loop(self):
error = None
try:
while True:
if self._kill_recv:
return
self._recv_waiting = True
try:
data = self._socket.recv(4096)
except socket.error as ex:
if ex.errno == errno.EINTR: # retry on EINTR
continue
raise
finally:
self._recv_waiting = False
if not data:
self.logger.info("no data from recv, socket closed")
break
lines = (self._recv_buf + data).split('\r\n')
self._recv_buf = lines.pop() # everything after final \r\n
if lines:
self._activity.set()
for line in lines:
self._process(line)
except Exception as ex:
self.logger.exception("error in _recv_loop")
error = ex
if self._recv_buf:
self.logger.warning("recv stream cut off mid-line, unused data: {!r}".format(self._recv_buf))
self.stop(error or ConnectionClosed())
def _send_loop(self):
send_queue = self._send_queue
try:
while True:
priority, (message, callback) = send_queue.get()
line = "{}\r\n".format(message.encode())
self.logger.debug("Sending message: {!r}".format(line))
try:
self._socket.sendall(line)
except socket.error as ex:
if ex.errno == errno.EPIPE:
self.logger.info("failed to send, socket closed")
self.stop(ConnectionClosed())
return
raise
self._activity.set()
if callback is not None:
self._group.spawn(callback, self, message)
if message.command == 'QUIT':
self.logger.info("QUIT sent, client shutting down")
self.stop()
return
except Exception as ex:
self.logger.exception("error in _send_loop")
self.stop(ex)
def _process(self, line):
self.logger.debug("Received message: {!r}".format(line))
line = line.strip()
if not line:
return
try:
msg = message.decode(line, self)
except message.InvalidMessage:
self.logger.warning("Could not decode message from server: {!r}".format(line), exc_info=True)
return
self.logger.debug("Getting handlers for message: {}".format(msg))
self._dispatch_handlers(msg)
def _dispatch_handlers(self, msg):
"""Carefully builds a set of greenlets for all message handlers, obeying ordering metadata for each handler.
Returns when all sync=True handlers have been executed."""
def normalize(handler):
# handler might be a Handler, BoundHandler or "sync"
return handler.handler if isinstance(handler, BoundHandler) else handler
# build dependency graph
graph = {handler: set() for handler in self.message_handlers}
graph['sync'] = set()
for handler in self.message_handlers:
for other in map(normalize, handler.after):
if other in graph:
graph[handler].add(other)
for other in map(normalize, handler.before):
if other in graph:
graph[other].add(handler)
# check for cycles
def check_cycles(handler, chain=()):
if handler in chain:
chain_text = " -> ".join(map(str, chain + (handler,)))
raise ValueError("Dependency cycle in handlers: {}".format(chain_text))
chain += handler,
for dep in graph[handler]:
check_cycles(dep, chain)
for handler in graph:
check_cycles(handler)
# set up the greenlets
greenlets = {}
def wait_and_handle(handler):
for dep in graph[handler]:
greenlets[dep].join()
return handler.handle(self, msg)
def wait_for_sync():
for dep in graph['sync']:
greenlets[dep].join()
for handler in self.message_handlers:
greenlets[handler] = self._group.spawn(wait_and_handle, handler)
greenlets['sync'] = self._group.spawn(wait_for_sync)
# wait for sync to finish
greenlets['sync'].get()
def stop(self, ex=None):
if self._stopping:
return self.wait_for_stop()
self._stopping = True
# we spawn a child greenlet so things don't screw up if current greenlet is in self._group
def _stop():
self._group.kill()
for fn in self.stop_handlers:
fn(self)
# post-stop: we clear a few structures to break reference loops
# since they no longer make sense.
for channel in self._channels.values():
channel.client = None
for user in self._users.values():
user.client = None
for handler in self.message_handlers.copy():
handler.unregister_for_client(self)
# queues might contain some final messages
self._send_queue = None
self._recv_queue = None
# act of setting _stopped will make wait_for_stop()s fire
if ex:
self._stopped.set_exception(ex)
else:
self._stopped.set(None)
gevent.spawn(_stop).join()
def msg(self, to, content, priority=16, block=False):
"""Shortcut to send a Privmsg. See Message.send()"""
message.Privmsg(self, to, content).send(priority=priority, block=block)
def quit(self, msg=None, priority=16, block=True):
"""Shortcut to send a Quit. See Message.send().
Note that sending a quit automatically stops the client."""
message.Quit(self, msg).send(priority=priority)
if block:
self.wait_for_stop()
def channel(self, name):
"""Fetch a channel object, or create it if it doesn't exist.
Note that the channel is not joined automatically."""
name = self.normalize_channel(name)
if name not in self._channels:
Channel(self, name) # this will register itself into _channels
return self._channels[name]
@property
def joined_channels(self):
"""Returns a list of channels we are currently joined to"""
return set(channel for channel in self._channels.values() if channel.joined)
def wait_for(self, **match_args):
"""Block until a message matching given args is received.
The matching message is returned.
See geventirc.message.match() for match_args"""
result = gevent.event.AsyncResult()
@self.handler(**match_args)
def wait_callback(self, msg):
result.set(msg)
return True # unregister
return result.get()
def wait_for_stop(self):
"""Wait for client to exit, raising if it failed"""
self._stopped.get()
def wait_for_messages(self, timeout=None, priority=16):
"""This function will attempt to block until the server has received and processed
all current messages. We rely on the fact that servers will generally react to messages
in order, and so we queue up a Ping and wait for the corresponding Pong."""
# We're conservative here with our payload - 8 characters only, letters and digits,
# and we assume it's case insensitive. This still gives us about 40 bits of information.
# Also, some servers set the payload to their server name in the reply
# and attach the payload as a second arg. Finally, we just dump a reasonable timeout
# over the whole thing, just in case.
payload = ''.join(random.choice(string.lowercase + string.digits) for x in range(8))
received = gevent.event.Event()
def match_payload(params):
return any(value.lower() == payload for value in params)
@self.handler(command=message.Pong, params=match_payload)
def on_pong(client, msg):
received.set()
return True # unregister
message.Ping(self, payload).send()
if received.wait(self.WAIT_FOR_MESSAGES_TIMEOUT if timeout is None else timeout):
return True
self.logger.warning("Timed out while waiting for matching pong in wait_for_messages()")
return False
# aliases - the wait_for_* names are more descriptive, but they map to common async concepts:
join = wait_for_stop
sync = wait_for_messages
def normalize_channel(self, name):
"""Ensures that a channel name has a correct prefix, defaulting to the first entry in CHANTYPES."""
if not name:
raise ValueError("Channel name cannot be empty")
if name[0] in self.server_properties.CHANTYPES:
return name
return "{prefix}{name}".format(name=name, prefix=self.server_properties.CHANTYPES[0])
@Handler(command=message.ISupport, sync=True)
def recv_support(self, client, msg):
self.server_properties.update(msg.properties)
@Handler(command=message.Ping)
def on_ping(self, client, msg):
message.Pong(client, msg.payload).send(priority=-1)
@Handler(command=replycodes.errors.NICKNAMEINUSE, sync=True)
def nick_in_use(self, client, msg):
server_nick, bad_nick = msg.params[:2] # server_nick is what server thinks our nick is right now
self.logger.debug("Nick {!r} in use (our nick: {!r} -> {!r})".format(bad_nick, self._nick, self._new_nick))
if self._new_nick:
# if we're changing nicks, ignore it unless it matches the new one
if bad_nick == self._new_nick:
# cancel current change
self.logger.debug("New nick in use, cancelling")
self._new_nick = self._nick
return
# if we aren't changing nicks, something has gone wrong.
self.logger.warning("Got nick-in-use while not changing nicks, _nick={!r}, params={!r}".format(self._nick, msg.params))
if bad_nick != self._nick:
return # this is some kind of weird race, but ok to ignore
# if we've reached here, we must have messed up earlier and thought we got a nick when we didn't.
# easiest way to recover: force our nick back to whatever the server thinks it is right now.
self.logger.warning("Recovering from nick-in-use confusion by forcing nick {!r} -> {!r}".format(self._nick, server_nick))
self._nick = server_nick
@Handler(command='NICK', sender=matches_nick, sync=True)
def forced_nick_change(self, client, msg):
if msg.sender == self._new_nick:
# we are changing, and this was sent after our change was recieved so we must respect it.
self._new_nick = msg.nickname
elif msg.sender == self._nick:
# either we aren't changing and everything is fine, or we are changing but this was
# sent before the NICK command was processed by the server, so we change our old value
# so further forced_nick_changes and matches_nick() still works.
self._nick = msg.nickname
@Handler(command='JOIN', sender=matches_nick, sync=True)
def forced_join(self, client, msg):
for name in msg.channels:
channel = self.channel(name)
channel._join()
@Handler(command='PRIVMSG', ctcp=lambda v: v and v[0].upper() == 'VERSION')
def ctcp_version(self, client, msg):
if self.version:
message.Notice(self, msg.sender, ('VERSION', self.version)).send()
@Handler(command='PRIVMSG', ctcp=lambda v: v and v[0].upper() == 'TIME')
def ctcp_time(self, client, msg):
		if self.time == 'utc':
			now = time.gmtime()
		elif self.time == 'local':
			now = time.localtime()
else:
return
now = time.strftime('%s|%F %T', now)
message.Notice(self, msg.sender, ('TIME', now)).send()
@Handler(command='PRIVMSG', ctcp=lambda v: v and v[0].upper() == 'PING')
def ctcp_ping(self, client, msg):
cmd, arg = msg.ctcp
message.Notice(self, msg.sender, ('PING', arg)).send()
def _get_handoff_data(self):
"""Collect all data needed for a connection handoff and return as dict.
Make sure _prepare_for_handoff has been called first."""
return dict(
recv_buf = b64encode(self._recv_buf),
channels = [channel.name for channel in self._channels.values() if channel.joined],
hostname = self.hostname,
nick = self._nick,
port = self.port,
password = self.password,
ident = self.ident,
real_name = self.real_name,
)
def _prepare_for_handoff(self):
"""Stop operations and prepare for a connection handoff.
Note that, among other things, this stops the client from responding to PINGs from the server,
and so effectively begins a timeout until the server drops the connection."""
if self.ssl:
raise ValueError("Handing off of an ssl connection is not supported")
# wait until we aren't changing nick, then permanently acquire the lock to prevent further changes
# (note that forced_nick_change could still change it, but that's ok because we're stopping recv_loop)
self._nick_lock.acquire()
self._named_greenlets['_idle_watchdog'].kill(block=True)
self._kill_recv = True # recv_loop will exit after processing current lines
if self._recv_waiting:
# recv_loop is stuck in a socket.recv call and should be bumped out
self._named_greenlets['_recv_loop'].kill(socket.error(errno.EINTR, "recv_loop is being killed"), block=False)
self._named_greenlets['_recv_loop'].get()
# we are now no longer recving messages - we set a trap on _send(), then wait for send_queue to drain.
# in practice, things should be unlikely to hit trap.
def trap(*a, **k): raise Exception("Connection is being handed off, messages cannot be sent")
self._send = trap
# since we need to clear send queue, it makes no sense to try to hand off while it is limited
if self._send_queue.get_limit() is not None:
raise Exception("Can't hand off while send queue is limited")
# We re-use the activity flag to check queue after each message is sent
while True:
self._activity.clear()
if self._send_queue.empty():
break
self._activity.wait()
# final state: recv loop is stopped, send loop is hung as no further messages can be queued and queue is empty
def _finalize_handoff(self):
"""Actually report stop once we have fully handed off."""
self.stop()
def handoff_to_sock(self, send_sock):
"""Takes a unix socket and hands off connection to other process via it.
Note that the receiving end will not complete until you close the connection."""
self._prepare_for_handoff()
handoff_data = json.dumps(self._get_handoff_data())
send_fd(send_sock, self._socket)
send_sock.sendall(handoff_data)
self._finalize_handoff()
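
# A minimal usage sketch (illustrative server and channel; Channel.join() is
# assumed from girc.channel, since only the internal _join is visible here):
if __name__ == '__main__':
	client = Client('irc.example.net', 'gircbot')

	@client.handler(command='PRIVMSG')
	def on_privmsg(client, msg):
		client.logger.info("privmsg from %s", msg.sender)

	client.start()
	client.channel('#example').join()
	client.msg('#example', 'hello from girc')
	client.join()  # alias for wait_for_stop()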
| 39.061308 | 123 | 0.725193 | 4,326 | 28,671 | 4.690014 | 0.169209 | 0.014983 | 0.009759 | 0.007393 | 0.112869 | 0.079403 | 0.05328 | 0.043028 | 0.032037 | 0.021933 | 0 | 0.003269 | 0.178403 | 28,671 | 733 | 124 | 39.114598 | 0.85804 | 0.379303 | 0 | 0.186335 | 0 | 0 | 0.107287 | 0.001538 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109731 | false | 0.016563 | 0.043478 | 0.008282 | 0.256729 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca1ce780c2537fe4570a13d85a070b4f4bf60790 | 2,108 | py | Python | brightway2_conda_installer/__main__.py | haasad/brightway2-conda-installer | 48f82b7c594a6a873cead22fb1e92a4441c75125 | [
"BSD-3-Clause"
] | null | null | null | brightway2_conda_installer/__main__.py | haasad/brightway2-conda-installer | 48f82b7c594a6a873cead22fb1e92a4441c75125 | [
"BSD-3-Clause"
] | null | null | null | brightway2_conda_installer/__main__.py | haasad/brightway2-conda-installer | 48f82b7c594a6a873cead22fb1e92a4441c75125 | [
"BSD-3-Clause"
] | null | null | null | import sys
import subprocess
import tempfile
if not ('continuum' in sys.version.lower() or 'conda' in sys.version.lower()):
raise RuntimeError('This script needs to be executed with the anaconda python distribution.')
def create_new_brightway2_env():
name_str = 'name: {}\n'
channels_str = "channels:\n\t- defaults\n\t- haasad\n\t- conda-forge\n\t"
python_str = '\ndependencies:\n\t- python={}'
conda_dependencies = ['pypardiso',
'jupyter',
'matplotlib',
'flask',
'lxml',
'requests',
'nose',
'docopt',
'xlsxwriter',
'xlrd',
'unidecode',
'appdirs',
'psutil',
'unicodecsv',
'wrapt',
'whoosh',
'peewee',
'asteval',
'future',
'monotonic',
'fasteners']
conda_dependencies_str = '\n\t- ' + '\n\t- '.join(conda_dependencies)
pip_dependencies = ['brightway2',
'bw2analyzer',
'bw2calc',
'bw2data',
'bw2io',
'bw2parameters',
'bw2speedups',
'eight']
pip_dependencies_str = '\n\t- pip:\n\t\t- '+'\n\t\t- '.join(pip_dependencies)
yaml_str = name_str + channels_str + python_str + conda_dependencies_str + pip_dependencies_str
yaml_str = yaml_str.replace('\t', ' ')
temp_yaml_file = tempfile.mktemp(suffix='.yml')
env_name = input('Enter the name of your new environment: ')
if not env_name:
env_name = 'bw'
py_ver = input('Enter the python version, one of (2.7, 3.4, 3.5, 3.6): ')
    if py_ver not in {'2.7', '3.4', '3.5', '3.6'}:
py_ver = '3.5'
with open(temp_yaml_file, 'w') as f:
f.write(yaml_str.format(env_name, py_ver))
subprocess.call(['conda-env', 'create', '-f', temp_yaml_file])
if __name__ == "__main__":
create_new_brightway2_env()
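
# As a package __main__ module, this is typically invoked with the
# anaconda/miniconda interpreter (see the version check at the top):
#   python -m brightway2_conda_installer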
| 30.114286 | 96 | 0.509013 | 231 | 2,108 | 4.428571 | 0.4329 | 0.01955 | 0.035191 | 0.033236 | 0.01564 | 0.01564 | 0.01564 | 0.01564 | 0 | 0 | 0 | 0.019795 | 0.352941 | 2,108 | 69 | 97 | 30.550725 | 0.730205 | 0 | 0 | 0 | 0 | 0.037037 | 0.276565 | 0 | 0.037037 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.055556 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca210c8b8bd1e28df8213ce6fb3cae882fc18129 | 2,593 | py | Python | evd_ros_backend/evd_ros_core/src/evd_sim/pose_interpolator.py | Wisc-HCI/CoFrame | 7a54344248d80cb316d36aabd40bbd3cdbbc07eb | [
"MIT"
] | null | null | null | evd_ros_backend/evd_ros_core/src/evd_sim/pose_interpolator.py | Wisc-HCI/CoFrame | 7a54344248d80cb316d36aabd40bbd3cdbbc07eb | [
"MIT"
] | null | null | null | evd_ros_backend/evd_ros_core/src/evd_sim/pose_interpolator.py | Wisc-HCI/CoFrame | 7a54344248d80cb316d36aabd40bbd3cdbbc07eb | [
"MIT"
] | null | null | null |
from geometry_msgs.msg import Pose
from scipy.interpolate import interp1d
from pyquaternion import Quaternion
class PoseInterpolator:
def __init__(self, poseStart, poseEnd, velocity, minTime=1):
'''
param: poseStart is starting pose (that robot should already be at)
param: poseEnd is target ending pose
param: velocity scalar for positional intepolation
'''
self._poseStart = poseStart
self._poseEnd = poseEnd
self._velocity = velocity
pos0 = poseStart.position
pos1 = poseEnd.position
# Compute positional timing
tx = abs(pos1.x - pos0.x) / velocity
ty = abs(pos1.y - pos0.y) / velocity
tz = abs(pos1.z - pos0.z) / velocity
# Generate interpolation functions
self.xInterp = interp1d([0, tx], [pos0.x, pos1.x], kind='linear', assume_sorted=True, bounds_error=False, fill_value=(pos0.x, pos1.x))
self.yInterp = interp1d([0, ty], [pos0.y, pos1.y], kind='linear', assume_sorted=True, bounds_error=False, fill_value=(pos0.y, pos1.y))
self.zInterp = interp1d([0, tz], [pos0.z, pos1.z], kind='linear', assume_sorted=True, bounds_error=False, fill_value=(pos0.z, pos1.z))
# Prepare quaternions for interpolation
rot0 = poseStart.orientation
self._q0 = Quaternion(rot0.w, rot0.x, rot0.y, rot0.z)
rot1 = poseEnd.orientation
self._q1 = Quaternion(rot1.w, rot1.x, rot1.y, rot1.z)
# set final timing expectations
if max([tx,ty,tz]) > minTime:
self._fullTime = max([tx,ty,tz])
else:
self._fullTime = minTime # for rotation only movements
@property
def full_time(self):
return self._fullTime
def step(self, time):
pose = Pose()
pose.position.x = self.xInterp(time)
pose.position.y = self.yInterp(time)
pose.position.z = self.zInterp(time)
        # Clamp the interpolation parameter to [0, 1] so slerp stays in range
        t = self.clamp(time / self._fullTime, 0, 1)
        q = Quaternion.slerp(self._q0, self._q1, amount=t)
pose.orientation.w = q.w
pose.orientation.x = q.x
pose.orientation.y = q.y
pose.orientation.z = q.z
return pose
@property
def end_pose(self):
return self._poseEnd
@property
def start_pose(self):
return self._poseStart
@property
def velocity(self):
return self._velocity
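
    # A minimal usage sketch (illustrative; not part of the original file):
    #   interp = PoseInterpolator(start_pose, end_pose, velocity=0.1)
    #   t = 0.0
    #   while t < interp.full_time:
    #       target = interp.step(t)  # Pose to command at time t
    #       t += 0.01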
def clamp(self, val, lower, upper):
if val < lower:
return lower
elif val > upper:
return upper
else:
return val | 31.621951 | 142 | 0.615889 | 330 | 2,593 | 4.742424 | 0.3 | 0.038339 | 0.035783 | 0.042173 | 0.105431 | 0.105431 | 0.105431 | 0.105431 | 0.105431 | 0.105431 | 0 | 0.023467 | 0.276899 | 2,593 | 82 | 143 | 31.621951 | 0.8112 | 0.119938 | 0 | 0.109091 | 0 | 0 | 0.008043 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.054545 | 0.072727 | 0.345455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca26de3069527458469525481c191fce42234a13 | 454 | py | Python | Lista07/ex009.py | Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110 | f306c8dc6385ee8c9580e687afa16a49ace68f95 | [
"MIT"
] | 2 | 2021-09-05T22:29:33.000Z | 2021-09-09T00:13:16.000Z | Lista07/ex009.py | Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110 | f306c8dc6385ee8c9580e687afa16a49ace68f95 | [
"MIT"
] | null | null | null | Lista07/ex009.py | Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110 | f306c8dc6385ee8c9580e687afa16a49ace68f95 | [
"MIT"
] | null | null | null | v = [int(input()) for x in range(121)]
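# Reads 121 daily temperatures from stdin, then reports the minimum, the
# maximum, the mean, and how many days were below the mean.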
menor, maior = v[0], v[0]
soma = 0
for i in range(121):
soma += v[i]
media = soma / 121
diasmen = 0
for i in range(121):
if v[i] < menor:
menor = v[i]
elif v[i] > maior:
maior = v[i]
if v[i] < media:
diasmen += 1
print(f'Lowest temp.: {menor} | Highest temp.: {maior}')
print(f'Average temperature: {media}')
print(f'There were {diasmen} days with a temperature below the average.')
| 25.222222 | 67 | 0.57489 | 80 | 454 | 3.2625 | 0.3625 | 0.045977 | 0.114943 | 0.05364 | 0.114943 | 0.114943 | 0 | 0 | 0 | 0 | 0 | 0.050296 | 0.255507 | 454 | 17 | 68 | 26.705882 | 0.721893 | 0 | 0 | 0.117647 | 0 | 0 | 0.284141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca2994cdce6a918a52358bb0350464c0936acaaf | 3,979 | py | Python | applications/common/helper.py | wangyuan02605/webcloud | e57a2713125b751ee8bb8da29b789e2044e789aa | [
"MIT"
] | null | null | null | applications/common/helper.py | wangyuan02605/webcloud | e57a2713125b751ee8bb8da29b789e2044e789aa | [
"MIT"
] | null | null | null | applications/common/helper.py | wangyuan02605/webcloud | e57a2713125b751ee8bb8da29b789e2044e789aa | [
"MIT"
] | null | null | null | from sqlalchemy import and_
from applications.extensions import db
class ModelFilter:
"""
    ORM multi-condition filter builder.
"""
filter_field = {}
filter_list = []
type_exact = "exact"
type_neq = "neq"
type_greater = "greater"
type_less = "less"
type_vague = "vague"
type_contains = "contains"
type_between = "between"
def __init__(self):
self.filter_field = {}
self.filter_list = []
def exact(self, field_name, value):
"""
        Exact-match filter on a field.
        :param field_name: model field name
        :param value: value to match
"""
if value and value != '':
self.filter_field[field_name] = {"data": value, "type": self.type_exact}
def neq(self, field_name, value):
"""
        Not-equal filter on a field.
        :param field_name: model field name
        :param value: value to exclude
"""
if value and value != '':
self.filter_field[field_name] = {"data": value, "type": self.type_neq}
def greater(self, field_name, value):
"""
        Greater-than filter on a field.
        :param field_name: model field name
        :param value: lower bound (exclusive)
"""
if value and value != '':
self.filter_field[field_name] = {"data": value, "type": self.type_greater}
def less(self, field_name, value):
"""
        Less-than filter on a field.
        :param field_name: model field name
        :param value: upper bound (exclusive)
"""
if value and value != '':
self.filter_field[field_name] = {"data": value, "type": self.type_less}
def vague(self, field_name, value: str):
"""
        Fuzzy filter on a field (LIKE '%value%').
        :param field_name: model field name
        :param value: substring to match
"""
if value and value != '':
self.filter_field[field_name] = {"data": ('%' + value + '%'), "type": self.type_vague}
def left_vague(self, field_name, value: str):
"""
        Left-fuzzy filter on a field (LIKE '%value').
        :param field_name: model field name
        :param value: suffix to match
"""
if value and value != '':
self.filter_field[field_name] = {"data": ('%' + value), "type": self.type_vague}
def right_vague(self, field_name, value: str):
"""
        Right-fuzzy filter on a field (LIKE 'value%').
        :param field_name: model field name
        :param value: prefix to match
"""
if value and value != '':
self.filter_field[field_name] = {"data": (value + '%'), "type": self.type_vague}
def contains(self, field_name, value: str):
"""
        Contains filter on a field.
        :param field_name: model field name
        :param value: value the field must contain
"""
if value and value != '':
self.filter_field[field_name] = {"data": value, "type": self.type_contains}
def between(self, field_name, value1, value2):
"""
        Range (BETWEEN) filter on a field.
        :param field_name: model field name
        :param value1: lower bound
        :param value2: upper bound
"""
if value1 and value2 and value1 != '' and value2 != '':
self.filter_field[field_name] = {"data": [value1, value2], "type": self.type_between}
def get_filter(self, model: db.Model):
"""
        Build the combined filter condition for the given model.
        :param model: the model class to filter on
"""
for k, v in self.filter_field.items():
if v.get("type") == self.type_vague:
self.filter_list.append(getattr(model, k).like(v.get("data")))
if v.get("type") == self.type_contains:
self.filter_list.append(getattr(model, k).contains(v.get("data")))
if v.get("type") == self.type_exact:
self.filter_list.append(getattr(model, k) == v.get("data"))
if v.get("type") == self.type_neq:
self.filter_list.append(getattr(model, k) != v.get("data"))
if v.get("type") == self.type_greater:
self.filter_list.append(getattr(model, k) > v.get("data"))
if v.get("type") == self.type_less:
self.filter_list.append(getattr(model, k) < v.get("data"))
if v.get("type") == self.type_between:
self.filter_list.append(getattr(model, k).between(v.get("data")[0], v.get("data")[1]))
return and_(*self.filter_list)
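# Usage sketch: `User`, `real_name`, and `create_at` below are hypothetical
# stand-ins for a db.Model subclass and its columns in this project.
#
#   mf = ModelFilter()
#   mf.vague('real_name', keyword)       # real_name LIKE '%keyword%'
#   mf.greater('create_at', start_date)  # create_at > start_date
#   rows = User.query.filter(mf.get_filter(User)).all()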
| 31.330709 | 102 | 0.535059 | 470 | 3,979 | 4.357447 | 0.121277 | 0.118652 | 0.09375 | 0.087891 | 0.67334 | 0.663086 | 0.62793 | 0.57959 | 0.563477 | 0.538086 | 0 | 0.003689 | 0.318673 | 3,979 | 126 | 103 | 31.579365 | 0.751752 | 0.120885 | 0 | 0.135593 | 0 | 0 | 0.056452 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.186441 | false | 0 | 0.033898 | 0 | 0.40678 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca2fa87d6cf4a73a4b45887ceb3513ea71760058 | 1,843 | py | Python | auxil/broker/tests/benchmark/broker-benchmark.py | hugolin615/zeek-4.0.0-ele420520-spring2021 | 258e9b2ee1f2a4bd45c6332a75304793b7d44d40 | [
"Apache-2.0"
] | 1 | 2021-03-06T19:51:07.000Z | 2021-03-06T19:51:07.000Z | auxil/broker/tests/benchmark/broker-benchmark.py | hugolin615/zeek-4.0.0-ele420520-spring2021 | 258e9b2ee1f2a4bd45c6332a75304793b7d44d40 | [
"Apache-2.0"
] | null | null | null | auxil/broker/tests/benchmark/broker-benchmark.py | hugolin615/zeek-4.0.0-ele420520-spring2021 | 258e9b2ee1f2a4bd45c6332a75304793b7d44d40 | [
"Apache-2.0"
] | null | null | null | # broker-benchmark.py
import sys
import time
import broker
event = int(sys.argv[1])
total_sent_ev1 = 0
total_recv_ev1 = 0
first_t = float(time.time())
last_t = first_t
last_sent_ev1 = 0
def printStats(stats):
t = stats[0]
dt = stats[1]
ev1 = stats[1 + event].value
global total_recv_ev1
total_recv_ev1 += ev1
global last_t, last_sent_ev1
now = time.time()
# rate = "sending at {:.2f} ev/s, receiving at {:.2f} ev/s".format(total_sent_ev1 / (now - first_t) , total_recv_ev1 / (now - first_t))
rate = "sending at {:.2f} ev/s, receiving at {:.2f} ev/s".format((total_sent_ev1 - last_sent_ev1) / (now - last_t), ev1 / dt.total_seconds())
last_t = now
last_sent_ev1 = total_sent_ev1
print("{} dt={} ev{}={} (total {} of {}) {}".format(t, dt, event, ev1, total_recv_ev1, total_sent_ev1, rate))
def sendBatch(p, num):
event_1s = [broker.zeek.Event("event_{}".format(event), [i, "test"]) for i in range(num)]
for e in event_1s:
p.publish(e)
global total_sent_ev1
total_sent_ev1 += len(event_1s)
def wait(s, t):
waited = 0
while True:
msgs = s.poll()
for m in msgs:
e = broker.zeek.Event(m[1])
if e.name() == "stats_update":
printStats(e.args()[0])
time.sleep(0.01)
waited += 0.01
if waited >= t:
break
ep = broker.Endpoint()
s = ep.make_subscriber("/benchmark/stats")
ss = ep.make_status_subscriber(True)
ep.peer("127.0.0.1", 9999)
# Wait until connection is established.
st = ss.get()
if not (type(st) == broker.Status and st.code() == broker.SC.PeerAdded):
print("could not connect")
sys.exit(1)
p = ep.make_publisher("/benchmark/events")
while True:
sendBatch(p, 5000)
wait(s, .001)
if ss.available():
print(ss.get())
sys.exit(0)
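# Usage note: this client assumes a matching Broker benchmark server is
# already listening on 127.0.0.1:9999, consuming "/benchmark/events" and
# publishing "stats_update" events on the "/benchmark/stats" topic.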
| 23.329114 | 145 | 0.606077 | 290 | 1,843 | 3.67931 | 0.324138 | 0.072165 | 0.078725 | 0.026242 | 0.133083 | 0.09747 | 0.09747 | 0.09747 | 0.09747 | 0.09747 | 0 | 0.04416 | 0.238199 | 1,843 | 78 | 146 | 23.628205 | 0.715812 | 0.097124 | 0 | 0.037037 | 0 | 0 | 0.100602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.111111 | 0.092593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca32bda0e8473b1b0757239f57fdc6efaa9ba516 | 39,222 | py | Python | aiopg/connection.py | arssher/aiopg | ed69a066608ac4788b2bc8a0cdd03690f22adb3a | [
"BSD-2-Clause"
] | 1,307 | 2015-01-06T15:52:21.000Z | 2022-03-25T16:04:53.000Z | aiopg/connection.py | arssher/aiopg | ed69a066608ac4788b2bc8a0cdd03690f22adb3a | [
"BSD-2-Clause"
] | 765 | 2015-01-11T10:17:57.000Z | 2022-01-29T13:04:30.000Z | aiopg/connection.py | arssher/aiopg | ed69a066608ac4788b2bc8a0cdd03690f22adb3a | [
"BSD-2-Clause"
] | 194 | 2015-02-20T09:29:30.000Z | 2022-03-03T19:49:19.000Z | import abc
import asyncio
import contextlib
import datetime
import enum
import errno
import platform
import select
import sys
import traceback
import uuid
import warnings
import weakref
from collections.abc import Mapping
from types import TracebackType
from typing import (
Any,
Callable,
Generator,
List,
Optional,
Sequence,
Tuple,
Type,
cast,
)
import psycopg2
import psycopg2.extensions
import psycopg2.extras
from .log import logger
from .utils import (
ClosableQueue,
_ContextManager,
create_completed_future,
get_running_loop,
)
TIMEOUT = 60.0
# Windows-specific error code; it is not in errno for some reason and doesn't
# map to OSError.errno EBADF
WSAENOTSOCK = 10038
def connect(
dsn: Optional[str] = None,
*,
timeout: float = TIMEOUT,
enable_json: bool = True,
enable_hstore: bool = True,
enable_uuid: bool = True,
echo: bool = False,
**kwargs: Any,
) -> _ContextManager["Connection"]:
"""A factory for connecting to PostgreSQL.
The coroutine accepts all parameters that psycopg2.connect() does
    plus optional keyword-only parameters such as `timeout` and `echo`.
Returns instantiated Connection object.
"""
connection = Connection(
dsn,
timeout,
bool(echo),
enable_hstore=enable_hstore,
enable_uuid=enable_uuid,
enable_json=enable_json,
**kwargs,
)
return _ContextManager[Connection](connection, disconnect) # type: ignore
async def disconnect(c: "Connection") -> None:
await c.close()
def _is_bad_descriptor_error(os_error: OSError) -> bool:
if platform.system() == "Windows": # pragma: no cover
winerror = int(getattr(os_error, "winerror", 0))
return winerror == WSAENOTSOCK
return os_error.errno == errno.EBADF
class IsolationCompiler(abc.ABC):
__slots__ = ("_isolation_level", "_readonly", "_deferrable")
def __init__(
self, isolation_level: Optional[str], readonly: bool, deferrable: bool
):
self._isolation_level = isolation_level
self._readonly = readonly
self._deferrable = deferrable
@property
def name(self) -> str:
return self._isolation_level or "Unknown"
def savepoint(self, unique_id: str) -> str:
return f"SAVEPOINT {unique_id}"
def release_savepoint(self, unique_id: str) -> str:
return f"RELEASE SAVEPOINT {unique_id}"
def rollback_savepoint(self, unique_id: str) -> str:
return f"ROLLBACK TO SAVEPOINT {unique_id}"
def commit(self) -> str:
return "COMMIT"
def rollback(self) -> str:
return "ROLLBACK"
def begin(self) -> str:
query = "BEGIN"
if self._isolation_level is not None:
query += f" ISOLATION LEVEL {self._isolation_level.upper()}"
if self._readonly:
query += " READ ONLY"
if self._deferrable:
query += " DEFERRABLE"
return query
def __repr__(self) -> str:
return self.name
class ReadCommittedCompiler(IsolationCompiler):
__slots__ = ()
def __init__(self, readonly: bool, deferrable: bool):
super().__init__("Read committed", readonly, deferrable)
class RepeatableReadCompiler(IsolationCompiler):
__slots__ = ()
def __init__(self, readonly: bool, deferrable: bool):
super().__init__("Repeatable read", readonly, deferrable)
class SerializableCompiler(IsolationCompiler):
__slots__ = ()
def __init__(self, readonly: bool, deferrable: bool):
super().__init__("Serializable", readonly, deferrable)
class DefaultCompiler(IsolationCompiler):
__slots__ = ()
def __init__(self, readonly: bool, deferrable: bool):
super().__init__(None, readonly, deferrable)
@property
def name(self) -> str:
return "Default"
class IsolationLevel(enum.Enum):
serializable = SerializableCompiler
repeatable_read = RepeatableReadCompiler
read_committed = ReadCommittedCompiler
default = DefaultCompiler
def __call__(self, readonly: bool, deferrable: bool) -> IsolationCompiler:
return self.value(readonly, deferrable) # type: ignore
async def _release_savepoint(t: "Transaction") -> None:
await t.release_savepoint()
async def _rollback_savepoint(t: "Transaction") -> None:
await t.rollback_savepoint()
class Transaction:
__slots__ = ("_cursor", "_is_begin", "_isolation", "_unique_id")
def __init__(
self,
cursor: "Cursor",
isolation_level: Callable[[bool, bool], IsolationCompiler],
readonly: bool = False,
deferrable: bool = False,
):
self._cursor = cursor
self._is_begin = False
self._unique_id: Optional[str] = None
self._isolation = isolation_level(readonly, deferrable)
@property
def is_begin(self) -> bool:
return self._is_begin
async def begin(self) -> "Transaction":
if self._is_begin:
raise psycopg2.ProgrammingError(
"You are trying to open a new transaction, use the save point"
)
self._is_begin = True
await self._cursor.execute(self._isolation.begin())
return self
async def commit(self) -> None:
self._check_commit_rollback()
await self._cursor.execute(self._isolation.commit())
self._is_begin = False
async def rollback(self) -> None:
self._check_commit_rollback()
if not self._cursor.closed:
await self._cursor.execute(self._isolation.rollback())
self._is_begin = False
async def rollback_savepoint(self) -> None:
self._check_release_rollback()
if not self._cursor.closed:
await self._cursor.execute(
self._isolation.rollback_savepoint(
self._unique_id # type: ignore
)
)
self._unique_id = None
async def release_savepoint(self) -> None:
self._check_release_rollback()
await self._cursor.execute(
self._isolation.release_savepoint(self._unique_id) # type: ignore
)
self._unique_id = None
async def savepoint(self) -> "Transaction":
self._check_commit_rollback()
if self._unique_id is not None:
            raise psycopg2.ProgrammingError("The previous savepoint has not been released")
self._unique_id = f"s{uuid.uuid1().hex}"
await self._cursor.execute(self._isolation.savepoint(self._unique_id))
return self
def point(self) -> _ContextManager["Transaction"]:
return _ContextManager[Transaction](
self.savepoint(),
_release_savepoint,
_rollback_savepoint,
)
def _check_commit_rollback(self) -> None:
if not self._is_begin:
raise psycopg2.ProgrammingError(
"You are trying to commit " "the transaction does not open"
)
def _check_release_rollback(self) -> None:
self._check_commit_rollback()
if self._unique_id is None:
            raise psycopg2.ProgrammingError("No savepoint has been started")
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__} "
f"transaction={self._isolation} id={id(self):#x}>"
)
def __del__(self) -> None:
if self._is_begin:
warnings.warn(
f"You have not closed transaction {self!r}", ResourceWarning
)
if self._unique_id is not None:
warnings.warn(
f"You have not closed savepoint {self!r}", ResourceWarning
)
async def __aenter__(self) -> "Transaction":
return await self.begin()
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
if exc_type is not None:
await self.rollback()
else:
await self.commit()
async def _commit_transaction(t: Transaction) -> None:
await t.commit()
async def _rollback_transaction(t: Transaction) -> None:
await t.rollback()
class Cursor:
def __init__(
self,
conn: "Connection",
impl: Any,
timeout: float,
echo: bool,
isolation_level: Optional[IsolationLevel] = None,
):
self._conn = conn
self._impl = impl
self._timeout = timeout
self._echo = echo
self._transaction = Transaction(
self, isolation_level or IsolationLevel.default
)
@property
def echo(self) -> bool:
"""Return echo mode status."""
return self._echo
@property
def description(self) -> Optional[Sequence[Any]]:
"""This read-only attribute is a sequence of 7-item sequences.
Each of these sequences is a collections.namedtuple containing
information describing one result column:
0. name: the name of the column returned.
1. type_code: the PostgreSQL OID of the column.
2. display_size: the actual length of the column in bytes.
3. internal_size: the size in bytes of the column associated to
this column on the server.
4. precision: total number of significant digits in columns of
type NUMERIC. None for other types.
5. scale: count of decimal digits in the fractional part in
columns of type NUMERIC. None for other types.
6. null_ok: always None as not easy to retrieve from the libpq.
This attribute will be None for operations that do not
return rows or if the cursor has not had an operation invoked
via the execute() method yet.
"""
return self._impl.description # type: ignore
def close(self) -> None:
"""Close the cursor now."""
if not self.closed:
self._impl.close()
@property
def closed(self) -> bool:
"""Read-only boolean attribute: specifies if the cursor is closed."""
return self._impl.closed # type: ignore
@property
def connection(self) -> "Connection":
"""Read-only attribute returning a reference to the `Connection`."""
return self._conn
@property
def raw(self) -> Any:
"""Underlying psycopg cursor object, readonly"""
return self._impl
@property
def name(self) -> str:
# Not supported
return self._impl.name # type: ignore
@property
def scrollable(self) -> Optional[bool]:
# Not supported
return self._impl.scrollable # type: ignore
@scrollable.setter
def scrollable(self, val: bool) -> None:
# Not supported
self._impl.scrollable = val
@property
def withhold(self) -> bool:
# Not supported
return self._impl.withhold # type: ignore
@withhold.setter
def withhold(self, val: bool) -> None:
# Not supported
self._impl.withhold = val
async def execute(
self,
operation: str,
parameters: Any = None,
*,
timeout: Optional[float] = None,
) -> None:
"""Prepare and execute a database operation (query or command).
Parameters may be provided as sequence or mapping and will be
bound to variables in the operation. Variables are specified
either with positional %s or named %({name})s placeholders.
"""
if timeout is None:
timeout = self._timeout
waiter = self._conn._create_waiter("cursor.execute")
if self._echo:
logger.info(operation)
logger.info("%r", parameters)
try:
self._impl.execute(operation, parameters)
except BaseException:
self._conn._waiter = None
raise
try:
await self._conn._poll(waiter, timeout)
except asyncio.TimeoutError:
self._impl.close()
raise
async def executemany(self, *args: Any, **kwargs: Any) -> None:
# Not supported
raise psycopg2.ProgrammingError(
"executemany cannot be used in asynchronous mode"
)
async def callproc(
self,
procname: str,
parameters: Any = None,
*,
timeout: Optional[float] = None,
) -> None:
"""Call a stored database procedure with the given name.
The sequence of parameters must contain one entry for each
argument that the procedure expects. The result of the call is
returned as modified copy of the input sequence. Input
parameters are left untouched, output and input/output
parameters replaced with possibly new values.
"""
if timeout is None:
timeout = self._timeout
waiter = self._conn._create_waiter("cursor.callproc")
if self._echo:
logger.info("CALL %s", procname)
logger.info("%r", parameters)
try:
self._impl.callproc(procname, parameters)
except BaseException:
self._conn._waiter = None
raise
else:
await self._conn._poll(waiter, timeout)
def begin(self) -> _ContextManager[Transaction]:
return _ContextManager[Transaction](
self._transaction.begin(),
_commit_transaction,
_rollback_transaction,
)
def begin_nested(self) -> _ContextManager[Transaction]:
if self._transaction.is_begin:
return self._transaction.point()
return _ContextManager[Transaction](
self._transaction.begin(),
_commit_transaction,
_rollback_transaction,
)
def mogrify(self, operation: str, parameters: Any = None) -> bytes:
"""Return a query string after arguments binding.
The byte string returned is exactly the one that would be sent to
the database running the .execute() method or similar.
"""
ret = self._impl.mogrify(operation, parameters)
assert (
not self._conn.isexecuting()
), "Don't support server side mogrify"
return ret # type: ignore
async def setinputsizes(self, sizes: int) -> None:
"""This method is exposed in compliance with the DBAPI.
It currently does nothing but it is safe to call it.
"""
self._impl.setinputsizes(sizes)
async def fetchone(self) -> Any:
"""Fetch the next row of a query result set.
Returns a single tuple, or None when no more data is
available.
"""
ret = self._impl.fetchone()
assert (
not self._conn.isexecuting()
), "Don't support server side cursors yet"
return ret
async def fetchmany(self, size: Optional[int] = None) -> List[Any]:
"""Fetch the next set of rows of a query result.
Returns a list of tuples. An empty list is returned when no
more rows are available.
The number of rows to fetch per call is specified by the
parameter. If it is not given, the cursor's .arraysize
determines the number of rows to be fetched. The method should
try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number
of rows not being available, fewer rows may be returned.
"""
if size is None:
size = self._impl.arraysize
ret = self._impl.fetchmany(size)
assert (
not self._conn.isexecuting()
), "Don't support server side cursors yet"
return ret # type: ignore
async def fetchall(self) -> List[Any]:
"""Fetch all (remaining) rows of a query result.
Returns them as a list of tuples. An empty list is returned
if there is no more record to fetch.
"""
ret = self._impl.fetchall()
assert (
not self._conn.isexecuting()
), "Don't support server side cursors yet"
return ret # type: ignore
async def scroll(self, value: int, mode: str = "relative") -> None:
"""Scroll to a new position according to mode.
If mode is relative (default), value is taken as offset
to the current position in the result set, if set to
absolute, value states an absolute target position.
"""
self._impl.scroll(value, mode)
assert (
not self._conn.isexecuting()
), "Don't support server side cursors yet"
@property
def arraysize(self) -> int:
"""How many rows will be returned by fetchmany() call.
This read/write attribute specifies the number of rows to
fetch at a time with fetchmany(). It defaults to
1 meaning to fetch a single row at a time.
"""
return self._impl.arraysize # type: ignore
@arraysize.setter
def arraysize(self, val: int) -> None:
"""How many rows will be returned by fetchmany() call.
This read/write attribute specifies the number of rows to
fetch at a time with fetchmany(). It defaults to
1 meaning to fetch a single row at a time.
"""
self._impl.arraysize = val
@property
def itersize(self) -> int:
# Not supported
return self._impl.itersize # type: ignore
@itersize.setter
def itersize(self, val: int) -> None:
# Not supported
self._impl.itersize = val
@property
def rowcount(self) -> int:
"""Returns the number of rows that has been produced of affected.
This read-only attribute specifies the number of rows that the
last :meth:`execute` produced (for Data Query Language
statements like SELECT) or affected (for Data Manipulation
Language statements like UPDATE or INSERT).
        The attribute is -1 if no .execute() has been performed on the
        cursor, or if the row count of the last operation cannot be
        determined by the interface.
"""
return self._impl.rowcount # type: ignore
@property
def rownumber(self) -> int:
"""Row index.
This read-only attribute provides the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined."""
return self._impl.rownumber # type: ignore
@property
def lastrowid(self) -> int:
"""OID of the last inserted row.
This read-only attribute provides the OID of the last row
inserted by the cursor. If the table wasn't created with OID
support or the last operation is not a single record insert,
the attribute is set to None.
"""
return self._impl.lastrowid # type: ignore
@property
def query(self) -> Optional[str]:
"""The last executed query string.
Read-only attribute containing the body of the last query sent
        to the backend (including bound arguments) as a bytes
string. None if no query has been executed yet.
"""
return self._impl.query # type: ignore
@property
def statusmessage(self) -> str:
"""the message returned by the last command."""
return self._impl.statusmessage # type: ignore
@property
def tzinfo_factory(self) -> datetime.tzinfo:
"""The time zone factory used to handle data types such as
`TIMESTAMP WITH TIME ZONE`.
"""
return self._impl.tzinfo_factory # type: ignore
@tzinfo_factory.setter
def tzinfo_factory(self, val: datetime.tzinfo) -> None:
"""The time zone factory used to handle data types such as
`TIMESTAMP WITH TIME ZONE`.
"""
self._impl.tzinfo_factory = val
async def nextset(self) -> None:
# Not supported
self._impl.nextset() # raises psycopg2.NotSupportedError
async def setoutputsize(
self, size: int, column: Optional[int] = None
) -> None:
# Does nothing
self._impl.setoutputsize(size, column)
async def copy_from(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"copy_from cannot be used in asynchronous mode"
)
async def copy_to(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"copy_to cannot be used in asynchronous mode"
)
async def copy_expert(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"copy_expert cannot be used in asynchronous mode"
)
@property
def timeout(self) -> float:
"""Return default timeout for cursor operations."""
return self._timeout
def __aiter__(self) -> "Cursor":
return self
async def __anext__(self) -> Any:
ret = await self.fetchone()
if ret is not None:
return ret
raise StopAsyncIteration
async def __aenter__(self) -> "Cursor":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
self.close()
def __repr__(self) -> str:
return (
f"<"
f"{type(self).__module__}::{type(self).__name__} "
f"name={self.name}, "
f"closed={self.closed}"
f">"
)
async def _close_cursor(c: Cursor) -> None:
c.close()
class Connection:
"""Low-level asynchronous interface for wrapped psycopg2 connection.
The Connection instance encapsulates a database session.
Provides support for creating asynchronous cursors.
"""
_source_traceback = None
def __init__(
self,
dsn: Optional[str],
timeout: float,
echo: bool = False,
enable_json: bool = True,
enable_hstore: bool = True,
enable_uuid: bool = True,
**kwargs: Any,
):
self._enable_json = enable_json
self._enable_hstore = enable_hstore
self._enable_uuid = enable_uuid
self._loop = get_running_loop()
self._waiter: Optional[
"asyncio.Future[None]"
] = self._loop.create_future()
kwargs["async_"] = kwargs.pop("async", True)
kwargs.pop("loop", None) # backward compatibility
self._conn = psycopg2.connect(dsn, **kwargs)
self._dsn = self._conn.dsn
        assert self._conn.isexecuting(), "Connection is not in asynchronous mode"
self._fileno: Optional[int] = self._conn.fileno()
self._timeout = timeout
self._last_usage = self._loop.time()
self._writing = False
self._echo = echo
self._notifies = asyncio.Queue() # type: ignore
self._notifies_proxy = ClosableQueue(self._notifies, self._loop)
self._weakref = weakref.ref(self)
self._loop.add_reader(
self._fileno, self._ready, self._weakref # type: ignore
)
if self._loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
@staticmethod
def _ready(weak_self: "weakref.ref[Any]") -> None:
self = cast(Connection, weak_self())
if self is None:
return
waiter = self._waiter
try:
state = self._conn.poll()
while self._conn.notifies:
notify = self._conn.notifies.pop(0)
self._notifies.put_nowait(notify)
except (psycopg2.Warning, psycopg2.Error) as exc:
if self._fileno is not None:
try:
select.select([self._fileno], [], [], 0)
except OSError as os_exc:
if _is_bad_descriptor_error(os_exc):
with contextlib.suppress(OSError):
self._loop.remove_reader(self._fileno)
# forget a bad file descriptor, don't try to
# touch it
self._fileno = None
try:
if self._writing:
self._writing = False
if self._fileno is not None:
self._loop.remove_writer(self._fileno)
except OSError as exc2:
if exc2.errno != errno.EBADF:
# EBADF is ok for closed file descriptor
# chain exception otherwise
exc2.__cause__ = exc
exc = exc2
self._notifies_proxy.close(exc)
if waiter is not None and not waiter.done():
waiter.set_exception(exc)
else:
if self._fileno is None:
# connection closed
if waiter is not None and not waiter.done():
waiter.set_exception(
psycopg2.OperationalError("Connection closed")
)
if state == psycopg2.extensions.POLL_OK:
if self._writing:
self._loop.remove_writer(self._fileno) # type: ignore
self._writing = False
if waiter is not None and not waiter.done():
waiter.set_result(None)
elif state == psycopg2.extensions.POLL_READ:
if self._writing:
self._loop.remove_writer(self._fileno) # type: ignore
self._writing = False
elif state == psycopg2.extensions.POLL_WRITE:
if not self._writing:
self._loop.add_writer(
self._fileno, self._ready, weak_self # type: ignore
)
self._writing = True
elif state == psycopg2.extensions.POLL_ERROR:
self._fatal_error(
"Fatal error on aiopg connection: "
"POLL_ERROR from underlying .poll() call"
)
else:
self._fatal_error(
f"Fatal error on aiopg connection: "
f"unknown answer {state} from underlying "
f".poll() call"
)
def _fatal_error(self, message: str) -> None:
# Should be called from exception handler only.
self._loop.call_exception_handler(
{
"message": message,
"connection": self,
}
)
self.close()
if self._waiter and not self._waiter.done():
self._waiter.set_exception(psycopg2.OperationalError(message))
def _create_waiter(self, func_name: str) -> "asyncio.Future[None]":
if self._waiter is not None:
raise RuntimeError(
f"{func_name}() called while another coroutine "
f"is already waiting for incoming data"
)
self._waiter = self._loop.create_future()
return self._waiter
async def _poll(
self, waiter: "asyncio.Future[None]", timeout: float
) -> None:
assert waiter is self._waiter, (waiter, self._waiter)
self._ready(self._weakref)
try:
await asyncio.wait_for(self._waiter, timeout)
except (asyncio.CancelledError, asyncio.TimeoutError) as exc:
await asyncio.shield(self.close())
raise exc
except psycopg2.extensions.QueryCanceledError as exc:
self._loop.call_exception_handler(
{
"message": exc.pgerror,
"exception": exc,
"future": self._waiter,
}
)
raise asyncio.CancelledError
finally:
self._waiter = None
def isexecuting(self) -> bool:
return self._conn.isexecuting() # type: ignore
def cursor(
self,
name: Optional[str] = None,
cursor_factory: Any = None,
scrollable: Optional[bool] = None,
withhold: bool = False,
timeout: Optional[float] = None,
isolation_level: Optional[IsolationLevel] = None,
) -> _ContextManager[Cursor]:
"""A coroutine that returns a new cursor object using the connection.
*cursor_factory* argument can be used to create non-standard
cursors. The argument must be subclass of
`psycopg2.extensions.cursor`.
*name*, *scrollable* and *withhold* parameters are not supported by
psycopg in asynchronous mode.
"""
self._last_usage = self._loop.time()
coro = self._cursor(
name=name,
cursor_factory=cursor_factory,
scrollable=scrollable,
withhold=withhold,
timeout=timeout,
isolation_level=isolation_level,
)
return _ContextManager[Cursor](coro, _close_cursor)
async def _cursor(
self,
name: Optional[str] = None,
cursor_factory: Any = None,
scrollable: Optional[bool] = None,
withhold: bool = False,
timeout: Optional[float] = None,
isolation_level: Optional[IsolationLevel] = None,
) -> Cursor:
if timeout is None:
timeout = self._timeout
impl = await self._cursor_impl(
name=name,
cursor_factory=cursor_factory,
scrollable=scrollable,
withhold=withhold,
)
cursor = Cursor(self, impl, timeout, self._echo, isolation_level)
return cursor
async def _cursor_impl(
self,
name: Optional[str] = None,
cursor_factory: Any = None,
scrollable: Optional[bool] = None,
withhold: bool = False,
) -> Any:
if cursor_factory is None:
impl = self._conn.cursor(
name=name, scrollable=scrollable, withhold=withhold
)
else:
impl = self._conn.cursor(
name=name,
cursor_factory=cursor_factory,
scrollable=scrollable,
withhold=withhold,
)
return impl
def _close(self) -> None:
"""Remove the connection from the event_loop and close it."""
# N.B. If connection contains uncommitted transaction the
# transaction will be discarded
if self._fileno is not None:
self._loop.remove_reader(self._fileno)
if self._writing:
self._writing = False
self._loop.remove_writer(self._fileno)
self._conn.close()
if not self._loop.is_closed():
if self._waiter is not None and not self._waiter.done():
self._waiter.set_exception(
psycopg2.OperationalError("Connection closed")
)
self._notifies_proxy.close(
psycopg2.OperationalError("Connection closed")
)
def close(self) -> "asyncio.Future[None]":
self._close()
return create_completed_future(self._loop)
@property
def closed(self) -> bool:
"""Connection status.
Read-only attribute reporting whether the database connection is
open (False) or closed (True).
"""
return self._conn.closed # type: ignore
@property
def raw(self) -> Any:
"""Underlying psycopg connection object, readonly"""
return self._conn
async def commit(self) -> None:
raise psycopg2.ProgrammingError(
"commit cannot be used in asynchronous mode"
)
async def rollback(self) -> None:
raise psycopg2.ProgrammingError(
"rollback cannot be used in asynchronous mode"
)
# TPC
async def xid(
self, format_id: int, gtrid: str, bqual: str
) -> Tuple[int, str, str]:
return self._conn.xid(format_id, gtrid, bqual) # type: ignore
async def tpc_begin(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"tpc_begin cannot be used in asynchronous mode"
)
async def tpc_prepare(self) -> None:
raise psycopg2.ProgrammingError(
"tpc_prepare cannot be used in asynchronous mode"
)
async def tpc_commit(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"tpc_commit cannot be used in asynchronous mode"
)
async def tpc_rollback(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"tpc_rollback cannot be used in asynchronous mode"
)
async def tpc_recover(self) -> None:
raise psycopg2.ProgrammingError(
"tpc_recover cannot be used in asynchronous mode"
)
async def cancel(self) -> None:
raise psycopg2.ProgrammingError(
"cancel cannot be used in asynchronous mode"
)
async def reset(self) -> None:
raise psycopg2.ProgrammingError(
"reset cannot be used in asynchronous mode"
)
@property
def dsn(self) -> Optional[str]:
"""DSN connection string.
Read-only attribute representing dsn connection string used
        for connecting to the PostgreSQL server.
"""
return self._dsn # type: ignore
async def set_session(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"set_session cannot be used in asynchronous mode"
)
@property
def autocommit(self) -> bool:
"""Autocommit status"""
return self._conn.autocommit # type: ignore
@autocommit.setter
def autocommit(self, val: bool) -> None:
"""Autocommit status"""
self._conn.autocommit = val
@property
def isolation_level(self) -> int:
"""Transaction isolation level.
The only allowed value is ISOLATION_LEVEL_READ_COMMITTED.
"""
return self._conn.isolation_level # type: ignore
async def set_isolation_level(self, val: int) -> None:
"""Transaction isolation level.
The only allowed value is ISOLATION_LEVEL_READ_COMMITTED.
"""
self._conn.set_isolation_level(val)
@property
def encoding(self) -> str:
"""Client encoding for SQL operations."""
return self._conn.encoding # type: ignore
async def set_client_encoding(self, val: str) -> None:
self._conn.set_client_encoding(val)
@property
def notices(self) -> List[str]:
"""A list of all db messages sent to the client during the session."""
return self._conn.notices # type: ignore
@property
def cursor_factory(self) -> Any:
"""The default cursor factory used by .cursor()."""
return self._conn.cursor_factory
async def get_backend_pid(self) -> int:
"""Returns the PID of the backend server process."""
return self._conn.get_backend_pid() # type: ignore
async def get_parameter_status(self, parameter: str) -> Optional[str]:
"""Look up a current parameter setting of the server."""
return self._conn.get_parameter_status(parameter) # type: ignore
async def get_transaction_status(self) -> int:
"""Return the current session transaction status as an integer."""
return self._conn.get_transaction_status() # type: ignore
@property
def protocol_version(self) -> int:
"""A read-only integer representing protocol being used."""
return self._conn.protocol_version # type: ignore
@property
def server_version(self) -> int:
"""A read-only integer representing the backend version."""
return self._conn.server_version # type: ignore
@property
def status(self) -> int:
"""A read-only integer representing the status of the connection."""
return self._conn.status # type: ignore
async def lobject(self, *args: Any, **kwargs: Any) -> None:
raise psycopg2.ProgrammingError(
"lobject cannot be used in asynchronous mode"
)
@property
def timeout(self) -> float:
"""Return default timeout for connection operations."""
return self._timeout
@property
def last_usage(self) -> float:
"""Return time() when connection was used."""
return self._last_usage
@property
def echo(self) -> bool:
"""Return echo mode status."""
return self._echo
def __repr__(self) -> str:
return (
f"<"
f"{type(self).__module__}::{type(self).__name__} "
f"isexecuting={self.isexecuting()}, "
f"closed={self.closed}, "
f"echo={self.echo}, "
f">"
)
def __del__(self) -> None:
try:
_conn = self._conn
except AttributeError:
return
if _conn is not None and not _conn.closed:
self.close()
warnings.warn(f"Unclosed connection {self!r}", ResourceWarning)
context = {"connection": self, "message": "Unclosed connection"}
if self._source_traceback is not None:
context["source_traceback"] = self._source_traceback
self._loop.call_exception_handler(context)
@property
def notifies(self) -> ClosableQueue:
"""Return notification queue (an asyncio.Queue -like object)."""
return self._notifies_proxy
async def _get_oids(self) -> Tuple[Any, Any]:
cursor = await self.cursor()
rv0, rv1 = [], []
try:
await cursor.execute(
"SELECT t.oid, typarray "
"FROM pg_type t JOIN pg_namespace ns ON typnamespace = ns.oid "
"WHERE typname = 'hstore';"
)
async for oids in cursor:
if isinstance(oids, Mapping):
rv0.append(oids["oid"])
rv1.append(oids["typarray"])
else:
rv0.append(oids[0])
rv1.append(oids[1])
finally:
cursor.close()
return tuple(rv0), tuple(rv1)
async def _connect(self) -> "Connection":
try:
await self._poll(self._waiter, self._timeout) # type: ignore
except BaseException:
await asyncio.shield(self.close())
raise
if self._enable_json:
psycopg2.extras.register_default_json(self._conn)
if self._enable_uuid:
psycopg2.extras.register_uuid(conn_or_curs=self._conn)
if self._enable_hstore:
oid, array_oid = await self._get_oids()
psycopg2.extras.register_hstore(
self._conn, oid=oid, array_oid=array_oid
)
return self
def __await__(self) -> Generator[Any, None, "Connection"]:
return self._connect().__await__()
async def __aenter__(self) -> "Connection":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
await self.close()
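# Usage sketch (the DSN string below is a placeholder):
#
#   async def main():
#       async with connect('dbname=test user=postgres') as conn:
#           async with conn.cursor() as cur:
#               await cur.execute('SELECT 42')
#               print(await cur.fetchone())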
| 31.277512 | 79 | 0.597828 | 4,367 | 39,222 | 5.199908 | 0.127777 | 0.019024 | 0.024265 | 0.023252 | 0.380307 | 0.331161 | 0.278448 | 0.242426 | 0.214066 | 0.1791 | 0 | 0.002972 | 0.313676 | 39,222 | 1,253 | 80 | 31.302474 | 0.840596 | 0.144205 | 0 | 0.381688 | 0 | 0 | 0.084657 | 0.007598 | 0 | 0 | 0 | 0 | 0.008323 | 1 | 0.095125 | false | 0 | 0.02497 | 0.026159 | 0.237812 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca332f4e3962c40a19b554a8ccd714164ef6dac0 | 4,545 | py | Python | meme_scraper/meme_scraper/spiders/memes_spider.py | wyndwarrior/DankMemes | d807e7d7e5b619eb98e7398534ea389a98fe9500 | [
"MIT"
] | 20 | 2016-11-14T19:49:55.000Z | 2021-01-19T00:46:47.000Z | meme_scraper/meme_scraper/spiders/memes_spider.py | wyndwarrior/DankMemes | d807e7d7e5b619eb98e7398534ea389a98fe9500 | [
"MIT"
] | 3 | 2016-11-14T20:28:21.000Z | 2018-01-08T16:44:36.000Z | meme_scraper/meme_scraper/spiders/memes_spider.py | wyndwarrior/DankMemes | d807e7d7e5b619eb98e7398534ea389a98fe9500 | [
"MIT"
] | 2 | 2017-02-03T04:15:10.000Z | 2018-01-08T17:04:47.000Z | import scrapy
def is_gif(url):
return url[-4:] == '.gif' or url[-5:] == '.gifv'
def is_image(url):
return url[-4:] == '.jpg' or url[-5:] == '.jpeg' or url[-4:] == '.png'
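# Run with `scrapy crawl memes`; each yielded item carries the post's like
# count, the meme image URL, the title, and the source subreddit.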
class MemesSpider(scrapy.Spider):
name = "memes"
def __init__(self):
self.NUM_PAGES = 1
self.resolution = "500x500"
def start_requests(self):
reddit_url = 'https://www.reddit.com/'
subreddits = [
'r/dankmemes/',
'r/memes/',
'r/funny/',
'r/harambe/',
'r/dank_meme/',
]
for subreddit in subreddits:
            for page_type in ['', 'top/', 'top/?sort=top&t=all']:
                request = scrapy.Request(url=reddit_url + subreddit + page_type, callback=self.parse_reddit)
request.meta['subreddit'] = subreddit
yield request
def parse_reddit(self, response):
for post in response.css('div.sitetable div.thing'):
likes = post.css('div.unvoted::text').extract_first()
meme_url = post.css('a::attr(href)').extract_first()
title = post.css('a::text').extract_first()
if meme_url.lower().find(response.meta['subreddit']) >= 0:
full_next_url = response.urljoin(meme_url)
request = scrapy.Request(full_next_url, callback=self.parse_reddit_deep)
request.meta['subreddit'] = response.meta['subreddit']
request.meta['likes'] = likes
request.meta['title'] = title
yield request
continue
if is_gif(meme_url):
continue
if meme_url.find('imgur') >= 0 and not is_image(meme_url):
                meme_url += '.jpg'  # imgur page link without an extension; append .jpg for the direct image
yield {
'likes': likes,
'meme_url': meme_url,
'title': title,
'subreddit': response.meta['subreddit']
}
next_page = response.css('span.next-button a::attr(href)').extract_first()
try:
request = scrapy.Request(next_page, callback=self.parse_reddit)
request.meta['subreddit'] = response.meta['subreddit']
yield request
except TypeError:
pass
def parse_reddit_deep(self, response):
meme_url = response.css('div.media-preview-content a::attr(href)').extract_first()
if meme_url is not None:
if is_gif(meme_url):
return
if meme_url.find('imgur') >= 0 and not is_image(meme_url):
                meme_url += '.jpg'  # imgur page link without an extension; append .jpg for the direct image
yield {
'likes': response.meta['likes'],
'meme_url': meme_url,
'title': response.meta['title'],
'subreddit': response.meta['subreddit']
}
def parse_meme_generator(self, response):
for quote in response.css('div.item_medium_small'):
yield {
'base_url': quote.css('a::attr(href)').extract_first(),
'base_image': quote.css('a img::attr(src)').extract_first(),
}
base_url = quote.css('a::attr(href)').extract_first()
if base_url is not None:
full_base_url = response.urljoin(base_url)
request = scrapy.Request(full_base_url, callback=self.parse_meme_page)
request.meta['base_url'] = base_url
yield request
def parse_meme_page(self, response):
meme_name = ""
for header in response.css('h1'):
meme_name = header.css('h1::text').extract_first()
break
for meme_entry in response.css('div.item_medium_small'):
text = meme_entry.css('a img::attr(alt)').extract_first()
delimiter_index = text.find('-')
if delimiter_index >= 0:
text = text[delimiter_index:]
yield {
'base_url': response.meta['base_url'],
'text': text,
'meme_url': meme_entry.css('a img::attr(src)').extract_first(),
}
pager = response.css('div.pager ul.pager')
entries = [entry for entry in pager.css('ul li')]
next_url = entries[-1].css('a::attr(href)').extract_first()
if next_url is not None:
full_next_url = response.urljoin(next_url)
request = scrapy.Request(full_next_url, callback=self.parse_meme_page)
request.meta['base_url'] = response.meta['base_url']
yield request | 37.875 | 94 | 0.540374 | 526 | 4,545 | 4.477186 | 0.212928 | 0.053503 | 0.02293 | 0.040764 | 0.464968 | 0.357113 | 0.259023 | 0.162208 | 0.162208 | 0.131635 | 0 | 0.006205 | 0.326293 | 4,545 | 120 | 95 | 37.875 | 0.7629 | 0.005061 | 0 | 0.22549 | 0 | 0 | 0.145133 | 0.014823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0.009804 | 0.009804 | 0.019608 | 0.137255 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca33e7b95795b18f73106c41b3e69597ae402a94 | 9,706 | py | Python | src/phase/utils.py | PacificBiosciences/pbampliconclustering | cc0efd2a88a56852ffff5dd04e90cb394ff46671 | [
"BSD-3-Clause-Clear"
] | 5 | 2019-09-27T20:10:33.000Z | 2020-11-15T13:03:47.000Z | src/phase/utils.py | PacificBiosciences/pbampliconclustering | cc0efd2a88a56852ffff5dd04e90cb394ff46671 | [
"BSD-3-Clause-Clear"
] | 2 | 2021-01-27T12:32:24.000Z | 2021-02-02T18:02:27.000Z | src/phase/utils.py | PacificBiosciences/pbampliconclustering | cc0efd2a88a56852ffff5dd04e90cb394ff46671 | [
"BSD-3-Clause-Clear"
] | 3 | 2020-12-28T10:12:49.000Z | 2020-12-28T10:13:13.000Z | import pysam
import pandas as pd
import mappy as mp
from itertools import chain
from statistics import median
from collections import Counter
from scipy.stats import entropy
MINLEN=50
MAXLEN=50000
def summary(splitter,caller):
try:
minsig = splitter.minSignal
minfrac = splitter._minFrac
minreads = splitter._minReads
feat = len(splitter.sigVar.columns)
except AttributeError:
minsig,minfrac,minreads = ('NA',)*3
feat = len(splitter.nodes)
mincnt = splitter.minCount
total = splitter.stats['total alignments']
prim = splitter.stats['primary alignments']
plup = splitter.stats['pileup alignments']
clust = splitter.stats['clustered reads']
ref = caller.readCounts[0]
lrg = max(caller.readCounts.values())
noise = caller.readCounts[-1]
conly = {k:v for k,v in caller.readCounts.items() if k!=-1}
nclust = len(conly)
entr = entropy(list(conly.values()))
tcount = Counter(caller.variantGroupMap.values())
tref = tcount[0]
tlrg = max(tcount.values())
tnoise = tcount[-1]
tconly = {k:v for k,v in tcount.items() if k!=-1}
ngrps = len(tconly)
tentr = entropy(list(tconly.values()))
return f'''
Input Params
------------
minSignal:\t\t{minsig}
minFrac:\t\t{minfrac}
minReads:\t\t{minreads}
Read Counts
-----------
Total Alignments:\t{total:,}
Primary Alignments:\t{prim:,}\t({prim/total:.2})
Pileup Reads:\t\t{plup:,}\t({plup/total:.2})
Clustered Reads:\t{clust:,}\t({clust/total:.2}) (covering all variant pos)
Cluster Info
------------
Min Cluster Size:\t{mincnt:,}
Cluster Features:\t{feat:,}
Cluster Stats (out of {clust:,})
-------------
Reference Calls:\t{ref:,}\t({ref/clust:.2})
Largest Fraction:\t{lrg:,}\t({lrg/clust:.2})
Noise Reads:\t\t{noise:,}\t({noise/clust:.2})
N Clusters:\t\t{nclust:,}
Shannon Entropy:\t{entr:.4}
Total Fractions (out of {clust:,})
---------------
Reference Calls:\t{tref:,}\t({tref/clust:.2})
Largest Fraction:\t{tlrg:,}\t({tlrg/clust:.2})
Noise Reads(<3):\t{tnoise:,}\t({tnoise/clust:.2})
Unique Var Comb:\t{ngrps:,}
Shannon Entropy:\t{tentr:.4}
'''
def getFileType(fname):
ext = fname.rsplit('.',1)[-1]
if ext == 'bam':
return 'bam'
elif ext in ['fastq','fq','fasta','fa']:
return 'fastx'
else:
raise PhaseUtils_Error(f'unknown filetype extension: {ext}')
def hpCollapse(maxLen=1):
def csgen(sequence):
last = None
for char in sequence:
if char == last:
n += 1
else:
last = char
n = 0
if n < maxLen:
yield char
return lambda seq: ''.join(csgen(seq))
def writeSimpleBED(chrm,start,stop,name,cov,filename,mode='w'):
with open(filename,mode) as ofile:
ofile.write('\t'.join(map(str,[chrm,start,stop,name,cov])) + '\n')
def writeRegionBam(inBam,outBam,region):
with pysam.AlignmentFile(inBam) as ibam:
with pysam.AlignmentFile(outBam,'wb',template=ibam) as obam:
for rec in ibam.fetch(region=region):
obam.write(rec)
pysam.index(outBam)
return outBam
class SimpleRecord:
def __init__(self,name,sequence):
self.name = name
self.sequence = sequence
def __len__(self):
return len(self.sequence)
class RecordGenerator:
def __init__(self,inFile,fileType=None,region=None,minLength=MINLEN,maxLength=MAXLEN):
self.inFile = inFile
self.region = region
self.minLen = minLength
self.maxLen = maxLength
ftype = getFileType(inFile) if fileType is None else fileType
self.generator = {'bam' : self._bamIter,
'fastx': self._fastxIter}[ftype]
self.counter = Counter()
def getNameIdx(self):
'''Run through without returning sequence'''
return {rec.name:i
for i,rec in enumerate(self.generator(self.inFile,
region=self.region,
track=False))}
def report(self):
other = ",".join([f"{n}:{c}" for n,c in self.counter.items() if n!="pass"])
return f'Alignments loaded: {self.counter["pass"]}; filtered: {other}'
def _bamIter(self,bamfile,track=True,**kwargs):
bam = pysam.AlignmentFile(bamfile,check_sq=False)
if kwargs['region']:
recgen = bam.fetch(region=kwargs['region'])
else:
recgen = bam
for rec in recgen:
kind = self._classifyBam(rec)
if track:
self.counter[kind] += 1
if kind == 'pass':
yield SimpleRecord(rec.query_name,rec.query_sequence)
def _fastxIter(self,fastx,track=True,**kwargs):
for rec in pysam.FastxFile(fastx):
kind = self._classifyFq(rec)
if track:
self.counter[kind] += 1
if kind == 'pass':
yield SimpleRecord(rec.name,rec.sequence)
def _classifyBam(self,rec):
if rec.flag & 0x100:
return 'secondary'
if rec.flag & 0x800:
return 'supplementary'
if rec.query_length < self.minLen:
return f'short(<{self.minLen})'
if rec.query_length > self.maxLen:
return f'long(>{self.maxLen})'
else:
return 'pass'
def _classifyFq(self,rec):
if len(rec.sequence) < self.minLen:
return f'short(<{self.minLen})'
if len(rec.sequence) > self.maxLen:
return f'long(>{self.maxLen})'
else:
return 'pass'
def __iter__(self):
return self.generator(self.inFile,region=self.region)
class RecTracker:
def __init__(self,):
self.counter = Counter()
def __call__(self,rec):
try:
if rec.flag & 0x100:
self.counter['secondary'] +=1
elif rec.flag & 0x800:
self.counter['supplementary'] +=1
else:
self.counter['primary'] +=1
except AttributeError: #no flag == fastarec
self.counter['primary'] +=1
def __repr__(self):
other = ",".join([f"{n}:{c}" for n,c in self.counter.items() if n!="primary"])
return f'Alignments loaded: {self.counter["primary"]}; filtered: {other}'
class PilerUpper:
def __init__(self,inFile,region=None,refSeq=None,method='median',
minLength=50,maxLength=1e6,maxHP=1,log=None,
multifunc=None,nproc=1):
self.recGen = RecordGenerator(inFile,region=region,
minLength=minLength,
maxLength=maxLength)
self.collapse = hpCollapse(maxHP) if maxHP else (lambda x:x)
self.log = log
self.nproc = nproc
self.refseq = self._getRef(refSeq,method)
self.aligner = self._getAligner()
self.getRow = self._getRow if nproc==1 else multifunc
self.varDf = self._fillVDF()
def _getRef(self,reference,method):
if reference:
#refSeq must be a string DNA sequence [ATGC]
return self.collapse(reference)
elif method == 'first':
for rec in self.recGen:
size = len(rec.sequence)
                if self.recGen.minLen <= size <= self.recGen.maxLen:
return self.collapse(rec.sequence)
elif method == 'median':
seqs = pd.Series(rec.sequence for rec in self.recGen)
medIdx = seqs.str.len().sort_values().index[int(len(seqs)/2)]
return self.collapse(seqs[medIdx])
#TODO add random selection method
else:
raise ValueError(f'No method named {method}')
def _getAligner(self):
return mp.Aligner(seq=self.refseq,preset='splice',best_n=1)
def map(self,seq):
try:
return list(self.aligner.map(self.collapse(seq),cs=True))[0]
except IndexError:
raise PhaseUtils_Error(f'Unable to align sequence: {seq}')
def _getRow(self,rec):
return self.expandCS(self.map(rec.sequence),name=rec.name)
def _fillVDF(self):
if self.log:
self.log.info('Aligning compressed reads')
if self.nproc != 1:
self.log.warn('Parallel processing of hp compression not implemented yet. Proceeding with 1 proc')
result = map(self._getRow,self.recGen)
if self.log:
self.log.debug(self.recGen.report())
return pd.concat(result,axis=1).T
def getOps(self,csString):
ops = ':*-+~'
op = None
val = ''
for s in csString:
if s in ops:
                if op:
                    yield (op, val)
                op, val = s, ''
else:
val += s
yield (op,val)
def parseOp(self,opGrp):
op,val = opGrp
if op == ':':
return ('.',)*int(val)
elif op == '-':
return (op+val,) + ('*',)*(len(val)-1)
else:
return (op+val,)
def expandCS(self,aln,name=None):
out = pd.Series(chain(*map(self.parseOp,self.getOps(aln.cs))))
#fix insertion locations
        insIdx = out[out.str.contains(r'\+')].index
out[insIdx-1] = out[insIdx]
out.drop(insIdx,inplace=True)
out.index = range(aln.r_st,len(out)+aln.r_st)
if name:
out.name = name
return out
class PhaseUtils_Error(Exception):
pass
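# Usage sketch (the file name and region are placeholders):
#
#   piler = PilerUpper('amplicons.bam', region='chr1:1000-2000', maxHP=1)
#   piler.varDf   # one row per read of expanded cs-string variant calls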
| 33.239726 | 110 | 0.562127 | 1,159 | 9,706 | 4.653149 | 0.25453 | 0.024476 | 0.007417 | 0.002225 | 0.145744 | 0.106434 | 0.081216 | 0.066753 | 0.054144 | 0.054144 | 0 | 0.010027 | 0.301257 | 9,706 | 291 | 111 | 33.353952 | 0.785167 | 0.016073 | 0 | 0.149606 | 0 | 0.003937 | 0.162683 | 0.056604 | 0 | 0 | 0.002096 | 0.003436 | 0 | 1 | 0.110236 | false | 0.027559 | 0.027559 | 0.015748 | 0.271654 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca34d060f848bab1e475f4337bc0cb9c7625f083 | 7,472 | py | Python | bioptim/optimization/optimization_variable.py | Naassila/bioptim | 511e7ba315de5ca8c3bdcc85decd43bac30676b9 | [
"MIT"
] | null | null | null | bioptim/optimization/optimization_variable.py | Naassila/bioptim | 511e7ba315de5ca8c3bdcc85decd43bac30676b9 | [
"MIT"
] | null | null | null | bioptim/optimization/optimization_variable.py | Naassila/bioptim | 511e7ba315de5ca8c3bdcc85decd43bac30676b9 | [
"MIT"
] | null | null | null | from typing import Union
import numpy as np
from casadi import MX, SX, vertcat, horzcat
from ..misc.mapping import BiMapping
class OptimizationVariable:
"""
An optimization variable and the indices to find this variable in its state or control vector
Attributes
----------
name: str
The name of the variable
mx: MX
The MX variable associated with this variable
index: range
The indices to find this variable
mapping: BiMapping
The mapping of the MX
parent_list: OptimizationVariableList
The parent that added this entry
Methods
-------
__len__(self)
The len of the MX reduced
cx(self)
The CX of the variable (starting point)
cx_end(self)
The CX of the variable (ending point)
"""
def __init__(self, name: str, mx: MX, index: [range, list], mapping: BiMapping = None, parent_list=None):
"""
Parameters
----------
name: str
The name of the variable
mx: MX
The MX variable associated with this variable
index: [range, list]
The indices to find this variable
parent_list: OptimizationVariableList
The list the OptimizationVariable is in
"""
self.name: str = name
self.mx: MX = mx
self.index: [range, list] = index
self.mapping: BiMapping = mapping
self.parent_list: OptimizationVariableList = parent_list
def __len__(self):
"""
The len of the MX reduced
Returns
-------
The number of element (correspond to the nrows of the MX)
"""
return len(self.index)
@property
def cx(self):
"""
The CX of the variable
"""
if self.parent_list is None:
raise RuntimeError(
"OptimizationVariable must have been created by OptimizationVariableList to have a cx. "
"Typically 'all' cannot be used"
)
return self.parent_list.cx[self.index, :]
@property
def cx_end(self):
if self.parent_list is None:
raise RuntimeError(
"OptimizationVariable must have been created by OptimizationVariableList to have a cx. "
"Typically 'all' cannot be used"
)
return self.parent_list.cx_end[self.index, :]
class OptimizationVariableList:
"""
A list of OptimizationVariable
Attributes
----------
elements: list
Each of the variable separated
_cx: Union[MX, SX]
The symbolic MX or SX of the list (starting point)
_cx_end: Union[MX, SX]
The symbolic MX or SX of the list (ending point)
mx_reduced: MX
The reduced MX to the size of _cx
Methods
-------
__getitem__(self, item: Union[int, str])
Get a specific variable in the list, whether by name or by index
append(self, name: str, cx: list, mx: MX, bimapping: BiMapping)
Add a new variable to the list
cx(self)
        The cx of all elements together (starting point)
    cx_end(self)
        The cx of all elements together (ending point)
mx(self)
The MX of all variable concatenated together
shape(self)
The size of the CX
__len__(self)
The number of variables in the list
"""
def __init__(self):
self.elements: list = []
self._cx: Union[MX, SX, np.ndarray] = np.array([])
self._cx_end: Union[MX, SX, np.ndarray] = np.array([])
self._cx_intermediates: list = []
self.mx_reduced: MX = MX.sym("var", 0, 0)
def __getitem__(self, item: Union[int, str]):
"""
Get a specific variable in the list, whether by name or by index
Parameters
----------
item: Union[int, str]
The index or name of the element to return
Returns
-------
The specific variable in the list
"""
if isinstance(item, int):
return self.elements[item]
elif isinstance(item, str):
if item == "all":
index = []
for elt in self.elements:
index.extend(list(elt.index))
return OptimizationVariable("all", self.mx, index)
for elt in self.elements:
if item == elt.name:
return elt
raise KeyError(f"{item} is not in the list")
else:
raise ValueError("OptimizationVariableList can be sliced with int or str only")
def append(self, name: str, cx: list, mx: MX, bimapping: BiMapping):
"""
Add a new variable to the list
Parameters
----------
name: str
The name of the variable
cx: list
The list of SX or MX variable associated with this variable
mx: MX
The MX variable associated with this variable
bimapping: BiMapping
The Mapping of the MX against CX
"""
index = range(self._cx.shape[0], self._cx.shape[0] + cx[0].shape[0])
self._cx = vertcat(self._cx, cx[0])
self._cx_end = vertcat(self._cx_end, cx[-1])
for i, c in enumerate(cx[1:-1]):
if i >= len(self._cx_intermediates):
self._cx_intermediates.append(c)
else:
self._cx_intermediates[i] = vertcat(self._cx_intermediates[i], c)
self.mx_reduced = vertcat(self.mx_reduced, MX.sym("var", cx[0].shape))
self.elements.append(OptimizationVariable(name, mx, index, bimapping, self))
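# A minimal usage sketch (assumed shapes, not from the original source):
# appending two 3x1 variables stacks their cx windows, so each entry gets
# its own index range into the concatenated vector:
#     var_list = OptimizationVariableList()
#     var_list.append("q", [MX.sym("q", 3, 1)], MX.sym("q_mx", 3, 1), None)
#     var_list.append("qdot", [MX.sym("qdot", 3, 1)], MX.sym("qdot_mx", 3, 1), None)
#     var_list["q"].index     # range(0, 3)
#     var_list["qdot"].index  # range(3, 6)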
@property
def cx(self):
"""
The cx of all elements together (starting point)
"""
return self._cx[:, 0]
@property
def cx_end(self):
"""
The cx of all elements together (ending point)
"""
return self._cx_end[:, 0]
@property
def cx_intermediates_list(self):
"""
The cx of all elements together at the intermediate points
"""
return self._cx_intermediates
@property
def mx(self):
"""
Returns
-------
The MX of all variable concatenated together
"""
return vertcat(*[elt.mx for elt in self.elements])
def __contains__(self, item: str):
"""
If the item of name item is in the list
"""
for elt in self.elements:
if item == elt.name:
return True
else:
return False
def keys(self):
"""
All the names of the elements in the list
"""
return [elt.name for elt in self.elements]
@property
def shape(self):
"""
The size of the CX
"""
return self._cx.shape[0]
def __len__(self):
"""
The number of variables in the list
"""
return len(self.elements)
def __iter__(self):
"""
Allow for the list to be used in a for loop
Returns
-------
A reference to self
"""
self._iter_idx = 0
return self
def __next__(self):
"""
Get the name of the next variable in the list
Returns
-------
The name of the next variable in the list
"""
self._iter_idx += 1
if self._iter_idx > len(self):
raise StopIteration
return self[self._iter_idx - 1].name
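# Iterating the list yields variable names (a small sketch):
#     for name in var_list:
#         print(name, var_list[name].index)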
| 26.877698 | 109 | 0.55795 | 918 | 7,472 | 4.429194 | 0.149237 | 0.024594 | 0.013773 | 0.014757 | 0.504181 | 0.473684 | 0.416134 | 0.361043 | 0.336203 | 0.303246 | 0 | 0.003511 | 0.351981 | 7,472 | 277 | 110 | 26.974729 | 0.836225 | 0.369781 | 0 | 0.305263 | 0 | 0 | 0.085128 | 0.018687 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178947 | false | 0 | 0.042105 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca356ce5b01f12eac1c48272cd08a7d0cde42c26 | 519 | py | Python | AdventCode2017_Python/test.py | yomodev/AdventOfCode | 79c4dd1d129a4ef78631f70564041ed2e9b499eb | [
"MIT"
] | 1 | 2019-12-05T14:55:24.000Z | 2019-12-05T14:55:24.000Z | AdventCode2017_Python/test.py | yomodev/AdventOfCode | 79c4dd1d129a4ef78631f70564041ed2e9b499eb | [
"MIT"
] | null | null | null | AdventCode2017_Python/test.py | yomodev/AdventOfCode | 79c4dd1d129a4ef78631f70564041ed2e9b499eb | [
"MIT"
] | null | null | null | firewall = {}
with open('day13.txt') as file:
for line in file.readlines():
depth, layer_range = map(int, line.strip().split(':'))
firewall[depth] = layer_range
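# A scanner with range R returns to the top of its layer every 2R-2
# picoseconds; the packet reaches layer `position` at time delay + position,
# so it is caught exactly when that time is a multiple of the period.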
delay = 0
while True:
caught = False
scanner = delay
for position in range(max(firewall.keys())+1):
if position in firewall and scanner % (firewall[position] * 2 - 2) == 0:
caught = True
break
scanner += 1
if caught:
delay += 1
else:
print(delay)
break
| 24.714286 | 80 | 0.55684 | 64 | 519 | 4.484375 | 0.546875 | 0.069686 | 0.10453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 0.323699 | 519 | 21 | 81 | 24.714286 | 0.792023 | 0 | 0 | 0.105263 | 0 | 0 | 0.019231 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
ca389eab535ecdfd9adf226e5f2e3a90095ea7c7 | 1,330 | py | Python | edwiges/__init__.py | loggi/maildog | 1439365b86015f10c8057d0bb32c3aa6810390b5 | [
"Apache-2.0"
] | 12 | 2017-07-22T02:29:56.000Z | 2019-11-25T14:20:37.000Z | edwiges/__init__.py | loggi/maildog | 1439365b86015f10c8057d0bb32c3aa6810390b5 | [
"Apache-2.0"
] | 3 | 2019-12-26T16:40:03.000Z | 2022-03-21T22:16:50.000Z | edwiges/__init__.py | loggi/maildog | 1439365b86015f10c8057d0bb32c3aa6810390b5 | [
"Apache-2.0"
] | null | null | null | """Main entry point
"""
import os
import logging
import logmatic
from pyramid.config import Configurator, ConfigurationError
REQUIRED_SETTINGS = [
'edwiges.provider_host',
'edwiges.provider_port',
]
ENV_SETTINGS = [
'edwiges.provider_host',
'edwiges.provider_port',
'edwiges.provider_username',
'edwiges.provider_password',
]
logger = logging.getLogger('edwiges')
handler = logging.StreamHandler()
handler.setFormatter(logmatic.JsonFormatter())
logger.addHandler(handler)
logger.setLevel(logging.INFO)
def get_config_environ(name):
env_name = name.replace('.', '_').upper()
return os.environ.get(env_name)
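# For example, 'edwiges.provider_host' is looked up as the environment
# variable EDWIGES_PROVIDER_HOST.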
def main(global_config, **settings):
for name in ENV_SETTINGS:
settings[name] = get_config_environ(name) or settings.get(name)
for name in REQUIRED_SETTINGS:
if settings.get(name) is None:
error = 'configuration entry for {} is missing'.format(name)
logger.critical(error)
raise ConfigurationError(error)
config = Configurator(settings=settings)
config.include("cornice")
config.scan("edwiges.views")
host = settings['edwiges.provider_host']
port = settings['edwiges.provider_port']
logger.info("Starting server", extra={'host': host, 'port': port})
return config.make_wsgi_app()
| 23.333333 | 71 | 0.701504 | 152 | 1,330 | 5.993421 | 0.401316 | 0.131723 | 0.100988 | 0.088913 | 0.100988 | 0.100988 | 0.100988 | 0 | 0 | 0 | 0 | 0 | 0.178195 | 1,330 | 56 | 72 | 23.75 | 0.833486 | 0.01203 | 0 | 0.105263 | 0 | 0 | 0.201224 | 0.13466 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.026316 | 0.105263 | 0 | 0.210526 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca38c798ccfaff9e029635bf136f595aa05ba85c | 1,817 | py | Python | test/test_ofc.py | mbdevpl/open-fortran-parser-xml | 127f4ec2ba7cd06eb010794560eb8e4b9494fdfe | [
"Apache-2.0"
] | 16 | 2017-08-01T03:11:17.000Z | 2021-03-04T13:11:43.000Z | test/test_ofc.py | mbdevpl/open-fortran-parser-xml | 127f4ec2ba7cd06eb010794560eb8e4b9494fdfe | [
"Apache-2.0"
] | 14 | 2017-08-08T12:26:47.000Z | 2021-04-08T14:27:39.000Z | test/test_ofc.py | mbdevpl/open-fortran-parser-xml | 127f4ec2ba7cd06eb010794560eb8e4b9494fdfe | [
"Apache-2.0"
] | 9 | 2018-04-04T08:08:15.000Z | 2022-02-01T13:23:59.000Z | """Tests for ofc_wrapper module."""
import logging
import pathlib
import platform
import tempfile
import unittest
from open_fortran_parser.ofc_wrapper import CodeForm, execute_compiler, transpile
_LOG = logging.getLogger(__name__)
_HERE = pathlib.Path(__file__).resolve().parent
INPUT_PATHS = [_HERE.joinpath('examples', _) for _ in ['empty.f']]
INDENTS = (None, 2, 4, 8)
FORMS = (None, CodeForm.Fixed, CodeForm.Free)
class Tests(unittest.TestCase):
maxDiff = None
@unittest.skipIf(platform.system() == 'Windows', 'OFC not available on Windows')
def test_execute_compiler(self):
for input_path in INPUT_PATHS:
for indent in INDENTS:
for form in FORMS:
output_file = tempfile.NamedTemporaryFile(delete=False)
output_file_path = pathlib.Path(output_file.name)
for output_path in (None, output_file_path):
with self.subTest(input_path=input_path, output_path=output_path,
indent=indent, form=form):
result = execute_compiler(input_path, output_path, indent, form)
self.assertEqual(result.returncode, 0, msg=result)
@unittest.skipIf(platform.system() == 'Windows', 'OFC not available on Windows')
def test_transpile(self):
for input_path in INPUT_PATHS:
for indent in INDENTS:
for form in FORMS:
with self.subTest(input_path=input_path, indent=indent, form=form):
code = transpile(input_path, indent, form, raise_on_error=True)
self.assertIsNotNone(code)
self.assertIsInstance(code, str)
self.assertGreater(len(code), 0)
| 38.659574 | 92 | 0.617501 | 205 | 1,817 | 5.253659 | 0.370732 | 0.066852 | 0.038997 | 0.051996 | 0.336119 | 0.295265 | 0.295265 | 0.233983 | 0.233983 | 0.233983 | 0 | 0.003897 | 0.293891 | 1,817 | 46 | 93 | 39.5 | 0.835542 | 0.01596 | 0 | 0.228571 | 0 | 0 | 0.047699 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 1 | 0.057143 | false | 0 | 0.171429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca395e36cbe0de5a7289dd4b7706b1a242253411 | 4,892 | py | Python | topological_sort.py | Fieldhunter/Data-Structures-and-Algorithms | 8e5ea4313bf27bbfd9dd16cfaf3e7c56cf42a88a | [
"Apache-2.0"
] | 2 | 2019-02-24T10:01:39.000Z | 2019-02-24T10:01:41.000Z | topological_sort.py | Fieldhunter/Data-Structures-and-Algorithms | 8e5ea4313bf27bbfd9dd16cfaf3e7c56cf42a88a | [
"Apache-2.0"
] | null | null | null | topological_sort.py | Fieldhunter/Data-Structures-and-Algorithms | 8e5ea4313bf27bbfd9dd16cfaf3e7c56cf42a88a | [
"Apache-2.0"
] | null | null | null | """
@version: python3.6
@author: Fieldhunter
@contact: 1677532160yuan@gmail.com
@time: 2020-05-03
"""
import functools
class Node():
def __init__(self,element):
self.data = element
self.in_degree = 0
self.out_degree = 0
class Adjacency_list():
"""
self.__node_mapping records the correspondence between Node objects
and node ordinal numbers.
self.__mapping records the correspondence between node data
and node ordinal numbers.
self.__data expresses the edges between nodes by their ordinal numbers.
"""
def __init__(self):
self.__data = {}
self.__node_mapping = []
self.__mapping = []
def add_data(self, start, end):
start, end = str(start), str(end)
if start not in self.__mapping:
new_node = Node(start)
self.__node_mapping.append(new_node)
self.__mapping.append(start)
start_num = len(self.__mapping) - 1
else:
start_num = self.__mapping.index(start)
if end not in self.__mapping:
new_node = Node(end)
self.__node_mapping.append(new_node)
self.__mapping.append(end)
end_num = len(self.__mapping) - 1
else:
end_num = self.__mapping.index(end)
if not self.__data.get(start_num, False):
new_list = [end_num]
self.__data[start_num] = new_list
self.__node_mapping[start_num].out_degree += 1
self.__node_mapping[end_num].in_degree += 1
else:
if end_num in self.__data[start_num]:
print("data is in list")
else:
self.__data[start_num].append(end_num)
self.__node_mapping[start_num].out_degree += 1
self.__node_mapping[end_num].in_degree += 1
def kahn(self):
"""
The in_list array records the in-degree of each node.
The queue array is a queue that stores the node subscripts to be processed.
The result array is used to store the result order.
"""
in_list = []
for i in self.__node_mapping:
in_list.append(i.in_degree)
queue = []
result = []
try:
# find the starting node subscript with 0 in_degree
pointer = in_list.index(0)
queue.append(pointer)
in_list[pointer] = None
except:
pass
while len(queue) != 0:
pointer = queue[0]
del queue[0]
result.append(self.__mapping[pointer])
if self.__data.get(pointer, False):
for i in self.__data.get(pointer):
in_list[i] -= 1
# set None to the node which has 0 in_degree
if in_list[i] == 0:
queue.append(i)
in_list[i] = None
"""
At the end, if not every entry in in_list is None,
there is a ring (cycle) in the graph.
"""
if in_list.count(None) == len(in_list):
print(result)
else:
print("A ring in map")
def DFS(self):
def loop_output(num, result, count):
count += 1
"""
If the function executes more times than the total
number of nodes, there must be a ring.
"""
if count > len(self.__mapping):
return False
if inverse_adjacency_list.get(num, False):
for j in inverse_adjacency_list.get(num):
result = loop_output(j, result, count)
if result != False:
# if this node has not been output yet, output it
if check_list[num] != True:
check_list[num] = True
result.append(self.__mapping[num])
return result
"""
The check_list array is used to determine whether the subscript node
has been output.
The out_list array records the number of out_degree of each node.
The result array is used to store the result order.
The count records how many times the loop_output function has executed.
"""
check_list = [False] * len(self.__mapping)
out_list = []
result = []
count = 0
for i in self.__node_mapping:
out_list.append(i.out_degree)
# building a reverse adjacency list
inverse_adjacency_list = {}
for i in self.__data:
if self.__data.get(i, False):
for j in self.__data.get(i):
if inverse_adjacency_list.get(j, False):
inverse_adjacency_list[j].append(i)
else:
new_list = [i]
inverse_adjacency_list[j] = new_list
try:
"""
Find a node whose out_degree is 0,
that is, the last node of the topological order.
"""
pointer = out_list.index(0)
result = loop_output(pointer, result, count)
if result == False:
print("A ring in map")
else:
print(result)
except:
print("A ring in map")
"""
Decorator that checks the code used to access the class's internals.
Requiring a code simply prevents the adjacency list from being
tampered with maliciously while still providing an API for developers.
"""
def __check_code(func):
@functools.wraps(func)
def check(self, code):
if code != 'adsf;{h3096j34ka`fd>&/edgb^45:6':
raise Exception('code is wrong!')
result = func(self, code)
return result
return check
@__check_code
def return_data(self, code):
return self.__data
@__check_code
def return_mapping(self, code):
return self.__mapping, self.__node_mapping
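# A minimal usage sketch (assumed, not part of the original module):
#     graph = Adjacency_list()
#     graph.add_data('a', 'b')
#     graph.add_data('b', 'c')
#     graph.add_data('a', 'c')
#     graph.kahn()  # prints ['a', 'b', 'c']
#     graph.DFS()   # prints ['a', 'b', 'c']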
| 26.301075 | 79 | 0.684791 | 752 | 4,892 | 4.219415 | 0.208777 | 0.052001 | 0.052001 | 0.012606 | 0.239836 | 0.168925 | 0.141822 | 0.124803 | 0.124803 | 0.065553 | 0 | 0.012741 | 0.213818 | 4,892 | 185 | 80 | 26.443243 | 0.812272 | 0.147997 | 0 | 0.258621 | 0 | 0 | 0.030546 | 0.009565 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0.008621 | 0.008621 | 0.017241 | 0.163793 | 0.051724 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca3c1444dbdd47210fdc594c38dc6c7fc4b597b3 | 3,247 | py | Python | 5.3c_matrix_multiplication.py | bayu-wilson/phys218_example | f6d624d0747c42b29e9855c34a2ab1af28d97654 | [
"MIT"
] | null | null | null | 5.3c_matrix_multiplication.py | bayu-wilson/phys218_example | f6d624d0747c42b29e9855c34a2ab1af28d97654 | [
"MIT"
] | null | null | null | 5.3c_matrix_multiplication.py | bayu-wilson/phys218_example | f6d624d0747c42b29e9855c34a2ab1af28d97654 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Program to multiply 2 matrices together. (nxm)*(mxl)"""
import numpy as np
import timeit
speed_test = True
def check_shape(A,B):
"""Checking if it's possible to multiply these matrices together"""
shape_A = np.shape(A)
shape_B = np.shape(B)
if shape_A[1] != shape_B[0]:
raise ValueError("shapes {0} and {1} not aligned".format(shape_A,shape_B))
def matrix_mult_nested(A,B):
"""Matrix multiplication using nested for loops"""
check_shape(A,B)
rows = np.shape(A)[0]
inner = np.shape(A)[1] #shared dimension: columns of A == rows of B
cols = np.shape(B)[1]
C = np.zeros((rows,cols)) #resultant matrix after multiplication
for i in range(rows):
for j in range(cols): #loops give [i,j] location in matrix
for k in range(inner): #each [i,j] location is a dot-product of a row of A and a column of B
C[i][j] += A[i][k]*B[k][j]
return C
def matrix_mult_list_comp(A,B):
"""Matrix multiplication using list comprehension"""
check_shape(A,B)
rows = np.shape(A)[0]
inner = np.shape(A)[1] #shared dimension
cols = np.shape(B)[1]
C = [[np.sum([A[i][k]*B[k][j] for k in range(inner)]) for j in range(cols)] for i in range(rows)]
return C
def matrix_mult_numpy(A,B):
"""Matrix multiplication using numpy dot function"""
C = np.dot(A,B)
return C
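# Note: for 2-D arrays the same product can also be written as A @ B
# (the matmul operator, Python 3.5+), which dispatches to the same
# optimized routine as np.dot.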
def time_elapsed(func):
"""Crude timing helper: runs the zero-argument callable once, returns elapsed seconds"""
start = timeit.default_timer()
func()
end = timeit.default_timer()
return end - start
#Example matrices
X = np.array([[1,2],[3,4],[5,6]])
Y = np.array([[5,6],[7,8]])
print("Example Matrices")
print("X * Y = \n{0} * \n{1}".format(X,Y))
print("Nested for loop:\n", matrix_mult_nested(X,Y))
print("List comprehension:\n", matrix_mult_list_comp(X,Y))
print("Numpy dot function:\n", matrix_mult_numpy(X,Y))
if speed_test: #speed test is optional
#Large Matrix
np.random.seed(1)
large_X = np.random.rand(20,20)
large_X_inv = np.linalg.inv(large_X)
print("\nSpeed test")
#I only made these functions because they take no arguments, so I can use timeit on them easily
#All three must multiply the same matrices for the comparison to be fair
def func1():
    return matrix_mult_nested(large_X,large_X_inv)
def func2():
    return matrix_mult_list_comp(large_X,large_X_inv)
def func3():
    return matrix_mult_numpy(large_X,large_X_inv)
t_nested = np.min(timeit.repeat("func1()", setup="from __main__ import func1",repeat=5,number= 100))/100
t_list_comp = np.min(timeit.repeat("func2()", setup="from __main__ import func2",repeat=5,number= 100))/100
t_numpy = np.min(timeit.repeat("func3()", setup="from __main__ import func3",repeat=5,number= 100))/100
print("Nested for loop:")
print("{0:.3e} seconds".format(t_nested))
print("List comprehension:")
print("{0:.3e} seconds".format(t_list_comp))
print("Numpy dot function:")
print("{0:.3e} seconds".format(t_numpy))
#Code graveyard
# print(timeit.repeat("func2()", setup="from __main__ import func2"))
# print(timeit.repeat("func3()", setup="from __main__ import func3")
# t_nested = time_elasped(matrix_mult_nested(large_X,large_X_inv))
# t_list_comp = time_elasped(matrix_mult_list_comp(large_X,large_X_inv))
# t_numpy = time_elasped(matrix_mult_numpy(large_X,large_X_inv))
# print("Nested for loop:\n", t_nested*1e7)
# #np.round(matrix_mult_nested(large_X,large_X_inv)))
# print("List comprehension:\n", t_list_comp*1e7)
# #np.round(matrix_mult_list_comp(large_X,large_X_inv)))
# print("Numpy dot function:\n", t_numpy*1e7)
# #np.round(matrix_mult_numpy(large_X,large_X_inv)))
| 30.92381 | 108 | 0.706498 | 573 | 3,247 | 3.809773 | 0.228621 | 0.046725 | 0.032982 | 0.038479 | 0.460834 | 0.293175 | 0.21301 | 0.21301 | 0.067797 | 0.037563 | 0 | 0.025469 | 0.12935 | 3,247 | 104 | 109 | 31.221154 | 0.746728 | 0.36988 | 0 | 0.152542 | 0 | 0 | 0.169262 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.084746 | 0.050847 | 0.338983 | 0.20339 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca3ce730517d85d71f6c4ae8b988f12d3ee052be | 2,349 | py | Python | examples/3-shared-resources/new2.py | sietse/simpy-fsm | a62eb6feeb238147626f20530cfc700f45136f12 | [
"MIT"
] | 2 | 2020-08-04T09:00:55.000Z | 2021-11-09T22:43:39.000Z | examples/3-shared-resources/new2.py | sietse/simpy-fsm | a62eb6feeb238147626f20530cfc700f45136f12 | [
"MIT"
] | 8 | 2020-05-25T09:53:36.000Z | 2020-05-25T10:10:50.000Z | examples/3-shared-resources/new2.py | sietse/simpy-fsm | a62eb6feeb238147626f20530cfc700f45136f12 | [
"MIT"
] | 2 | 2020-08-04T09:01:03.000Z | 2020-12-02T20:27:58.000Z | # Behaviour is identical to simpy_3_resource.py. The difference in
# implementation: Awaiting a charging station + using it is not one
# state that starts with a context manager, but it is two separate
# states. awaiting_battery() awaits the slot; charging() takes the slot,
# charges, and releases the slot.
import simpy
from simpy_fsm.v1 import FSM, process_name
class Car(FSM):
"""Like Car, but handles the resource itself instead of with a context
manager."""
def __init__(
self,
env,
initial_state="driving",
*,
name,
charging_station,
driving_time,
charging_time
):
self.name = name
self.charging_station = charging_station
self.driving_time = driving_time
self.charging_time = charging_time
super().__init__(env, initial_state)
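# State flow of this machine: driving -> awaiting_battery -> charging,
# after which the process ends (charging returns no next state).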
def driving(self, data):
yield self.env.timeout(self.driving_time)
print("%s arriving at %d" % (self.name, self.env.now))
return self.awaiting_battery
def awaiting_battery(self, data):
# Wait for the charging station and acquire it
self.charging_request = self.charging_station.request()
yield self.charging_request
print("%s starting to charge at %s" % (self.name, self.env.now))
# Instead of invisibly passing `charging_request` via `self` to
# the `charging` state, can we make the flow of data clearer
# by making `charging_request` part of the thunk we return?
# Something like this:
#
# return (self.charging, charging_request)
#
# which fsm.trampoline would then handle appropriately.
return self.charging
def charging(self, data):
# The charging station has been acquired;
yield self.env.timeout(self.charging_time)
# BCS is the battery charging station
print("%s leaving the bcs at %s" % (self.name, self.env.now))
self.charging_station.release(self.charging_request)
if __name__ == "__main__":
env = simpy.Environment()
bcs = simpy.Resource(env, capacity=2)
cars = [
Car(
env,
name=process_name(i, of=4),
charging_station=bcs,
driving_time=i * 2,
charging_time=5,
)
for i in range(4)
]
env.run()
| 31.743243 | 74 | 0.631758 | 298 | 2,349 | 4.818792 | 0.372483 | 0.104457 | 0.039694 | 0.031337 | 0.041783 | 0.029248 | 0.029248 | 0 | 0 | 0 | 0 | 0.004154 | 0.282673 | 2,349 | 73 | 75 | 32.178082 | 0.848071 | 0.338868 | 0 | 0.044444 | 0 | 0 | 0.054319 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.044444 | 0 | 0.2 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca3d4fe9e2876f20e4c3eaac920e8250524628b2 | 2,299 | py | Python | scripts/summarize-rewards.py | jhostetler/jmcplan | 489994fa60a084d4c361a6c62b22d1a5f713c117 | [
"BSD-2-Clause"
] | 2 | 2020-03-11T13:14:33.000Z | 2021-07-17T22:45:15.000Z | scripts/summarize-rewards.py | jhostetler/jmcplan | 489994fa60a084d4c361a6c62b22d1a5f713c117 | [
"BSD-2-Clause"
] | null | null | null | scripts/summarize-rewards.py | jhostetler/jmcplan | 489994fa60a084d4c361a6c62b22d1a5f713c117 | [
"BSD-2-Clause"
] | null | null | null | # LICENSE
# Copyright (c) 2013-2016, Jesse Hostetler (jessehostetler@gmail.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from csv import CsvAttribute, CsvDataset
import argparse
import statistics
cl_parser = argparse.ArgumentParser( description="Summarizes reward changes over fixed-size intervals" )
cl_parser.add_argument( "input_file", type=str, nargs=1,
help="A 'rewards.csv' input file" )
cl_parser.add_argument( "-d", type=int, default=60, help="Size of intervals for analysis" )
args = cl_parser.parse_args()
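# The loop below measures |r(t+d) - r(t)| for every window of width d,
# sliding two columns at a time (presumably because the reward columns
# alternate with a second series in the CSV layout).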
with open( args.input_file[0] ) as fin:
data = CsvDataset( fin )
intervals = []
for fv in data.feature_vectors:
i = data.attribute_index( "r1" )
j = i + args.d
while j < len(data.attributes):
ri = float(fv[i])
rj = float(fv[j])
intervals.append( abs(rj - ri) )
i += 2
j += 2
print( "n: " + str(len(intervals)) )
print( "mean: " + str(statistics.mean(intervals)) )
print( "stddev: " + str(statistics.stdev(intervals)) )
print( "min: " + str(min(intervals)) )
print( "max: " + str(max(intervals)) )
| 42.574074 | 91 | 0.739887 | 330 | 2,299 | 5.121212 | 0.524242 | 0.018935 | 0.020118 | 0.027219 | 0.108876 | 0.080473 | 0.080473 | 0.080473 | 0.080473 | 0.080473 | 0 | 0.008896 | 0.168769 | 2,299 | 53 | 92 | 43.377358 | 0.875458 | 0.578947 | 0 | 0 | 0 | 0 | 0.150424 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.12 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca3d5f2a23ca8a36c8a715d7a40fab8cb03b8764 | 18,411 | py | Python | hs_fetcher.py | JCMarques15/tor-hs-fetcher | 3d63f5acd3e7d8c51c6682c15e1e2db37d3df18d | [
"MIT"
] | 4 | 2018-06-26T13:40:31.000Z | 2021-11-16T11:31:33.000Z | hs_fetcher.py | JCMarques15/tor-hs-fetcher | 3d63f5acd3e7d8c51c6682c15e1e2db37d3df18d | [
"MIT"
] | null | null | null | hs_fetcher.py | JCMarques15/tor-hs-fetcher | 3d63f5acd3e7d8c51c6682c15e1e2db37d3df18d | [
"MIT"
] | 5 | 2020-02-06T01:08:50.000Z | 2021-05-03T17:38:59.000Z | #!/usr/bin/env python3
import subprocess
import sys
import threading
import sqlite3
import re
import datetime
from pathlib import Path
import base64
import binascii
class myThread (threading.Thread):
def __init__(self, threadID, name, pid, db, lock):
threading.Thread.__init__(self)
# Initialize variables passed on object creation
self.threadID = threadID
self.name = name
self.pid = pid
self.db = db
self.cursor = db.cursor()
self.lock = lock
# Initialize variable with formated date for later directory naming
self.extraction_datetime = datetime.datetime.today().strftime('%Y-%m-%d-%H')
# Initialize regex processing rules
self.v2_full_descriptor_regex = re.compile("rendezvous-service-descriptor.*?-----END SIGNATURE[-]{0,5}", re.DOTALL)
self.v3_full_descriptor_regex = re.compile(r"hs-descriptor\s[\d].*?signature\s.*?\s", re.DOTALL)
self.v3_cert_regex = re.compile("[-]{0,5}BEGIN ED25519 CERT[-]{0,5}\n(.*?)\n[-]{0,5}END ED25519 CERT[-]{0,5}", re.DOTALL)
self.rendezvous_regex = re.compile(r"rendezvous-service-descriptor\s(.*)")
self.descriptor_version_regex = re.compile(r"version\s(.*)")
self.descriptor_pkey_regex = re.compile("permanent-key\n-----BEGIN RSA PUBLIC KEY-----(.*?)-----END RSA PUBLIC KEY-----", re.DOTALL)
self.secret_id_regex = re.compile(r"secret-id-part\s(.*)")
self.publication_time_regex = re.compile(r"publication-time\s(.*)")
self.protocol_versions_regex = re.compile(r"protocol-versions\s(.*)")
self.introduction_points_encoded_regex = re.compile("introduction-points\n-----BEGIN MESSAGE-----\n(.*?)\n-----END MESSAGE-----", re.DOTALL)
self.signature_regex = re.compile("signature\n-----BEGIN SIGNATURE-----(.*?)-----END SIGNATURE[-]{0,5}", re.DOTALL)
self.full_introduction_points_decoded_regex = re.compile("introduction-point.*?-----END RSA PUBLIC KEY-----.*?-----END RSA PUBLIC KEY-----", re.DOTALL)
# Initialize counters
self.v2_descriptor_counter = 0
self.v3_descriptor_counter = 0
def run(self):
print ("Starting {} with pid: {}".format(self.name, self.pid))
# Call dump memory and store the output of the script for processing
self.script_output = self.dump_memory(self.pid)
for self.line in self.script_output:
print(self.line)
# Reads the contents of the strings file into a variable
with open("{}/Memory_Dumps/{}H-{}.str".format(sys.path[0], self.extraction_datetime, self.pid), "r") as self.strings_file:
self.file_contents = self.strings_file.read()
# Try to extract V3 descriptors out of the strings file
try:
# Takes all of the v3 descriptors out of the strings file and extracts the cert for identification purposes
for self.v3_descriptor in self.v3_full_descriptor_regex.finditer(self.file_contents):
try:
# Extract the certificate for comparison
self.v3_cert = self.v3_cert_regex.search(self.v3_descriptor.group(0)).group(1).replace('\n', '')
# Acquire lock to interact with DB
self.lock.acquire()
print("{}: Acquired lock!".format(self.name))
# Check if cert is already in DB, if not call function to add it
if (self.cursor.execute("SELECT EXISTS(SELECT * FROM v3_descriptors WHERE descriptor_cert='{}')".format(self.v3_cert,)).fetchone()[0] == 0):
self.db_insert_v3_cert()
self.v3_descriptor_counter += 1
else:
print("[-] V3 cert already in the Database!")
# Commit changed to DB and release the lock
self.db.commit()
self.lock.release()
print("{}: Released lock!\n".format(self.name))
except sqlite3.OperationalError as err:
sys.stderr.write("Sqlite error:\n{}\n".format(err.args))
# If no V3 descriptors are found it prints a message and continues to V2 descriptors extraction
except TypeError as err:
print("No V3 descriptors found! Error: {}".format(err.args))
# Try to extract V2 descriptors out of the strings file
try:
# Takes all of the V2 descriptors out of the strings variable and process each one by one
for self.descriptor in self.v2_full_descriptor_regex.finditer(self.file_contents):
try:
# try to extract the fields of the descriptor
try:
# Extracts each field into his own variable
self.rendezvous = self.rendezvous_regex.search(self.descriptor.group(0)).group(1)
self.descriptor_version = self.descriptor_version_regex.search(self.descriptor.group(0)).group(1)
self.pkey = self.descriptor_pkey_regex.search(self.descriptor.group(0)).group(1).replace('\n', '')
self.secret_id = self.secret_id_regex.search(self.descriptor.group(0)).group(1)
self.publication_time = self.publication_time_regex.search(self.descriptor.group(0)).group(1)
self.protocol_versions = self.protocol_versions_regex.search(self.descriptor.group(0)).group(1)
try:
self.introduction_points_encoded = self.introduction_points_encoded_regex.search(self.descriptor.group(0)).group(1).replace('\n', '')
except AttributeError:
self.introduction_points_encoded = None
self.signature = self.signature_regex.search(self.descriptor.group(0)).group(1).replace('\n', '')
self.onion_link = "{}.onion".format(self.calc_onion_link(self.pkey))
# Extracts each introduction point and adds it to a list
if (self.introduction_points_encoded is not None):
self.introduction_points_list = list(self.full_introduction_points_decoded_regex.finditer(self.decode_introduction_points(self.introduction_points_encoded)))
self.introduction_points_count = len(self.introduction_points_list)
else:
self.introduction_points_list = None
self.introduction_points_count = 0
# Captures an exception raised when there is an error on the decoding of certain fields
# It prints a message and continues to the next descriptor without inserting into the database
except UnicodeDecodeError:
print("Found descriptor with bad encoding!\n")
continue
except binascii.Error as err:
print("Encoding error:\n{}".format(err.args))
continue
except Exception as err:
sys.stderr.write("Error found: {}\n".format(err.args))
continue
# Thread acquires the lock to access the database
self.lock.acquire()
print("{}: Acquired lock!".format(self.name))
# Checks if there is already an entry for the onion link
if (self.cursor.execute("SELECT EXISTS(SELECT * FROM hidden_services WHERE link='{}')".format(self.onion_link,)).fetchone()[0] == 0):
# if there isn't then it calls the function to add it
self.db_insert_link()
self.v2_descriptor_counter += 1
else:
# If there is then retrieves the link_id of the entry for later use
print("[-] Onion link already in the Database")
self.onion_link_id = self.cursor.execute("SELECT id FROM hidden_services WHERE link='{}'".format(self.onion_link)).fetchone()[0]
print("Onion link id is: {}".format(self.onion_link_id))
# Check if there is already a descriptor entry for the onion link
if (self.cursor.execute("SELECT EXISTS(SELECT link_id, publication_time FROM descriptors WHERE link_id='{}')".format(self.onion_link_id)).fetchone()[0] == 0):
# If there isn't then call function to insert descriptor, also calls the function to add it to the snapshot table
self.db_insert_descriptor()
self.snapshot_insert_descriptor()
else:
# If there is an entry, it checks if the entry publication time is the same as the newly extracted descriptor
if (self.cursor.execute("SELECT EXISTS(SELECT link_id, publication_time FROM descriptors WHERE link_id='{}' and publication_time='{}')".format(self.onion_link_id, self.publication_time)).fetchone()[0] == 0):
# If it is not then it calls the function to updates the entry in the database and also calls the function to add it to the snapshot
#self.db_update_descriptor()
self.snapshot_insert_descriptor()
else:
# If it is the same then just prints a message and continues
print("[-] Descriptor is still the same as the one in the Database!")
# At the end of each entry it commits the changes to the database file and releases the lock so other threads can access the database
self.db.commit()
self.lock.release()
print("{}: Released lock!\n".format(self.name))
except sqlite3.OperationalError as err:
sys.stderr.write("Sqlite error:\n{}\n".format(err.args))
# Acquire the lock at the end and record in a dedicated table the number of new descriptors found, with the date of the scan and the pid
self.lock.acquire()
print("{}: Acquired lock!".format(self.name))
print("[+] Inserting extraction stats into Database")
try:
self.cursor.execute("INSERT INTO extraction_stats(v2, v3, extraction_date, pid) VALUES(?,?,?,?)", (self.v2_descriptor_counter, self.v3_descriptor_counter, "{}H".format(self.extraction_datetime), self.pid))
except sqlite3.IntegrityError:
sys.stderr.write("[-] Entry for the hour/pid is already in the database\n")
self.db.commit()
self.lock.release()
print("{}: Released lock!\n".format(self.name))
# If nothing gets extracted it captures the exception
# raised from trying to iterate an empty object and prints a message
except TypeError as err:
print("No V2 descriptors found! Error: {}".format(err.args))
print ("Exiting {}".format(self.name))
# Function to insert new links into the database
def db_insert_link(self):
print("[+] Inserting Onion link into the Database")
self.cursor.execute("INSERT INTO hidden_services(link, reachable, classification) VALUES(?,?,?)", (self.onion_link, "Unknown", "None"))
self.onion_link_id = self.cursor.lastrowid
# Function to update the fields of an existing entry in the database
def db_update_link(self):
print("[+] Updating Onion link info in the Database")
# TODO: Add update code
# self.cursor.execute("UPDATE hidden_services SET reachable='?' classification='?' WHERE link='?'", (self.onion_link,))
# Function to insert new descriptors
def db_insert_descriptor(self):
print("[+] Inserting the descriptor into the Database")
self.cursor.execute("INSERT INTO descriptors(link_id, rendezvous_service_descriptor, format_version, permanent_key, secret_id_part, publication_time, protocol_versions, introduction_points_count, descriptor_signature) VALUES(:link_id, :rendezvous, :format_version, :permanent_key, :secret_id, :publication_time, :protocol_versions, :introduction_points_count, :descriptor_signature)", {
"link_id":self.onion_link_id,
"rendezvous":self.rendezvous,
"format_version":self.descriptor_version,
"permanent_key":self.pkey,
"secret_id":self.secret_id,
"publication_time":self.publication_time,
"protocol_versions":self.protocol_versions,
"introduction_points_count":self.introduction_points_count,
"descriptor_signature":self.signature})
if (self.introduction_points_list is not None):
print("[+] Inserting the Introduction Points into the Database")
self.ip_counter = 0
for self.entry in self.introduction_points_list:
self.ip_counter+=1
self.fields = re.match(r"introduction-point\s(.*?)\sip-address\s(.*?)\sonion-port\s(.*?)\sonion-key\s-----BEGIN RSA PUBLIC KEY-----\s(.*?)\s-----END RSA PUBLIC KEY-----\sservice-key\s-----BEGIN RSA PUBLIC KEY-----\s(.*?)\s-----END RSA PUBLIC KEY-----", self.entry.group(0), re.DOTALL)
self.cursor.execute("INSERT INTO descriptors_introduction_points(id, link_id, introduction_point, ip_address, onion_port, onion_key, service_key) VALUES(:id, :link_id, :introduction_point, :ip, :port, :onion_key, :service_key)", {
"id":self.ip_counter,
"link_id":self.onion_link_id,
"introduction_point":self.fields.group(1),
"ip":self.fields.group(2),
"port":self.fields.group(3),
"onion_key":self.fields.group(4).replace('\n', ''),
"service_key":self.fields.group(5).replace('\n', '')})
# Function to update the entry in the database with the newly published descriptor
def db_update_descriptor(self):
print("[+] Updating the descriptor entry in the Database")
self.cursor.execute("UPDATE descriptors SET rendezvous_service_descriptor='{}', format_version='{}', permanent_key='{}', secret_id_part='{}', publication_time='{}', protocol_versions='{}', introduction_points_count='{}', descriptor_signature='{}' WHERE link_id='{}'".format(self.rendezvous, self.descriptor_version, self.pkey, self.secret_id, self.publication_time, self.protocol_versions, self.introduction_points_count, self.signature, self.onion_link_id,))
print("[+] Updating the descriptor introduction points in the Database")
self.cursor.execute("DELETE FROM descriptors_introduction_points WHERE link_id='{}'".format(self.onion_link_id))
if (self.introduction_points_list is not None):
self.ip_counter = 0
for self.entry in self.introduction_points_list:
self.ip_counter+=1
self.fields = re.match(r"introduction-point\s(.*?)\sip-address\s(.*?)\sonion-port\s(.*?)\sonion-key\s-----BEGIN RSA PUBLIC KEY-----\s(.*?)\s-----END RSA PUBLIC KEY-----\sservice-key\s-----BEGIN RSA PUBLIC KEY-----\s(.*?)\s-----END RSA PUBLIC KEY-----", self.entry.group(0), re.DOTALL)
self.cursor.execute("INSERT INTO descriptors_introduction_points(id, link_id, introduction_point, ip_address, onion_port, onion_key, service_key) VALUES(:id, :link_id, :introduction_point, :ip, :port, :onion_key, :service_key)", {
"id":self.ip_counter,
"link_id":self.onion_link_id,
"introduction_point":self.fields.group(1),
"ip":self.fields.group(2),
"port":self.fields.group(3),
"onion_key":self.fields.group(4).replace('\n', ''),
"service_key":self.fields.group(5).replace('\n', '')})
# Function to insert the descriptor into a snapshot table for archiving purposes
def snapshot_insert_descriptor(self):
print("[+] Inserting the descriptor snapshot into the Database")
self.cursor.execute("INSERT INTO descriptors_snapshot(link_id, rendezvous_service_descriptor, format_version, permanent_key, secret_id_part, publication_time, protocol_versions, introduction_points, descriptor_signature) VALUES(:link_id, :rendezvous, :format_version, :permanent_key, :secret_id, :publication_time, :protocol_versions, :introduction_points, :descriptor_signature)", {
"link_id":self.onion_link_id,
"rendezvous":self.rendezvous,
"format_version":self.descriptor_version,
"permanent_key":self.pkey,
"secret_id":self.secret_id,
"publication_time":self.publication_time,
"protocol_versions":self.protocol_versions,
"introduction_points":self.introduction_points_encoded,
"descriptor_signature":self.signature})
def db_insert_v3_cert(self):
print("[+] Inserting v3 cert into the Database")
self.cursor.execute("INSERT INTO v3_descriptors(descriptor_cert) VALUES(?)", (self.v3_cert,))
# Function to call the shell script to make the hourly memory dump of the tor processes
# TODO: Convert from the shell script to native python code
def dump_memory(self, pid):
self.process_manager = subprocess.Popen(["{}/process_dumper.sh".format(sys.path[0]), pid], stdout=subprocess.PIPE, universal_newlines=True)
self.output, self._err = self.process_manager.communicate()
return self.output.splitlines()
# Function to call the shell script that calculates the onion link from the public key
# TODO: Convert from the shell script to native python code
def calc_onion_link(self, pkey):
print("Decoding publick key and extracting the onion link!")
self.process_manager = subprocess.Popen(["{}/onion-link-calc.sh".format(sys.path[0]), pkey], stdout=subprocess.PIPE, universal_newlines=True)
self.output, self._err = self.process_manager.communicate()
print("Decoded link: {}.onion".format(self.output.splitlines()[0]))
return self.output.splitlines()[0]
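# A native-Python sketch of the same calculation (assumed equivalent to the
# shell script): a v2 onion address is the base32 encoding of the first 10
# bytes of the SHA-1 digest of the DER-encoded RSA public key.
#     import hashlib
#     def calc_onion_link_native(pkey_b64):
#         der = base64.b64decode(pkey_b64)
#         return base64.b32encode(hashlib.sha1(der).digest()[:10]).decode().lower()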
# Function that decodes the introduction points message field of the descriptor
def decode_introduction_points(self, encoded_introduction_points):
print("Decoding introduction points message")
# base64.decodestring was removed in Python 3.9; decodebytes is the maintained name
self.output = base64.decodebytes(encoded_introduction_points.encode('utf-8'))
print("Decoded the introduction points!")
return self.output.decode('utf-8')
def extract_pid():
process_manager = subprocess.Popen(['pgrep', '^tor'], stdout=subprocess.PIPE, universal_newlines=True)
output, _err = process_manager.communicate()
return output.splitlines()
def main():
# Variable declaration
tor_pid = extract_pid()
thread_counter=0
thread_list=[]
lock = threading.Lock()
if Path("{}/Database/hidden_services.db".format(sys.path[0])).is_file():
print("Database exists, opening it up...")
db = sqlite3.connect("{}/Database/hidden_services.db".format(sys.path[0]), check_same_thread=False)
else:
print("Database doesn't exist, creating it...")
db = sqlite3.connect("{}/Database/hidden_services.db".format(sys.path[0]), check_same_thread=False)
cursor = db.cursor()
with open("{}/sqlite_database_create.sql".format(sys.path[0])) as create_sql:
cursor.executescript(create_sql.read())
# Start the threads
for pid in tor_pid:
thread_list.append(myThread(thread_counter+1, "Thread-{}".format(thread_counter+1), pid, db, lock))
thread_counter += 1
for relay_thread in thread_list:
relay_thread.start()
relay_thread.join()
db.close()
print("Exiting main thread!")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\rCtrl-C captured, Exiting!\n")
sys.exit(1)
| 56.302752 | 463 | 0.689696 | 2,464 | 18,411 | 4.998377 | 0.133523 | 0.054076 | 0.032153 | 0.013397 | 0.514615 | 0.455586 | 0.412796 | 0.375447 | 0.338097 | 0.29888 | 0 | 0.008649 | 0.183586 | 18,411 | 327 | 464 | 56.302752 | 0.810725 | 0.170713 | 0 | 0.371308 | 0 | 0.033755 | 0.32247 | 0.097438 | 0 | 0 | 0 | 0.003058 | 0 | 1 | 0.054852 | false | 0 | 0.042194 | 0 | 0.118143 | 0.139241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
ca40058a64a570aca77aabf42d24d5a31d357fe2 | 4,691 | py | Python | data_prep/bert_data_prep/gen_bert_data.py | internetarchive/pdf_trio | 682b1dcd8a07edcbfe89380a4a83477b8c71a980 | [
"Apache-2.0"
] | 16 | 2020-02-12T20:10:58.000Z | 2022-03-24T05:06:44.000Z | data_prep/bert_data_prep/gen_bert_data.py | internetarchive/pdf_trio | 682b1dcd8a07edcbfe89380a4a83477b8c71a980 | [
"Apache-2.0"
] | 1 | 2021-03-05T06:39:23.000Z | 2021-03-05T07:23:43.000Z | data_prep/bert_data_prep/gen_bert_data.py | internetarchive/pdf_trio | 682b1dcd8a07edcbfe89380a4a83477b8c71a980 | [
"Apache-2.0"
] | 3 | 2020-02-12T19:44:51.000Z | 2021-01-02T19:11:14.000Z | #!/usr/bin/env python3
"""
Copyright 2019 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Process pdf txt files to make training data for BERT to use run_classifier.py which comes with the
repo https://github.com/google-research/bert
The output is a TSV file for one category. Multiple TSV files (for positive/negative cases) can be concatenated
and should be shuf'ed.
The inputs are txt files which came from PDFs, organized in a single directory.
The text is cleaned up to remove punctuation, control chars, and truncated in the middle if the number
of words exceeds the specified max.
"""
import argparse
import os
import glob
import re
# init the arg parser
parser = argparse.ArgumentParser()
parser.add_argument("--category", type=str, default='', help="specify classification, 0 or 1", required=True)
parser.add_argument("--input", type=str, default='', help="directory with .txt files to process", required=True)
parser.add_argument("--output", type=str, default='', help="the output tsv file", required=True)
parser.add_argument("--max_tokens", type=int, default=512, help="max tokens")
parser.add_argument("--testing", default=False, help="testing mode, be verbose, only a few cycles", action="store_true")
# TODO: remove leftover options (working domains, append_only); --append_only is currently unused here
parser.add_argument("--append_only", default=False, help="only append to existing .ft files", action="store_true")
# read arguments from the command line
args = parser.parse_args()
# get operands
target_category = args.category
indir = args.input
outfile = args.output
print(" target_category=" + target_category)
print(" indir=" + indir)
"""
Output Format is TSV
column 1 - a unique ID
column 2 - an integer label - my dataset uses 0 for a negative paper and 1 for a positive paper
column 3 - a dummy column where each line has the same letter (in this case 'a') - perhaps this is used in other NLP tasks
column 4 - the text, which has had tabs and newlines stripped out of it.
"""
# returns a list of tokens from the url (domain plus path segments)
# Note: some items are empty
# (the original extract_domain/extract_uri helpers were not defined in this
# file; this reconstruction uses the stdlib urllib.parse instead)
def extract_tokens(url):
from urllib.parse import urlparse
parsed = urlparse(url)
tokens = parsed.path.split('/')
tokens.append(parsed.netloc)
return tokens
def basename(fpath):
offset_slash = fpath.rfind("/")
if offset_slash >= 0:
fpath = fpath[offset_slash+1:]
return fpath
def dirname(fpath):
offset_slash = fpath.rfind("/")
if offset_slash <= 0:
return "."
fpath = fpath[:offset_slash]
return fpath
def trim_tokens(file_tokens):
ntokens = len(file_tokens)
if ntokens > args.max_tokens:
cut_out = ntokens - args.max_tokens
front_end_offset = int(args.max_tokens/2)
back_begin_offset = front_end_offset + cut_out
file_tokens = file_tokens[:front_end_offset] + file_tokens[back_begin_offset:]
return file_tokens
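# Worked example: with max_tokens=4, the tokens [a, b, c, d, e, f] become
# [a, b, e, f] -- the middle (c, d) is cut out, keeping both ends.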
kount = 0
with open(outfile, 'w') as fout:
for filename in glob.glob(indir + "/*.txt"):
kount += 1
if args.testing and kount > 5:
print("Stopping early due to --testing flag")
break
fid = basename(filename[:-4])
with open(filename, 'r') as file:
file_text = file.read()
file_text = re.sub(r'[\x00-\x1F]+', ' ', file_text)
# ToDo: should really use unicode char groupings to clean
file_text = file_text.replace(',', ' ')
file_text = file_text.replace('.', ' ')
file_text = file_text.replace('!', ' ')
file_text = file_text.replace(';', ' ')
file_text = file_text.replace('-', ' ')
file_text = file_text.replace('"', ' ')
file_text = file_text.replace("'", ' ')
file_text = file_text.replace("(", ' ')
file_text = file_text.replace(")", ' ')
file_text = file_text.replace("[", ' ')
file_text = file_text.replace("]", ' ')
file_text = file_text.replace("/", ' ')
# convert consecutive whitespace to one space
file_text = re.sub('\s+', ' ', file_text)
file_tokens = trim_tokens(file_text.split())
fout.write("%s\t%s\ta\t%s\n" % (fid, target_category, " ".join(file_tokens)))
| 35.80916 | 122 | 0.673204 | 669 | 4,691 | 4.599402 | 0.394619 | 0.077998 | 0.054599 | 0.062398 | 0.143971 | 0.115697 | 0.115697 | 0.115697 | 0.115697 | 0.089698 | 0 | 0.008415 | 0.214666 | 4,691 | 130 | 123 | 36.084615 | 0.826819 | 0.291409 | 0 | 0.058824 | 0 | 0 | 0.127997 | 0 | 0 | 0 | 0 | 0.007692 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.191176 | 0.044118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca41adf512489971642b9250b9391a54e0238eab | 23,451 | py | Python | fitbit/api.py | benhoyle/python-ihealth | 32173d35d368d4adb615f8c9c61aa2c1fb22ca63 | [
"Apache-2.0"
] | 2 | 2015-03-13T22:43:33.000Z | 2015-06-26T19:10:25.000Z | fitbit/api.py | benhoyle/python-ihealth | 32173d35d368d4adb615f8c9c61aa2c1fb22ca63 | [
"Apache-2.0"
] | null | null | null | fitbit/api.py | benhoyle/python-ihealth | 32173d35d368d4adb615f8c9c61aa2c1fb22ca63 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import oauth2 as oauth
import requests
import urlparse
import json
import datetime
import urllib
from fitbit.exceptions import (BadResponse, DeleteError, HTTPBadRequest,
HTTPUnauthorized, HTTPForbidden,
HTTPServerError, HTTPConflict, HTTPNotFound)
from fitbit.utils import curry
class FitbitConsumer(oauth.Consumer):
pass
# OAuth client for the Fitbit API, built on oauth2 and requests
class FitbitOauthClient(oauth.Client):
API_ENDPOINT = "https://api.fitbit.com"
AUTHORIZE_ENDPOINT = "https://www.fitbit.com"
API_VERSION = 1
_signature_method = oauth.SignatureMethod_HMAC_SHA1()
request_token_url = "%s/oauth/request_token" % API_ENDPOINT
access_token_url = "%s/oauth/access_token" % API_ENDPOINT
authorization_url = "%s/oauth/authorize" % AUTHORIZE_ENDPOINT
def __init__(self, consumer_key, consumer_secret, user_key=None,
user_secret=None, user_id=None, *args, **kwargs):
if user_key and user_secret:
self._token = oauth.Token(user_key, user_secret)
else:
# This allows public calls to be made
self._token = None
if user_id:
self.user_id = user_id
self._consumer = FitbitConsumer(consumer_key, consumer_secret)
super(FitbitOauthClient, self).__init__(self._consumer, *args, **kwargs)
def _request(self, method, url, **kwargs):
"""
A simple wrapper around requests.
"""
return requests.request(method, url, **kwargs)
def make_request(self, url, data={}, method=None, **kwargs):
"""
Builds and makes the Oauth Request, catches errors
https://wiki.fitbit.com/display/API/API+Response+Format+And+Errors
"""
if not method:
method = 'POST' if data else 'GET'
request = oauth.Request.from_consumer_and_token(self._consumer, self._token, http_method=method, http_url=url, parameters=data)
request.sign_request(self._signature_method, self._consumer,
self._token)
response = self._request(method, url, data=data,
headers=request.to_header())
if response.status_code == 401:
raise HTTPUnauthorized(response)
elif response.status_code == 403:
raise HTTPForbidden(response)
elif response.status_code == 404:
raise HTTPNotFound(response)
elif response.status_code == 409:
raise HTTPConflict(response)
elif response.status_code >= 500:
raise HTTPServerError(response)
elif response.status_code >= 400:
raise HTTPBadRequest(response)
return response
def fetch_request_token(self, parameters=None):
"""
Step 1 of getting authorized to access a user's data at fitbit: this
makes a signed request to fitbit to get a token to use in the next
step. Returns that token.
Set parameters['oauth_callback'] to a URL and when the user has
granted us access at the fitbit site, fitbit will redirect them to the URL
you passed. This is how we get back the magic verifier string from fitbit
if we're a web app. If we don't pass it, then fitbit will just display
the verifier string for the user to copy and we'll have to ask them to
paste it for us and read it that way.
"""
"""
via headers
-> OAuthToken
Providing the 'oauth_callback' parameter in the Authorization header of
the request_token_url request will take priority over the dev.fitbit.com
settings, ie. parameters = {'oauth_callback': 'callback_url'}
"""
request = oauth.Request.from_consumer_and_token(
self._consumer,
http_url=self.request_token_url,
parameters=parameters
)
request.sign_request(self._signature_method, self._consumer, None)
response = self._request(request.method, self.request_token_url,
headers=request.to_header())
return oauth.Token.from_string(response.content)
def authorize_token_url(self, token):
"""Step 2: Given the token returned by fetch_request_token(), return
the URL the user needs to go to in order to grant us authorization
to look at their data. Then redirect the user to that URL, open their
browser to it, or tell them to copy the URL into their browser.
"""
request = oauth.Request.from_token_and_callback(
token=token,
http_url=self.authorization_url
)
return request.to_url()
#def authorize_token(self, token):
# # via url
# # -> typically just some okay response
# request = oauth.Request.from_token_and_callback(token=token,
# http_url=self.authorization_url)
# response = self._request(request.method, request.to_url(),
# headers=request.to_header())
# return response.content
def fetch_access_token(self, token, verifier):
"""Step 4: Given the token from step 1, and the verifier from step 3 (see step 2),
calls fitbit again and returns an access token object. Extract .key and .secret
from that and save them, then pass them as user_key and user_secret in future
API calls to fitbit to get this user's data.
"""
request = oauth.Request.from_consumer_and_token(self._consumer, token, http_method='POST', http_url=self.access_token_url, parameters={'oauth_verifier': verifier})
body = "oauth_verifier=%s" % verifier
response = self._request('POST', self.access_token_url, data=body,
headers=request.to_header())
if response.status_code != 200:
# TODO custom exceptions
raise Exception("Invalid response %s." % response.content)
params = urlparse.parse_qs(response.content, keep_blank_values=False)
self.user_id = params['encoded_user_id'][0]
self._token = oauth.Token.from_string(response.content)
return self._token
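# The full three-legged flow, as a sketch (CONSUMER_KEY/SECRET assumed):
#     client = FitbitOauthClient(CONSUMER_KEY, CONSUMER_SECRET)
#     token = client.fetch_request_token()
#     print client.authorize_token_url(token)   # send the user to this URL
#     verifier = raw_input('verifier: ')        # the string fitbit gives the user
#     access_token = client.fetch_access_token(token, verifier)
#     # persist access_token.key / access_token.secret for later API calls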
class Fitbit(object):
US = 'en_US'
METRIC = 'en_UK'
API_ENDPOINT = "https://api.fitbit.com"
API_VERSION = 1
_resource_list = [
'body',
'activities',
'foods',
'water',
'sleep',
'heart',
'bp',
'glucose',
]
_qualifiers = [
'recent',
'favorite',
'frequent',
]
def __init__(self, consumer_key, consumer_secret, system=US, **kwargs):
self.client = FitbitOauthClient(consumer_key, consumer_secret, **kwargs)
self.SYSTEM = system
# All of these use the same patterns: define the methods for accessing,
# creating, and deleting records once, and use curry to make individual
# methods for each resource
for resource in self._resource_list:
setattr(self, resource, curry(self._COLLECTION_RESOURCE, resource))
if resource not in ['body', 'glucose']:
# Body and Glucose entries are not currently able to be deleted
setattr(self, 'delete_%s' % resource, curry(
self._DELETE_COLLECTION_RESOURCE, resource))
for qualifier in self._qualifiers:
setattr(self, '%s_activities' % qualifier, curry(self.activity_stats, qualifier=qualifier))
setattr(self, '%s_foods' % qualifier, curry(self._food_stats,
qualifier=qualifier))
def make_request(self, *args, **kwargs):
# TODO: this should handle data-level errors, improper requests, and bad
# serialization
headers = kwargs.get('headers', {})
headers.update({'Accept-Language': self.SYSTEM})
kwargs['headers'] = headers
method = kwargs.get('method', 'POST' if 'data' in kwargs else 'GET')
response = self.client.make_request(*args, **kwargs)
if response.status_code == 202:
return True
if method == 'DELETE':
if response.status_code == 204:
return True
else:
raise DeleteError(response)
try:
rep = json.loads(response.content)
except ValueError:
raise BadResponse
return rep
def user_profile_get(self, user_id=None):
"""
Get a user profile. You can get other user's profile information
by passing user_id, or you can get the current user's by not passing
a user_id
.. note:
This is not the same format that the GET comes back in, GET requests
are wrapped in {'user': <dict of user data>}
https://wiki.fitbit.com/display/API/API-Get-User-Info
"""
if user_id is None:
user_id = "-"
url = "%s/%s/user/%s/profile.json" % (self.API_ENDPOINT,
self.API_VERSION, user_id)
return self.make_request(url)
def user_profile_update(self, data):
"""
Set a user profile. You can set your user profile information by
passing a dictionary of attributes that will be updated.
.. note:
This is not the same format that the GET comes back in, GET requests
are wrapped in {'user': <dict of user data>}
https://wiki.fitbit.com/display/API/API-Update-User-Info
"""
url = "%s/%s/user/-/profile.json" % (self.API_ENDPOINT,
self.API_VERSION)
return self.make_request(url, data)
def _COLLECTION_RESOURCE(self, resource, date=None, user_id=None,
data=None):
"""
Retrieving and logging of each type of collection data.
Arguments:
resource, defined automatically via curry
[date] defaults to today
[user_id] defaults to current logged in user
[data] optional, include for creating a record, exclude for access
This implements the following methods::
body(date=None, user_id=None, data=None)
activities(date=None, user_id=None, data=None)
foods(date=None, user_id=None, data=None)
water(date=None, user_id=None, data=None)
sleep(date=None, user_id=None, data=None)
heart(date=None, user_id=None, data=None)
bp(date=None, user_id=None, data=None)
* https://wiki.fitbit.com/display/API/Fitbit+Resource+Access+API
"""
if not date:
date = datetime.date.today()
if not user_id:
user_id = '-'
if not isinstance(date, basestring):
date = date.strftime('%Y-%m-%d')
if not data:
url = "%s/%s/user/%s/%s/date/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
user_id,
resource,
date,
)
else:
data['date'] = date
url = "%s/%s/user/%s/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
user_id,
resource,
)
return self.make_request(url, data)
def _DELETE_COLLECTION_RESOURCE(self, resource, log_id):
"""
        Deleting each type of collection data.
Arguments:
resource, defined automatically via curry
log_id, required, log entry to delete
This builds the following methods::
delete_body(log_id)
delete_activities(log_id)
delete_foods(log_id)
delete_water(log_id)
delete_sleep(log_id)
delete_heart(log_id)
delete_bp(log_id)
"""
url = "%s/%s/user/-/%s/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
resource,
log_id,
)
response = self.make_request(url, method='DELETE')
return response
def time_series(self, resource, user_id=None, base_date='today',
period=None, end_date=None):
"""
        The time series covers a LOT of endpoints (documented at the url
        below), so they don't each get their own method. They all follow the
        same patterns and return similar formats.
Taking liberty, this assumes a base_date of today, the current user,
and a 1d period.
https://wiki.fitbit.com/display/API/API-Get-Time-Series
"""
if not user_id:
user_id = '-'
if period and end_date:
raise TypeError("Either end_date or period can be specified, not both")
if end_date:
if not isinstance(end_date, basestring):
end = end_date.strftime('%Y-%m-%d')
else:
end = end_date
else:
            if period not in ['1d', '7d', '30d', '1w', '1m', '3m', '6m', '1y', 'max']:
raise ValueError("Period must be one of '1d', '7d', '30d', '1w', '1m', '3m', '6m', '1y', 'max'")
end = period
if not isinstance(base_date, basestring):
base_date = base_date.strftime('%Y-%m-%d')
url = "%s/%s/user/%s/%s/date/%s/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
user_id,
resource,
base_date,
end
)
return self.make_request(url)
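    # Usage sketch (illustrative; resource paths are documented on the
    # Fitbit wiki linked above, and 'fb' is an assumed client instance):
    #   fb.time_series('activities/steps', period='7d')
    #   fb.time_series('body/weight', base_date='2013-01-01',
    #                  end_date='2013-02-01')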
def activity_stats(self, user_id=None, qualifier=''):
"""
* https://wiki.fitbit.com/display/API/API-Get-Activity-Stats
* https://wiki.fitbit.com/display/API/API-Get-Favorite-Activities
* https://wiki.fitbit.com/display/API/API-Get-Recent-Activities
* https://wiki.fitbit.com/display/API/API-Get-Frequent-Activities
This implements the following methods::
recent_activities(user_id=None, qualifier='')
favorite_activities(user_id=None, qualifier='')
frequent_activities(user_id=None, qualifier='')
"""
if not user_id:
user_id = '-'
if qualifier:
if qualifier in self._qualifiers:
qualifier = '/%s' % qualifier
else:
raise ValueError("Qualifier must be one of %s"
% ', '.join(self._qualifiers))
else:
qualifier = ''
url = "%s/%s/user/%s/activities%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
user_id,
qualifier,
)
return self.make_request(url)
def _food_stats(self, user_id=None, qualifier=''):
"""
This builds the convenience methods on initialization::
recent_foods(user_id=None, qualifier='')
favorite_foods(user_id=None, qualifier='')
frequent_foods(user_id=None, qualifier='')
* https://wiki.fitbit.com/display/API/API-Get-Recent-Foods
* https://wiki.fitbit.com/display/API/API-Get-Frequent-Foods
* https://wiki.fitbit.com/display/API/API-Get-Favorite-Foods
"""
if not user_id:
user_id = '-'
url = "%s/%s/user/%s/foods/log/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
user_id,
qualifier,
)
return self.make_request(url)
def add_favorite_activity(self, activity_id):
"""
https://wiki.fitbit.com/display/API/API-Add-Favorite-Activity
"""
url = "%s/%s/user/-/activities/favorite/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
activity_id,
)
return self.make_request(url, method='POST')
def delete_favorite_activity(self, activity_id):
"""
https://wiki.fitbit.com/display/API/API-Delete-Favorite-Activity
"""
url = "%s/%s/user/-/activities/favorite/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
activity_id,
)
return self.make_request(url, method='DELETE')
def add_favorite_food(self, food_id):
"""
https://wiki.fitbit.com/display/API/API-Add-Favorite-Food
"""
url = "%s/%s/user/-/foods/log/favorite/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
food_id,
)
return self.make_request(url, method='POST')
def delete_favorite_food(self, food_id):
"""
https://wiki.fitbit.com/display/API/API-Delete-Favorite-Food
"""
url = "%s/%s/user/-/foods/log/favorite/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
food_id,
)
return self.make_request(url, method='DELETE')
def create_food(self, data):
"""
https://wiki.fitbit.com/display/API/API-Create-Food
"""
url = "%s/%s/user/-/foods.json" % (
self.API_ENDPOINT,
self.API_VERSION,
)
return self.make_request(url, data=data)
def get_meals(self):
"""
https://wiki.fitbit.com/display/API/API-Get-Meals
"""
url = "%s/%s/user/-/meals.json" % (
self.API_ENDPOINT,
self.API_VERSION,
)
return self.make_request(url)
def get_devices(self):
"""
https://wiki.fitbit.com/display/API/API-Get-Devices
"""
url = "%s/%s/user/-/devices.json" % (
self.API_ENDPOINT,
self.API_VERSION,
)
return self.make_request(url)
def activities_list(self):
"""
https://wiki.fitbit.com/display/API/API-Browse-Activities
"""
url = "%s/%s/activities.json" % (
self.API_ENDPOINT,
self.API_VERSION,
)
return self.make_request(url)
def activity_detail(self, activity_id):
"""
https://wiki.fitbit.com/display/API/API-Get-Activity
"""
url = "%s/%s/activities/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
activity_id
)
return self.make_request(url)
def search_foods(self, query):
"""
https://wiki.fitbit.com/display/API/API-Search-Foods
"""
url = "%s/%s/foods/search.json?%s" % (
self.API_ENDPOINT,
self.API_VERSION,
urllib.urlencode({'query': query})
)
return self.make_request(url)
def food_detail(self, food_id):
"""
https://wiki.fitbit.com/display/API/API-Get-Food
"""
url = "%s/%s/foods/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
food_id
)
return self.make_request(url)
def food_units(self):
"""
https://wiki.fitbit.com/display/API/API-Get-Food-Units
"""
url = "%s/%s/foods/units.json" % (
self.API_ENDPOINT,
self.API_VERSION
)
return self.make_request(url)
def get_friends(self, user_id=None):
"""
https://wiki.fitbit.com/display/API/API-Get-Friends
"""
if not user_id:
user_id = '-'
url = "%s/%s/user/%s/friends.json" % (
self.API_ENDPOINT,
self.API_VERSION,
user_id
)
return self.make_request(url)
def get_friends_leaderboard(self, period):
"""
https://wiki.fitbit.com/display/API/API-Get-Friends-Leaderboard
"""
        if period not in ['7d', '30d']:
raise ValueError("Period must be one of '7d', '30d'")
url = "%s/%s/user/-/friends/leaders/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
period
)
return self.make_request(url)
def invite_friend(self, data):
"""
https://wiki.fitbit.com/display/API/API-Create-Invite
"""
url = "%s/%s/user/-/friends/invitations.json" % (
self.API_ENDPOINT,
self.API_VERSION,
)
return self.make_request(url, data=data)
def invite_friend_by_email(self, email):
"""
Convenience Method for
https://wiki.fitbit.com/display/API/API-Create-Invite
"""
return self.invite_friend({'invitedUserEmail': email})
def invite_friend_by_userid(self, user_id):
"""
Convenience Method for
https://wiki.fitbit.com/display/API/API-Create-Invite
"""
return self.invite_friend({'invitedUserId': user_id})
def respond_to_invite(self, other_user_id, accept=True):
"""
https://wiki.fitbit.com/display/API/API-Accept-Invite
"""
url = "%s/%s/user/-/friends/invitations/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
other_user_id,
)
accept = 'true' if accept else 'false'
return self.make_request(url, data={'accept': accept})
def accept_invite(self, other_user_id):
"""
Convenience method for respond_to_invite
"""
return self.respond_to_invite(other_user_id)
def reject_invite(self, other_user_id):
"""
Convenience method for respond_to_invite
"""
return self.respond_to_invite(other_user_id, accept=False)
def get_badges(self, user_id=None):
"""
https://wiki.fitbit.com/display/API/API-Get-Badges
"""
if not user_id:
user_id = '-'
url = "%s/%s/user/%s/badges.json" % (
self.API_ENDPOINT,
self.API_VERSION,
user_id
)
return self.make_request(url)
def subscription(self, subscription_id, subscriber_id, collection=None,
method='POST'):
"""
https://wiki.fitbit.com/display/API/Fitbit+Subscriptions+API
"""
if not collection:
url = "%s/%s/user/-/apiSubscriptions/%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
subscription_id
)
else:
url = "%s/%s/user/-/%s/apiSubscriptions/%s-%s.json" % (
self.API_ENDPOINT,
self.API_VERSION,
collection,
subscription_id,
collection
)
return self.make_request(
url,
method=method,
headers={"X-Fitbit-Subscriber-id": subscriber_id}
)
def list_subscriptions(self, collection=''):
"""
https://wiki.fitbit.com/display/API/Fitbit+Subscriptions+API
"""
if collection:
collection = '/%s' % collection
url = "%s/%s/user/-%s/apiSubscriptions.json" % (
self.API_ENDPOINT,
self.API_VERSION,
collection,
)
return self.make_request(url)
    @classmethod
    def from_oauth_keys(cls, consumer_key, consumer_secret, user_key=None,
                        user_secret=None, user_id=None, system=US):
        # __init__ builds the FitbitOauthClient itself, so forward the
        # credentials as keyword arguments rather than a pre-built client
        return cls(consumer_key, consumer_secret, system=system,
                   user_key=user_key, user_secret=user_secret,
                   user_id=user_id)
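
# Usage sketch (illustrative only; assumes the enclosing class is named
# Fitbit and that valid OAuth credentials/tokens are supplied):
#
#     fb = Fitbit('CONSUMER_KEY', 'CONSUMER_SECRET',
#                 user_key='USER_KEY', user_secret='USER_SECRET')
#     fb.sleep()                        # curry-built: GET today's sleep log
#     fb.foods(data={'foodId': 123})    # curry-built: POST a food log entry
#     fb.delete_water(log_id='456')     # curry-built: DELETE a water entry
#     fb.recent_activities()            # curry-built qualifier method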
| 34.38563 | 171 | 0.569102 | 2,736 | 23,451 | 4.720029 | 0.13633 | 0.028341 | 0.03833 | 0.045997 | 0.511073 | 0.45563 | 0.410872 | 0.356899 | 0.323138 | 0.2663 | 0 | 0.004028 | 0.32246 | 23,451 | 681 | 172 | 34.436123 | 0.808736 | 0.264125 | 0 | 0.326531 | 0 | 0.002551 | 0.101103 | 0.056738 | 0 | 0 | 0 | 0.001468 | 0 | 1 | 0.09949 | false | 0.002551 | 0.020408 | 0 | 0.260204 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca433cfdc583b5417c142f3e27cbbc19fc20126f | 636 | py | Python | python_first_step/sortalgoArrayOperation/sort2.py | cartellefo/projet | 23c67e847b415fb47f71e830b89a227fffed109b | [
"MIT"
] | null | null | null | python_first_step/sortalgoArrayOperation/sort2.py | cartellefo/projet | 23c67e847b415fb47f71e830b89a227fffed109b | [
"MIT"
] | null | null | null | python_first_step/sortalgoArrayOperation/sort2.py | cartellefo/projet | 23c67e847b415fb47f71e830b89a227fffed109b | [
"MIT"
] | null | null | null | import time
import numpy as np
import numpy.linalg as nl
import random
import matplotlib.pyplot as plt
def sortInt(n_max):
    # build a list of n_max - 1 random integers in [1, 10]
    listInt = []
    for i in range(1, n_max):
        s = random.randint(1, 10)
        listInt.append(s)
    return listInt
intRand = sortInt(5)
print(intRand)
def tri_ins(t):
    # insertion sort that also counts element moves ("permut")
    permut = 0
    for k in range(1, len(t)):
        temp = t[k]
        j = k
        while j > 0 and temp < t[j-1]:
            permut = permut + 1
            t[j] = t[j-1]
            j -= 1
        t[j] = temp
        print(t)  # show the partially sorted list after each insertion
    permut = permut + 2*len(t)
    return t, permut
x, e = tri_ins(intRand)
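# Sanity check sketch (illustrative): tri_ins sorts in place, so the result
# should match Python's built-in sort of a copy of the same data:
#   data = sortInt(10)
#   ref = sorted(data)
#   s, moves = tri_ins(data)
#   assert s == ref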
| 16.307692 | 34 | 0.542453 | 102 | 636 | 3.343137 | 0.45098 | 0.02346 | 0.046921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0311 | 0.342767 | 636 | 38 | 35 | 16.736842 | 0.784689 | 0.023585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.185185 | 0 | 0.296296 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca436d41c043f791ff6f753cb4ff5c619518a094 | 2,844 | py | Python | plot_p.py | alexfmsu/pyquantum | 78b09987cbfecf549e67b919bb5cb2046b21ad44 | [
"MIT"
] | null | null | null | plot_p.py | alexfmsu/pyquantum | 78b09987cbfecf549e67b919bb5cb2046b21ad44 | [
"MIT"
] | null | null | null | plot_p.py | alexfmsu/pyquantum | 78b09987cbfecf549e67b919bb5cb2046b21ad44 | [
"MIT"
] | 2 | 2020-07-28T08:40:06.000Z | 2022-02-16T23:04:58.000Z | from PyQuantum.Tools.PlotBuilder2D import *
from PyQuantum.Tools.CSV import *
import numpy as np
x = []
y = []
# path = 'oout'
path = 'out_2'
def df(x, y):
    # backward finite differences (f(x_n) - f(x_{n-1})) / (x_n - x_{n-1}),
    # computed from the end of the arrays and returned in forward order
    f_xn = y[-1]
    xn = x[-1]
    DF = []
    for i in range(len(y)-2, -1, -1):
        f_xn_1 = y[i]
        xn_1 = x[i]
        df = (f_xn - f_xn_1) / (xn - xn_1)
        DF.append(df)
        f_xn = f_xn_1
        xn = xn_1
    return DF[::-1]
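# e.g. (illustrative): df([0., 1., 2.], [0., 1., 4.]) -> [1.0, 3.0]
# (one fewer point than the input arrays)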
n_counts = 100
for i in range(1, 2):
# Fidelity_s2_list = list_from_csv(path+'/fidelity_s2' + str(i) + '.csv')
T_list = list_from_csv(path+'/T_list' + str(i) + '.csv')
cnt = list_from_csv(path+'/cnt' + str(i) + '.csv')
print(len(T_list))
# print(T_list)
Tt = [T_list[0]]
# for i in T_list:
# print(i)
# exit(0)
for t in range(1, len(T_list)):
Tt.append(T_list[t] - T_list[t-1])
# print(T_list[t], '-', T_list[t-1], T_list[t] - T_list[t-1])
dt_avg = sum(Tt) / cnt[0]
print(dt_avg)
exit(0)
for i in Tt:
print(i * 1e9)
exit(0)
# # dt /= cnt[0]
# print(dt)
# dt_avg = sum(dt) / cnt[0]
dt = max(Tt) / n_counts
Ti = []
for j in range(n_counts+1):
Ti.append(dt * j)
print(Ti)
Ni = [0] * (n_counts+1)
for j in Tt:
print(j, dt, int(j / dt))
Ni[int(j / dt)] += 1
print(Ni)
y = Ni
x = Ti
# exit(0)
# y = df(T_list, sink_list)
# x = dt_avg
# exit(0)
# T_avg = sum([t for t in T_list]) / cnt[0]
# # print(i, ': ', T_sum, ', ', cnt, sep='')
# T_str = None
if max(T_list) >= 1e-3:
# Ti *= 1e3
# T_str = 'ms'
# T_avg *= 1e3
# dt *= 1e3
# T_list = [i * 1e3 for i in T_list]
Ti = [i * 1e3 for i in Ti]
elif max(T_list) >= 1e-6:
# Ti *= 1e6
# T_str = 'mks'
# T_avg *= 1e6
# dt *= 1e6
# T_list = [i * 1e6 for i in T_list]
Ti = [i * 1e6 for i in Ti]
elif max(T_list) >= 1e-9:
Ti = [i * 1e9 for i in Ti]
# Ti *= 1e9
# T_str = 'ns'
# T_avg *= 1e9
# dt *= 1e9
# # T_list *= 1e9
# T_list = [i * 1e9 for i in T_list]
# # T_list = [i * 1e9 for i in T_list]
# # print(Fidelity_s2_list[0], dt)
# # x = list(range(1, cnt[0]+1))
# # y = T_list
# # x =
# y = df(T_list)
# x.append(Fidelity_s2_list[0])
# x.append(i)
# y.append(dt)
print(x, len(x))
print(y, len(y))
exit(0)
data = [go.Scatter(
x=x,
y=y,
mode='markers',
# x=T_list[1:],
# y=df(T_list, sink_list),
# name=w_0,
)]
make_plot({
'to_file': False,
'online': False,
'data': data,
'x_title': 'time, ' + '',
# 'x_title': 'time, ' + str(T_str),
'y_title': 'sink',
'title': 'avg',
'html': 'avg.html',
})
| 18.588235 | 77 | 0.457806 | 475 | 2,844 | 2.566316 | 0.166316 | 0.127153 | 0.054143 | 0.028712 | 0.241181 | 0.19032 | 0.146842 | 0.091879 | 0.091879 | 0 | 0 | 0.049918 | 0.359001 | 2,844 | 152 | 78 | 18.710526 | 0.61876 | 0.343179 | 0 | 0.044776 | 0 | 0 | 0.050745 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.044776 | 0 | 0.074627 | 0.119403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca4406d690ff4c796afb3625d7199843d05e7652 | 7,644 | py | Python | omdbapi-python-tool/Old/dont_use_experimental_multi_processor.py | know-airl/Vote-Goat-Data | d523d45107b5994b9135577db5e0269eb6d4c613 | [
"MIT"
] | 1 | 2020-06-12T16:58:19.000Z | 2020-06-12T16:58:19.000Z | omdbapi-python-tool/Old/dont_use_experimental_multi_processor.py | know-airl/Vote-Goat-Data | d523d45107b5994b9135577db5e0269eb6d4c613 | [
"MIT"
] | 2 | 2018-07-04T11:19:13.000Z | 2018-07-12T11:46:16.000Z | omdbapi-python-tool/Old/dont_use_experimental_multi_processor.py | know-airl/Vote-Goat-Data | d523d45107b5994b9135577db5e0269eb6d4c613 | [
"MIT"
] | null | null | null | import omdb # For scraping omdbapi.com
import ujson # For outputting to disk
from time import sleep # For sleeping between scrape attempts
import numpy as np # Not utilised past the failure text file function
import os # For checking the existing data files
import math # For rounding float up to nearest integer
# multiprocessing packages:
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
from progressbar import Bar, ETA, ProgressBar, Percentage # For providing progressbar functionality. This is actually "progressbar33" in python3.
def del_file(file_number):
"""
Delete a json file. Perhaps a def is overboard.
"""
latest_file_name = 'data_' + (str(file_number).zfill(6)) + '.json'
os.remove(latest_file_name)
#def check_json_files(range_ref):
# """
# Check for the existence of existing data_xxxxxx.json files.
# If they do exist, delete the final data file (in case it is partially filled, or broken).
# Return final data file - 1.
# Purpose: Recover last known scraping state, avoiding manually finding the latest scrape_start value to tweak.
# """
# no_file = None # Perhaps we could just return 'None' instead of declaring ahead of the if statement?
#
# if(os.path.exists('data_000000.json')):
# latest_json_file = 0 # Init the default value
#
# ceiling_range = int(math.ceil(range_ref/json_batch_limit)) + 1 # Could be float, so round up to integer!
# #print("ceiling_range: {}".format(ceiling_range))
# for i in range(ceiling_range):
# #print("i: {}".format(i))
# json_file_name = 'data_' + (str(i).zfill(6)) + '.json'
# if(os.path.exists(json_file_name)):
# #print("continue: {}".format(i))
# latest_json_file = i
# continue
# else:
# #print("break: {}".format(i))
# break # We found the latest!
# #print("deleting: {}".format(latest_json_file))
# #print("delete: {}".format(i))
# del_file(latest_json_file) # Delete the final file to avoid issues
# #return (latest_json_file - 1) # Return the last remaining file number (after we deleted the latest file)
# else:
# return no_file
def generate_imdb_tt_list(start, stop):
"""
Generate the list of IMDB id tags, which we will use to query omdbapi.
This creates a list of lists, not a single list!
Format: "tt + (fill: up to 6 zeros) number"
"""
imdb_tag_list = [] # Empty list to hold our imdb tags
list_chunk_size = int(math.ceil((stop - start)/json_batch_limit) + 1)
external_iterator = 0
for i in range(int(math.ceil((stop - start) / list_chunk_size)) + 1):
internal_list = []
for current_iteration in range(list_chunk_size):
prepend_imdb_tag = 'tt' #Required prepended tag
internal_list.append(prepend_imdb_tag+(str(external_iterator + 1).zfill(7))) #appending the number to tt & padding with zeros.
external_iterator += 1
imdb_tag_list.append(internal_list)
    return imdb_tag_list  # Return the id list
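# Illustration (values assumed): with json_batch_limit = 10,
# generate_imdb_tt_list(1, 200) builds sub-lists of
# ceil((200 - 1) / 10) + 1 = 21 tags each, e.g.
# [['tt0000001', ..., 'tt0000021'], ['tt0000022', ...], ...]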
def write_json_to_disk(file_name, json_data):
"""
When called, write the json_data to a json file.
We will end up with many data_*.json files.
These files will be merged using jq.
"""
with open(file_name, 'w') as outfile:
ujson.dump(json_data, outfile, encode_html_chars=True, escape_forward_slashes=False, ensure_ascii=False) #Write JSON to data.json (disk)
def write_failures_to_disk():
"""
    When called, writes out all IMDB ids whose queries timed out.
    Future functionality could include re-scraping the ids in these files.
"""
if (np.size(skipped_ids) > 0):
with open("failure.txt", "w") as text_file:
for skipped_item in skipped_ids:
text_file.write(skipped_item + '\n')
def scrape_omdb(omdb_id_list):
"""
When called, will scrape omdbapi for the range of imdb ids we generated during the generate_imdb_tt_list() step.
Will display a progress bar so you know the script is still functioning.
WARNING: If you set the range to 1 million records, expect the script to take up to 2-3 days.
"""
imdb_json_data = {} #Empty data field
imdb_json_data['items'] = [] #Empty data list titled 'items'
progress_iterator = 0 #For the progress bar
json_batch_iterator = 0 #Every json_batch_iterator iterations we will revert to 0, limiting the size of each json file.
current_filename = str(omdb_id_list[0]) + "_" + str(omdb_id_list[-1]) + '.json' #Create a new fileformat
    for current_tag in omdb_id_list:  # iterate over the imdb tags we generated
sleep(0.1) #Sleeping 100 milliseconds, to attempt to mitigate cloudflare 524 errors
try:
current_target = omdb.request(i=current_tag, r='json', plot='full', apikey=api_key, timeout=10) #This is where we request data from OMDBAPI.com
except:
skipped_ids.append(current_tag) #We want to keep track of the IDs which were skipped due to timeout errors!
continue
        if current_target.status_code != 200:  # scrape failed (e.g. the id exceeds the database's contents)
skipped_ids.append(current_tag) # We want to keep track of the IDs which were skipped due to timeout errors!
continue
else:
if (current_target.json()['Response'] == "True"): # If false: Something has gone wrong!
try:
imdb_json_data['items'].append(current_target.json()) # Scrape succeeded. Store JSON.
pbar.update(progress_iterator + 1) # Display incremented progress
progress_iterator += 1 # Iterate the progress bar for next iteration
json_batch_iterator += 1 # Iterate the json batch number iterator
except:
skipped_ids.append(current_tag) # Write failure to disk!
else:
skipped_ids.append(current_tag) # Write failure to disk!
write_json_to_disk(current_filename, imdb_json_data['items']) # Final output, likely not triggered the batch limit if statement trigger above
if __name__ == '__main__':
#check_json_existence = check_json_files((scrape_stop - scrape_start)) # Check if any files exist already
#if (check_json_existence == None):
# ref_file_name = 0 # Start the file numbering from 0
#else:
# scrape_start = (check_json_existence * json_batch_limit) # New scraping location
# ref_file_name = check_json_existence # Start the fi;e numbering from where we left off
api_key = "PRIVATE_KEY" # Paid private key, don't publicly share nor change.
scrape_start = 1
scrape_stop = 200 # Range of imdb ids we will generate and scrape.
json_batch_limit = 10
quantity_workers = 5
skipped_ids = [] # Initializing a list to keep track of scraping attempts which timed out.
omdb_id_list_holder = generate_imdb_tt_list(scrape_start, scrape_stop) # Generate our list of lists
    widgets = [Percentage(),  # Setting how we want the progress bar to look
' ', Bar(),
' ', ETA()]
pbar_range = scrape_stop - scrape_start
pbar = ProgressBar(widgets=widgets, maxval=pbar_range).start() #Prepare the progress bar
pool = ThreadPool(quantity_workers) # Let's drop into hyperthread space!
pool.map(scrape_omdb, omdb_id_list_holder) # Deploy the pool workers
pool.close()
pool.join()
    pbar.finish()  # Once we've completed the scraping, end the progress bar.
| 47.185185 | 155 | 0.675693 | 1,086 | 7,644 | 4.578269 | 0.310313 | 0.01609 | 0.012068 | 0.018504 | 0.080048 | 0.057522 | 0.055109 | 0.055109 | 0.055109 | 0.037409 | 0 | 0.009604 | 0.237179 | 7,644 | 161 | 156 | 47.478261 | 0.84308 | 0.55887 | 0 | 0.153846 | 0 | 0 | 0.028032 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064103 | false | 0 | 0.115385 | 0 | 0.179487 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca441024b2e763c4af65aa1f10420402eb5cf8f7 | 22,324 | py | Python | tests/hetr_tests/test_hetr_integration.py | NervanaSystems/ngraph-python | ac032c83c7152b615a9ad129d54d350f9d6a2986 | [
"Apache-2.0"
] | 18 | 2018-03-19T04:16:49.000Z | 2021-02-08T14:44:58.000Z | tests/hetr_tests/test_hetr_integration.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 2 | 2019-04-16T06:41:49.000Z | 2019-05-06T14:08:13.000Z | tests/hetr_tests/test_hetr_integration.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 11 | 2018-06-16T15:59:08.000Z | 2021-03-06T00:45:30.000Z | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
from contextlib import closing
from ngraph.testing import ExecutorFactory
import ngraph as ng
import ngraph.transformers as ngt
from ngraph.transformers.hetr.mpilauncher import MPILauncher
import time
import os
import subprocess
import tempfile
import random
pytestmark = pytest.mark.hetr_only
STARTUP_TIME = 3
ax_A = ng.make_axis(4)
ax_B = ng.make_axis(8)
ax_C = ng.make_axis(12)
ax_D = ng.make_axis(24)
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'device_id': ('0', '1'),
},
{
'device_id': ('0', '1', '2', '3'),
},
])
def test_broadcast_scalar(hetr_device, config):
if hetr_device == 'gpu':
pytest.skip('gpu communication broadcast op is not supported.')
device_id = config['device_id']
x = ng.placeholder(())
y = ng.placeholder(())
with ng.metadata(device_id=device_id, parallel=ax_A):
x_plus_y = x + y
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation(x_plus_y, x, y)
res = computation(1, 2)
np.testing.assert_array_equal(res, 3)
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'axes': ng.make_axes([ax_A]),
'device_id': ('0', '1'),
'parallel_axis': ax_A,
},
{
'axes': ng.make_axes([ax_A, ax_B]),
'device_id': ('0', '1', '2', '3'),
'parallel_axis': ax_A,
},
])
def test_distributed_plus_one(hetr_device, config):
device_id = config['device_id']
axes = config['axes']
parallel_axis = config['parallel_axis']
with ng.metadata(device=hetr_device):
x = ng.placeholder(axes=axes)
with ng.metadata(device_id=device_id, parallel=parallel_axis):
x_plus_one = x + 1
np_x = np.random.randint(100, size=axes.lengths)
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation(x_plus_one, x)
res = computation(np_x)
np.testing.assert_array_equal(res, np_x + 1)
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'axes_x': ng.make_axes([ax_B, ax_A]),
'axes_w': ng.make_axes([ax_A, ax_C]),
'device_id': ('0', '1'),
'parallel_axis': ax_B,
},
{
'axes_x': ng.make_axes([ax_B, ax_A]),
'axes_w': ng.make_axes([ax_A, ax_C]),
'device_id': ('0', '1', '2', '3'),
'parallel_axis': ax_B,
},
])
def test_distributed_dot(hetr_device, config):
if hetr_device == 'gpu':
pytest.xfail("Intermittent failure on jenkins for mgpu")
device_id = config['device_id']
axes_x = config['axes_x']
axes_w = config['axes_w']
parallel_axis = config['parallel_axis']
np_weight = np.ones(axes_w.lengths)
with ng.metadata(device=hetr_device):
x = ng.placeholder(axes=axes_x)
with ng.metadata(device_id=device_id, parallel=parallel_axis):
w = ng.variable(axes=axes_w, initial_value=np_weight)
dot = ng.dot(x, w)
np_x = np.random.randint(100, size=axes_x.lengths)
with closing(ngt.make_transformer_factory('hetr',
device=hetr_device)()) as transformer:
computation = transformer.computation(dot, x)
res = computation(np_x)
np.testing.assert_array_equal(res, np.dot(np_x, np_weight))
@pytest.mark.multi_device
def test_multi_computations(hetr_device):
if hetr_device == 'gpu':
pytest.xfail("enable after gpu exgraph")
axes_x = ng.make_axes([ax_A, ax_B])
x = ng.placeholder(axes=axes_x)
y = ng.placeholder(())
with ng.metadata(device_id=('0', '1'), parallel=ax_A):
f = x ** 2
out = y - ng.mean(f, out_axes=())
np_x = np.random.randint(10, size=axes_x.lengths)
np_y = np.random.randint(10)
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as t:
comp = t.computation(out, x, y)
another_comp = t.computation(f, x)
res_comp = comp(np_x, np_y)
res_another_comp = another_comp(np_x)
ref_comp = np_y - np.mean(np_x**2)
np.testing.assert_array_equal(res_comp, ref_comp)
np.testing.assert_array_equal(res_another_comp, np_x**2)
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'axes': ng.make_axes([ax_A]),
'device_id': ('0', '1'),
'parallel_axis': ax_A,
},
])
def test_repeat_computation(hetr_device, config):
if hetr_device == 'gpu':
pytest.xfail("enable after gpu exgraph")
device_id = config['device_id']
axes = config['axes']
parallel_axis = config['parallel_axis']
with ng.metadata(device=hetr_device):
x = ng.placeholder(axes=axes)
with ng.metadata(device_id=device_id, parallel=parallel_axis):
x_plus_one = x + 1
np_x = np.random.randint(100, size=axes.lengths)
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
comp = transformer.computation(x_plus_one, x)
comp2 = transformer.computation(x_plus_one, x)
res = comp(np_x)
np.testing.assert_array_equal(res, np_x + 1)
res2 = comp2(np_x)
np.testing.assert_array_equal(res2, np_x + 1)
@pytest.mark.multi_device
def test_comm_broadcast_op(hetr_device):
if hetr_device == 'gpu':
pytest.skip('gpu communication broadcast op is not supported.')
H = ng.make_axis(length=4, name='height')
N = ng.make_axis(length=8, name='batch')
weight = ng.make_axis(length=2, name='weight')
x = ng.placeholder(axes=[N, H])
# w will be broadcasted to devices
w = ng.placeholder(axes=[H, weight])
with ng.metadata(device=hetr_device, device_id=('0', '1'), parallel=N):
dot = ng.dot(x, w)
np_x = np.random.randint(100, size=[N.length, H.length])
np_weight = np.random.randint(100, size=[H.length, weight.length])
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation(dot, x, w)
res = computation(np_x, np_weight)
np.testing.assert_array_equal(res, np.dot(np_x, np_weight))
@pytest.mark.multi_device
def test_reduce_scalar(hetr_device):
"""
A scalar is produced by sum() on each worker
in this case, should be mean reduced before being returned
"""
if hetr_device == 'gpu':
pytest.xfail("gather/reduce work-around for gpus does not choose between mean or sum,\
it uses only the value on the first device and ignores the values on other devices")
N = ng.make_axis(length=8, name='batch')
x = ng.placeholder(axes=[N])
with ng.metadata(device=hetr_device, device_id=('0', '1'), parallel=N):
out = ng.sum(x)
np_x = np.random.randint(100, size=[N.length])
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation(out, x)
res = computation(np_x)
# gather returns one element per worker
np.testing.assert_array_equal(res, np.sum(np_x) / 2.)
@pytest.mark.multi_device
def test_reduce_vector(hetr_device):
"""
A whole vector is produced on each worker and should be reduced
before being returned, but not along its axes since it
does not have the parallel axis in its axes
"""
if hetr_device == 'gpu':
pytest.xfail("broadcast communication ops not yet supported on gpus")
H = ng.make_axis(length=4, name='height')
N = ng.make_axis(length=8, name='batch')
weight = ng.make_axis(length=2, name='weight')
x = ng.placeholder(axes=[N, H])
w = ng.placeholder(axes=[H, weight])
with ng.metadata(device=hetr_device, device_id=('0', '1'), parallel=N):
dot = ng.dot(x, w)
out = ng.sum(dot, N)
np_x = np.random.randint(100, size=[N.length, H.length])
np_weight = np.random.randint(100, size=[H.length, weight.length])
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation(out, x, w)
res = computation(np_x, np_weight)
# TODO should the reduce infer a sum or mean?
expected = np.sum(np.dot(np_x, np_weight), 0) / 2.
np.testing.assert_array_equal(res, expected)
@pytest.mark.multi_device
def test_distributed_dot_parallel_second_axis(hetr_device):
if hetr_device == 'gpu':
pytest.xfail("Axes Layout needs to be fixed for GPUs after changes to make\
parallel_axis the least contiguous axis for scatter/gather communication ops")
H = ng.make_axis(length=6, name='height')
N = ng.make_axis(length=8, name='batch')
W1 = ng.make_axis(length=2, name='W1')
W2 = ng.make_axis(length=4, name='W2')
x = ng.placeholder(axes=[H, N])
w2 = ng.placeholder(axes=[W2, W1])
with ng.metadata(device=hetr_device, device_id=('0', '1'), parallel=N):
w1 = ng.placeholder(axes=[W1, H])
dot1 = ng.dot(w1, x).named("dot1")
dot2 = ng.dot(w2, dot1).named("dot2")
np_x = np.random.randint(100, size=[H.length, N.length])
np_w1 = np.random.randint(100, size=[W1.length, H.length])
np_w2 = np.random.randint(100, size=[W2.length, W1.length])
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation([dot2, dot1], x, w1, w2)
res2, res1 = computation(np_x, np_w1, np_w2)
np.testing.assert_array_equal(res1, np.dot(np_w1, np_x))
np.testing.assert_array_equal(res2, np.dot(np_w2, np.dot(np_w1, np_x)))
computation2 = transformer.computation([dot1, dot2], x, w1, w2)
res1, res2 = computation2(np_x, np_w1, np_w2)
np.testing.assert_array_equal(res1, np.dot(np_w1, np_x))
np.testing.assert_array_equal(res2, np.dot(np_w2, np.dot(np_w1, np_x)))
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'axes': ng.make_axes([ax_A]),
'device_id': ('0', '1'),
'parallel_axis': ax_A,
},
{
'axes': ng.make_axes([ax_A, ax_B]),
'device_id': ('0', '1', '2', '3'),
'parallel_axis': ax_A,
},
])
def test_distributed_plus_two(hetr_device, config):
device_id = config['device_id']
axes = config['axes']
parallel_axis = config['parallel_axis']
with ng.metadata(device=hetr_device):
x = ng.placeholder(axes=axes)
with ng.metadata(device_id=device_id, parallel=parallel_axis):
x_plus_one = x + 1
x_plus_two = x_plus_one + 1
np_x = np.random.randint(100, size=axes.lengths)
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation(x_plus_two, x)
res = computation(np_x)
np.testing.assert_array_equal(res, np_x + 2)
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'axes': None,
},
{
'axes': ng.make_axes([ax_A]),
},
{
'axes': ng.make_axes([ax_A, ax_B]),
},
])
def test_to_and_from_device(hetr_device, config):
axes = config['axes']
with ng.metadata(device=hetr_device):
x = ng.placeholder(axes=axes) if axes else ng.placeholder(())
with ng.metadata(device_id='1'):
x_plus_one = x + 1
x_plus_two = x_plus_one * 2
np_x = np.random.randint(100, size=axes.lengths) if axes else random.random()
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation([x_plus_one, x_plus_two], x)
res = computation(np_x)
np.testing.assert_allclose(res[0], np_x + 1.0)
np.testing.assert_allclose(res[1], (np_x + 1.0) * 2.0)
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'axes': None,
},
{
'axes': ng.make_axes([ax_A]),
},
{
'axes': ng.make_axes([ax_A, ax_B]),
},
])
def test_computation_return_list(hetr_device, config):
axes = config['axes']
with ng.metadata(device=hetr_device):
x = ng.placeholder(axes=axes) if axes else ng.placeholder(())
with ng.metadata(device_id='1'):
x_plus_one = x + 1
with ng.metadata(device_id='2'):
x_plus_two = x_plus_one + 1
with ng.metadata(device_id='3'):
x_mul_two = x_plus_two * 2
np_x = np.random.randint(100, size=axes.lengths) if axes else random.random()
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as transformer:
computation = transformer.computation([x_plus_one, x_plus_two, x_mul_two], x)
res = computation(np_x)
np.testing.assert_allclose(res[0], np_x + 1)
np.testing.assert_allclose(res[1], np_x + 2)
np.testing.assert_allclose(res[2], (np_x + 2) * 2)
@pytest.mark.hetr_gpu_only
def test_gpu_send_and_recv(hetr_device):
pytest.xfail("GitHub issue: #2007, Unknown error - investigation is needed")
# put x+1 on cpu numpy
with ng.metadata(device='cpu'):
x = ng.placeholder(())
x_plus_one = x + 1
# put x+2 on gpu numpy
with ng.metadata(device='gpu'):
x_plus_two = x_plus_one + 1
with ExecutorFactory() as ex:
computation = ex.executor(x_plus_two, x)
for i in [10, 20, 30]:
assert computation(i) == i + 2
# put x+1 on gpu numpy
with ng.metadata(device='gpu'):
x = ng.placeholder(())
x_plus_one = x + 1
# put x+2 on cpu numpy
with ng.metadata(device='cpu'):
x_plus_two = x_plus_one + 1
with ExecutorFactory() as ex:
computation = ex.executor(x_plus_two, x)
for i in [10, 20, 30]:
assert computation(i) == i + 2
def test_recvop_axes_using_dot():
x_value = np.array([[1],
[2]])
w_value = np.array([[-1, 1]])
A1 = ng.make_axis(length=1)
A2 = ng.make_axis(length=2)
A3 = ng.make_axis(length=2)
x = ng.placeholder([A2, A1])
w = ng.variable([A1, A3], initial_value=w_value)
with ng.metadata(device_id='1'):
result = ng.dot(x, w)
with ExecutorFactory() as ex:
computation = ex.executor(result, x, w)
val_ng = computation(x_value, w_value)
val_np = np.dot(x_value, w_value)
ng.testing.assert_allclose(val_ng, val_np)
def test_recvop_tensorupdate():
"""
The tensor (RecvOp_#_#) associated with the following conv op has two views:
1) Non-flat view (e.g. RecvOp_#_#_1_1_1_1_4.shape=(1,1,1,1,4))
2) Flat view (e.g. RecvOp_#_#_1_4.shape = (1,4))
    This test ensures that the code generated for RecvOp updates both
    views (e.g. by using the update_RecvOp_#_# API).
    In this test, the ng.dot operation tends to use the flat view (i.e. RecvOp_#_#_1_4),
    and previously, RecvOp with RecvOp_#_#_1_1_1_1_4 = recv_from_send(send_id) failed
    to update both views (i.e. the flat and non-flat views of the same buffer/tensor).
"""
class ConvParams(object):
def __init__(self, C=1, N=1, K=1, D=1, H=1, W=1, T=1, R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1):
from ngraph.frontends.common.utils import conv_output_dim
M = conv_output_dim(D, T, pad_d, str_d)
P = conv_output_dim(H, R, pad_h, str_h)
Q = conv_output_dim(W, S, pad_w, str_w)
self.dimO = (K, M, P, Q, N)
self.dimI = (C, D, H, W, N)
self.dimF = (C, T, R, S, K)
self.conv_params = dict(
pad_d=pad_d, pad_h=pad_h, pad_w=pad_w,
str_d=str_d, str_h=str_h, str_w=str_w,
dil_d=1, dil_h=1, dil_w=1
)
self.batch_axis = ng.make_axis(name='N', length=N)
self.ax_i = ng.make_axes([
ng.make_axis(name='C', length=C),
ng.make_axis(name='D', length=D),
ng.make_axis(name='H', length=H),
ng.make_axis(name='W', length=W),
self.batch_axis
])
self.ax_f = ng.make_axes([
ng.make_axis(name='C', length=C),
ng.make_axis(name='D', length=T),
ng.make_axis(name='H', length=R),
ng.make_axis(name='W', length=S),
ng.make_axis(name='K', length=K),
])
self.ax_o = ng.make_axes([
ng.make_axis(name='C', length=K),
ng.make_axis(name='D', length=M),
ng.make_axis(name='H', length=P),
ng.make_axis(name='W', length=Q),
self.batch_axis
])
    # Layer 1: using convolution introduces multiple/flattened views of tensors
cf = ConvParams(C=2, N=4, K=1, H=2, W=2, R=2, S=2)
inputs = ng.placeholder(axes=cf.ax_i)
filters = ng.placeholder(axes=cf.ax_f)
# randomly initialize
from ngraph.testing import RandomTensorGenerator
rng = RandomTensorGenerator(0, np.float32)
# put value 1 into inputs/filters for conv
input_value = rng.uniform(1, 1, cf.ax_i)
filter_value = rng.uniform(1, 1, cf.ax_f)
conv = ng.convolution(cf.conv_params, inputs, filters, axes=cf.ax_o)
# Layer 2, using dot to ensure recv_op.axes == send_op.axes
from ngraph.frontends.neon import UniformInit
# put value 1 into weights for dot
init_uni = UniformInit(1, 1)
W_A = ng.make_axis(length=2)
w_axes = ng.make_axes(W_A) + conv.axes.feature_axes()
w = ng.variable(axes=w_axes, initial_value=init_uni)
with ng.metadata(device_id='1'):
dot = ng.dot(w, conv)
with ExecutorFactory() as ex:
dot_comp = ex.executor(dot, filters, inputs)
dot_val = dot_comp(filter_value, input_value)
np.testing.assert_array_equal(dot_val, [[8., 8., 8., 8.],
[8., 8., 8., 8.]])
class ClosingHetrServers():
def __init__(self, ports):
self.tmpfile = tempfile.NamedTemporaryFile(dir=os.path.dirname(os.path.realpath(__file__)),
delete=True)
self.processes = []
for p in ports:
hetr_server = os.path.dirname(os.path.realpath(__file__)) +\
"/../../ngraph/transformers/hetr/hetr_server.py"
command = ["python", hetr_server, "-tf", self.tmpfile.name, "-p", p]
try:
proc = subprocess.Popen(command)
self.processes.append(proc)
except Exception as e:
print(e)
time.sleep(STARTUP_TIME)
def close(self):
for p in self.processes:
p.terminate()
for p in self.processes:
p.kill()
for p in self.processes:
p.wait()
self.tmpfile.close()
def test_rpc_transformer():
pytest.xfail("Needs investigation-STARTUP_TIME is too large, needs to be over 5 seconds.")
from ngraph.transformers.hetr.rpc_client import RPCTransformerClient
rpc_client_list = list()
port_list = ['50111', '50112']
num_procs = len(port_list)
with closing(ClosingHetrServers(port_list)):
for p in range(num_procs):
rpc_client_list.append(RPCTransformerClient('cpu' + str(p),
'localhost:' + port_list[p]))
np.testing.assert_equal(rpc_client_list[p].is_trans_built, False)
np.testing.assert_equal(rpc_client_list[p].transformer_type, 'cpu' + str(p))
np.testing.assert_equal(rpc_client_list[p].server_address, 'localhost:' + port_list[p])
for p in range(num_procs):
rpc_client_list[p].build_transformer()
np.testing.assert_equal(rpc_client_list[p].is_trans_built, True)
for p in range(num_procs):
rpc_client_list[p].close_transformer()
for p in range(num_procs):
rpc_client_list[p].close()
np.testing.assert_equal(rpc_client_list[p].is_trans_built, False)
def test_mpilauncher():
os.environ["HETR_SERVER_PORTS"] = "51111, 51112"
mpilauncher = MPILauncher()
mpilauncher.launch(2, 1)
# Check if process has launched
assert mpilauncher.mpirun_proc.poll() is None
mpilauncher.close()
# Check if process has completed
assert mpilauncher.mpirun_proc is None
@pytest.mark.multi_device
@pytest.mark.parametrize('config', [
{
'dataset': 'cifar10',
'iter_count': 1,
'batch_size': 64,
'device_id': ('0', '1'),
'bprop': True,
'batch_norm': False,
}
])
def test_hetr_benchmark(hetr_device, config):
    """
    Description:
        Test to ensure benchmarks are working.
        Benchmark used for the test is mini_resnet.
    """
    pytest.skip('Possible issue only on jenkins, disable until figured out.')
from examples.benchmarks.mini_resnet import run_resnet_benchmark
c = config
run_resnet_benchmark(dataset=c['dataset'],
num_iterations=c['iter_count'],
n_skip=1,
batch_size=c['batch_size'],
device_id=c['device_id'],
transformer_type='hetr',
device=hetr_device,
bprop=c['bprop'],
batch_norm=c['batch_norm'],
visualize=False)
| 35.775641 | 99 | 0.61781 | 3,220 | 22,324 | 4.072981 | 0.130124 | 0.044224 | 0.025162 | 0.039649 | 0.579642 | 0.55509 | 0.506367 | 0.47236 | 0.438506 | 0.40732 | 0 | 0.020702 | 0.244849 | 22,324 | 623 | 100 | 35.833066 | 0.757267 | 0.091785 | 0 | 0.437895 | 0 | 0 | 0.064125 | 0.003593 | 0 | 0 | 0 | 0.001605 | 0.065263 | 1 | 0.044211 | false | 0 | 0.035789 | 0 | 0.084211 | 0.002105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca45b500d25fab159b618f1a88b1a991fcd45ebe | 17,159 | py | Python | yardstick/network_services/traffic_profile/ixia_rfc2544.py | upfront710/yardstick | 2c3898f2ca061962cedbfc7435f78b59aa39b097 | [
"Apache-2.0"
] | 28 | 2017-02-07T07:46:42.000Z | 2021-06-30T08:11:06.000Z | yardstick/network_services/traffic_profile/ixia_rfc2544.py | upfront710/yardstick | 2c3898f2ca061962cedbfc7435f78b59aa39b097 | [
"Apache-2.0"
] | 6 | 2018-01-18T08:00:54.000Z | 2019-04-11T04:51:41.000Z | yardstick/network_services/traffic_profile/ixia_rfc2544.py | upfront710/yardstick | 2c3898f2ca061962cedbfc7435f78b59aa39b097 | [
"Apache-2.0"
] | 46 | 2016-12-13T10:05:47.000Z | 2021-02-18T07:33:06.000Z | # Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import collections
from yardstick.common import utils
from yardstick.network_services.traffic_profile import base as tp_base
from yardstick.network_services.traffic_profile import trex_traffic_profile
LOG = logging.getLogger(__name__)
class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
UPLINK = 'uplink'
DOWNLINK = 'downlink'
DROP_PERCENT_ROUND = 6
STATUS_SUCCESS = "Success"
STATUS_FAIL = "Failure"
def __init__(self, yaml_data):
super(IXIARFC2544Profile, self).__init__(yaml_data)
self.rate = self.config.frame_rate
self.rate_unit = self.config.rate_unit
self.iteration = 0
self.full_profile = {}
def _get_ip_and_mask(self, ip_range):
_ip_range = ip_range.split('-')
if len(_ip_range) == 1:
return _ip_range[0], None
mask = utils.get_mask_from_ip_range(_ip_range[0], _ip_range[1])
return _ip_range[0], mask
def _get_fixed_and_mask(self, port_range):
_port_range = str(port_range).split('-')
if len(_port_range) == 1:
return int(_port_range[0]), 0
return int(_port_range[0]), int(_port_range[1])
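    # Examples (illustrative; the IP mask comes from
    # utils.get_mask_from_ip_range):
    #   _get_ip_and_mask('10.0.0.1-10.0.0.254') -> ('10.0.0.1', <mask>)
    #   _get_fixed_and_mask('1000-1010')        -> (1000, 1010)
    #   _get_fixed_and_mask(53)                 -> (53, 0)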
def _get_ixia_traffic_profile(self, profile_data, mac=None):
mac = {} if mac is None else mac
result = {}
for traffickey, values in profile_data.items():
if not traffickey.startswith((self.UPLINK, self.DOWNLINK)):
continue
# values should be single-item dict, so just grab the first item
try:
key, value = next(iter(values.items()))
except StopIteration:
result[traffickey] = {}
continue
port_id = value.get('id', 1)
port_index = port_id - 1
result[traffickey] = {
'bidir': False,
'id': port_id,
'rate': self.rate,
'rate_unit': self.rate_unit,
'outer_l2': {},
'outer_l3': {},
'outer_l4': {},
}
frame_rate = value.get('frame_rate')
if frame_rate:
flow_rate, flow_rate_unit = self.config.parse_rate(frame_rate)
result[traffickey]['rate'] = flow_rate
result[traffickey]['rate_unit'] = flow_rate_unit
outer_l2 = value.get('outer_l2')
if outer_l2:
result[traffickey]['outer_l2'].update({
'framesize': outer_l2.get('framesize'),
'framesPerSecond': True,
'QinQ': outer_l2.get('QinQ'),
'srcmac': mac.get('src_mac_{}'.format(port_index)),
'dstmac': mac.get('dst_mac_{}'.format(port_index)),
})
if value.get('outer_l3v4'):
outer_l3 = value['outer_l3v4']
src_key, dst_key = 'srcip4', 'dstip4'
else:
outer_l3 = value.get('outer_l3v6')
src_key, dst_key = 'srcip6', 'dstip6'
if outer_l3:
srcip = srcmask = dstip = dstmask = None
if outer_l3.get(src_key):
srcip, srcmask = self._get_ip_and_mask(outer_l3[src_key])
if outer_l3.get(dst_key):
dstip, dstmask = self._get_ip_and_mask(outer_l3[dst_key])
result[traffickey]['outer_l3'].update({
'count': outer_l3.get('count', 1),
'dscp': outer_l3.get('dscp'),
'ttl': outer_l3.get('ttl'),
'srcseed': outer_l3.get('srcseed', 1),
'dstseed': outer_l3.get('dstseed', 1),
'srcip': srcip,
'dstip': dstip,
'srcmask': srcmask,
'dstmask': dstmask,
'type': key,
'proto': outer_l3.get('proto'),
'priority': outer_l3.get('priority')
})
outer_l4 = value.get('outer_l4')
if outer_l4:
src_port = src_port_mask = dst_port = dst_port_mask = None
if outer_l4.get('srcport'):
src_port, src_port_mask = (
self._get_fixed_and_mask(outer_l4['srcport']))
if outer_l4.get('dstport'):
dst_port, dst_port_mask = (
self._get_fixed_and_mask(outer_l4['dstport']))
result[traffickey]['outer_l4'].update({
'srcport': src_port,
'dstport': dst_port,
'srcportmask': src_port_mask,
'dstportmask': dst_port_mask,
'count': outer_l4.get('count', 1),
'seed': outer_l4.get('seed', 1),
})
return result
def _ixia_traffic_generate(self, traffic, ixia_obj, traffic_gen):
ixia_obj.update_frame(traffic, self.config.duration)
ixia_obj.update_ip_packet(traffic)
ixia_obj.update_l4(traffic)
self._update_traffic_tracking_options(traffic_gen)
ixia_obj.start_traffic()
def _update_traffic_tracking_options(self, traffic_gen):
traffic_gen.update_tracking_options()
def update_traffic_profile(self, traffic_generator):
def port_generator():
for vld_id, intfs in sorted(traffic_generator.networks.items()):
if not vld_id.startswith((self.UPLINK, self.DOWNLINK)):
continue
profile_data = self.params.get(vld_id)
if not profile_data:
continue
self.profile_data = profile_data
self.full_profile.update({vld_id: self.profile_data})
for intf in intfs:
yield traffic_generator.vnfd_helper.port_num(intf)
self.ports = [port for port in port_generator()]
def execute_traffic(self, traffic_generator, ixia_obj=None, mac=None):
mac = {} if mac is None else mac
first_run = self.first_run
if self.first_run:
self.first_run = False
self.pg_id = 0
self.max_rate = self.rate
self.min_rate = 0.0
else:
self.rate = self._get_next_rate()
self.iteration = traffic_generator.rfc_helper.iteration.value
traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
self._ixia_traffic_generate(traffic, ixia_obj, traffic_generator)
return first_run
# pylint: disable=unused-argument
def get_drop_percentage(self, samples, tol_min, tolerance, precision,
resolution, first_run=False, tc_rfc2544_opts=None):
completed = False
drop_percent = 100.0
num_ifaces = len(samples)
duration = self.config.duration
in_packets_sum = sum(
[samples[iface]['InPackets'] for iface in samples])
out_packets_sum = sum(
[samples[iface]['OutPackets'] for iface in samples])
in_bytes_sum = sum(
[samples[iface]['InBytes'] for iface in samples])
out_bytes_sum = sum(
[samples[iface]['OutBytes'] for iface in samples])
rx_throughput = round(float(in_packets_sum) / duration, 3)
tx_throughput = round(float(out_packets_sum) / duration, 3)
# Rx throughput in Bps
rx_throughput_bps = round(float(in_bytes_sum) / duration, 3)
# Tx throughput in Bps
tx_throughput_bps = round(float(out_bytes_sum) / duration, 3)
packet_drop = abs(out_packets_sum - in_packets_sum)
try:
drop_percent = round(
(packet_drop / float(out_packets_sum)) * 100,
self.DROP_PERCENT_ROUND)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
if first_run:
completed = True if drop_percent <= tolerance else False
if (first_run and
self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
self.rate = float(out_packets_sum) / duration / num_ifaces
if drop_percent > tolerance:
self.max_rate = self.rate
elif drop_percent < tol_min:
self.min_rate = self.rate
else:
completed = True
last_rate = self.rate
next_rate = self._get_next_rate()
if abs(next_rate - self.rate) < resolution:
LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate,
next_rate, resolution)
            # stop the test if the difference between the transmission rates
            # of two consecutive iterations is smaller than the resolution
completed = True
LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s "
"completed=%s", tolerance, precision, drop_percent,
completed)
latency_ns_avg = float(sum(
[samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces
latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples])
latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples])
samples['Status'] = self.STATUS_FAIL
if round(drop_percent, precision) <= tolerance:
samples['Status'] = self.STATUS_SUCCESS
samples['TxThroughput'] = tx_throughput
samples['RxThroughput'] = rx_throughput
samples['TxThroughputBps'] = tx_throughput_bps
samples['RxThroughputBps'] = rx_throughput_bps
samples['DropPercentage'] = drop_percent
samples['LatencyAvg'] = latency_ns_avg
samples['LatencyMin'] = latency_ns_min
samples['LatencyMax'] = latency_ns_max
samples['Rate'] = last_rate
samples['PktSize'] = self._get_framesize()
samples['Iteration'] = self.iteration
return completed, samples
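
# A minimal sketch (assumption: _get_next_rate() bisects between min_rate and
# max_rate) of the RFC2544 rate search that get_drop_percentage() drives:
#
#     rate, completed = initial_rate, False
#     while not completed:
#         drop = measure_drop_percentage(rate)      # one traffic iteration
#         if drop > tolerance:
#             max_rate = rate                       # too fast: search lower
#         elif drop < tol_min:
#             min_rate = rate                       # too slow: search higher
#         else:
#             completed = True                      # inside tolerance window
#         next_rate = (max_rate + min_rate) / 2.0
#         if abs(next_rate - rate) < resolution:    # converged to resolution
#             completed = True
#         rate = next_rate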
class IXIARFC2544PppoeScenarioProfile(IXIARFC2544Profile):
"""Class handles BNG PPPoE scenario tests traffic profile"""
def __init__(self, yaml_data):
super(IXIARFC2544PppoeScenarioProfile, self).__init__(yaml_data)
self.full_profile = collections.OrderedDict()
def _get_flow_groups_params(self):
flows_data = [key for key in self.params.keys()
if key.split('_')[0] in [self.UPLINK, self.DOWNLINK]]
for i in range(len(flows_data)):
uplink = '_'.join([self.UPLINK, str(i)])
downlink = '_'.join([self.DOWNLINK, str(i)])
if uplink in flows_data:
self.full_profile.update({uplink: self.params[uplink]})
if downlink in flows_data:
self.full_profile.update({downlink: self.params[downlink]})
def update_traffic_profile(self, traffic_generator):
networks = collections.OrderedDict()
        # Sort network interface pairs
for i in range(len(traffic_generator.networks)):
uplink = '_'.join([self.UPLINK, str(i)])
downlink = '_'.join([self.DOWNLINK, str(i)])
if uplink in traffic_generator.networks:
networks[uplink] = traffic_generator.networks[uplink]
if downlink in traffic_generator.networks:
networks[downlink] = traffic_generator.networks[downlink]
def port_generator():
for intfs in networks.values():
for intf in intfs:
yield traffic_generator.vnfd_helper.port_num(intf)
self._get_flow_groups_params()
self.ports = [port for port in port_generator()]
def _get_prio_flows_drop_percentage(self, stats):
drop_percent = 100
for prio_id in stats:
prio_flow = stats[prio_id]
sum_packet_drop = abs(prio_flow['OutPackets'] - prio_flow['InPackets'])
try:
drop_percent = round(
(sum_packet_drop / float(prio_flow['OutPackets'])) * 100,
self.DROP_PERCENT_ROUND)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
prio_flow['DropPercentage'] = drop_percent
return stats
def _get_summary_pppoe_subs_counters(self, samples):
result = {}
keys = ['SessionsUp',
'SessionsDown',
'SessionsNotStarted',
'SessionsTotal']
for key in keys:
result[key] = \
sum([samples[port][key] for port in samples
if key in samples[port]])
return result
def get_drop_percentage(self, samples, tol_min, tolerance, precision,
resolution, first_run=False, tc_rfc2544_opts=None):
completed = False
sum_drop_percent = 100
num_ifaces = len(samples)
duration = self.config.duration
last_rate = self.rate
priority_stats = samples.pop('priority_stats')
priority_stats = self._get_prio_flows_drop_percentage(priority_stats)
summary_subs_stats = self._get_summary_pppoe_subs_counters(samples)
in_packets_sum = sum(
[samples[iface]['InPackets'] for iface in samples])
out_packets_sum = sum(
[samples[iface]['OutPackets'] for iface in samples])
in_bytes_sum = sum(
[samples[iface]['InBytes'] for iface in samples])
out_bytes_sum = sum(
[samples[iface]['OutBytes'] for iface in samples])
rx_throughput = round(float(in_packets_sum) / duration, 3)
tx_throughput = round(float(out_packets_sum) / duration, 3)
# Rx throughput in Bps
rx_throughput_bps = round(float(in_bytes_sum) / duration, 3)
# Tx throughput in Bps
tx_throughput_bps = round(float(out_bytes_sum) / duration, 3)
sum_packet_drop = abs(out_packets_sum - in_packets_sum)
try:
sum_drop_percent = round(
(sum_packet_drop / float(out_packets_sum)) * 100,
self.DROP_PERCENT_ROUND)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
latency_ns_avg = float(sum(
[samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces
latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples])
latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples])
samples['TxThroughput'] = tx_throughput
samples['RxThroughput'] = rx_throughput
samples['TxThroughputBps'] = tx_throughput_bps
samples['RxThroughputBps'] = rx_throughput_bps
samples['DropPercentage'] = sum_drop_percent
samples['LatencyAvg'] = latency_ns_avg
samples['LatencyMin'] = latency_ns_min
samples['LatencyMax'] = latency_ns_max
samples['Priority'] = priority_stats
samples['Rate'] = last_rate
samples['PktSize'] = self._get_framesize()
samples['Iteration'] = self.iteration
samples.update(summary_subs_stats)
if tc_rfc2544_opts:
priority = tc_rfc2544_opts.get('priority')
if priority:
drop_percent = samples['Priority'][priority]['DropPercentage']
else:
drop_percent = sum_drop_percent
else:
drop_percent = sum_drop_percent
if first_run:
completed = True if drop_percent <= tolerance else False
if (first_run and
self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
self.rate = float(out_packets_sum) / duration / num_ifaces
if drop_percent > tolerance:
self.max_rate = self.rate
elif drop_percent < tol_min:
self.min_rate = self.rate
else:
completed = True
next_rate = self._get_next_rate()
if abs(next_rate - self.rate) < resolution:
LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate,
next_rate, resolution)
            # stop the test if the difference between the transmission rates
            # of two consecutive iterations is smaller than the resolution
completed = True
LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s "
"completed=%s", tolerance, precision, drop_percent,
completed)
samples['Status'] = self.STATUS_FAIL
if round(drop_percent, precision) <= tolerance:
samples['Status'] = self.STATUS_SUCCESS
return completed, samples
| 40.279343 | 83 | 0.594615 | 1,962 | 17,159 | 4.926096 | 0.153925 | 0.034144 | 0.014485 | 0.024625 | 0.558717 | 0.503983 | 0.484739 | 0.444697 | 0.435385 | 0.415106 | 0 | 0.012064 | 0.309225 | 17,159 | 425 | 84 | 40.374118 | 0.803341 | 0.0623 | 0 | 0.476331 | 0 | 0 | 0.078451 | 0.00274 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050296 | false | 0 | 0.014793 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca478284b5ab15449bbe9ae4cf72cb8e3dd5e8d8 | 1,201 | py | Python | notes/2020/2020-02-23-python-aiohttp-crud-demo-app/app.py | freininghaus/notes | 1db927afaa70eb6e3af4fd4fcdf868275da7131c | [
"MIT"
] | null | null | null | notes/2020/2020-02-23-python-aiohttp-crud-demo-app/app.py | freininghaus/notes | 1db927afaa70eb6e3af4fd4fcdf868275da7131c | [
"MIT"
] | null | null | null | notes/2020/2020-02-23-python-aiohttp-crud-demo-app/app.py | freininghaus/notes | 1db927afaa70eb6e3af4fd4fcdf868275da7131c | [
"MIT"
] | null | null | null | import json
import os
from aiohttp import web
DATA = {}
async def list_keys(request):
return web.json_response(sorted(DATA))
async def add_document(request):
key = request.match_info["key"]
try:
document = await request.json()
except json.decoder.JSONDecodeError as error:
raise web.HTTPBadRequest(
text=f"Failed to parse request content as JSON. Error at position {error.pos}: {error.msg}")
DATA[key] = document
return web.Response(text="ok")
async def delete_document(request):
key = request.match_info["key"]
try:
del DATA[key]
except KeyError:
raise web.HTTPNotFound()
return web.Response(text="ok")
async def get_document(request):
key = request.match_info["key"]
try:
return web.json_response(DATA[key])
except KeyError:
raise web.HTTPNotFound()
print("CRUD demo app (using a list of routes)")
app = web.Application()
app.add_routes([
web.get("/", list_keys),
web.put("/{key}", add_document),
web.post("/{key}", add_document),
web.delete("/{key}", delete_document),
web.get("/{key}", get_document)
])
web.run_app(app, port=int(os.getenv("PORT", 8080)))  # env vars are strings; aiohttp expects an int port
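# Example requests against the running app (added; assumes the default port):
#   curl -X PUT http://localhost:8080/mykey -d '{"a": 1}'   # create/update
#   curl http://localhost:8080/                             # list keys
#   curl http://localhost:8080/mykey                        # read a document
#   curl -X DELETE http://localhost:8080/mykey              # delete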
| 22.240741 | 104 | 0.653622 | 162 | 1,201 | 4.746914 | 0.37037 | 0.041612 | 0.070221 | 0.097529 | 0.343303 | 0.343303 | 0.343303 | 0.156047 | 0 | 0 | 0 | 0.004211 | 0.208993 | 1,201 | 53 | 105 | 22.660377 | 0.805263 | 0 | 0 | 0.315789 | 0 | 0.026316 | 0.13572 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.078947 | 0 | 0.184211 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca48cfdcb7622dc5e3da4e6a96f90e5e3237d589 | 6,356 | py | Python | flow/flow-master/glow.py | neumason/zi2zi | e8ce521ebd260eae569cd5b3e7923f9450b0cca2 | [
"Apache-2.0"
] | null | null | null | flow/flow-master/glow.py | neumason/zi2zi | e8ce521ebd260eae569cd5b3e7923f9450b0cca2 | [
"Apache-2.0"
] | 1 | 2021-12-22T03:35:18.000Z | 2021-12-22T03:35:18.000Z | flow/flow-master/glow.py | neumason/zi2zi | e8ce521ebd260eae569cd5b3e7923f9450b0cca2 | [
"Apache-2.0"
] | null | null | null | #! -*- coding: utf-8 -*-
# Keras implementation of Glow
# https://blog.openai.com/glow/
from keras.layers import *
from keras.models import Model
from keras.datasets import cifar10
from keras.callbacks import Callback
from keras.optimizers import Adam
from flow_layers import *
import imageio
import numpy as np
from scipy import misc
import glob
import os
import tensorflow as tf
from keras.utils.vis_utils import plot_model
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # do not grab all GPU memory up front; allocate on demand
os.environ["CUDA_VISIBLE_DEVICES"] = "0,2"
if not os.path.exists('samples'):
os.mkdir('samples')
imgs = glob.glob('../experiment/testimg/*.jpg')
img_size = 64  # for a fast try, use img_size=32
depth = 8  # the original paper uses depth=32
level = 3  # the original paper uses level=6 for 256*256 CelebA HQ
def imread(f):
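    # Note (added): scipy.misc.imread/imresize were removed in newer SciPy
    # releases; with a modern stack, imageio.imread plus an explicit resize
    # is the usual replacement.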
x = misc.imread(f, mode='RGB')
x = misc.imresize(x, (img_size, img_size))
x = x.astype(np.float32)
return x / 255 * 2 - 1
def data_generator(batch_size=64):
X = []
while True:
np.random.shuffle(imgs)
for f in imgs:
X.append(imread(f))
if len(X) == batch_size:
X = np.array(X)
yield X,X.reshape((X.shape[0], -1))
X = []
def build_basic_model(in_channel):
"""基础模型,即耦合层中的模型(basic model for Coupling)
"""
_in = Input(shape=(None, None, in_channel))
_ = _in
hidden_dim = 512
_ = Conv2D(hidden_dim,
(3, 3),
padding='same')(_)
# _ = Actnorm(add_logdet_to_loss=False)(_)
_ = Activation('relu')(_)
_ = Conv2D(hidden_dim,
(1, 1),
padding='same')(_)
# _ = Actnorm(add_logdet_to_loss=False)(_)
_ = Activation('relu')(_)
_ = Conv2D(in_channel,
(3, 3),
kernel_initializer='zeros',
padding='same')(_)
return Model(_in, _)
squeeze = Squeeze()
inner_layers = []
outer_layers = []
for i in range(5):
inner_layers.append([])
for i in range(3):
outer_layers.append([])
x_in = Input(shape=(img_size, img_size, 3))
x = x_in
x_outs = []
# Add noise to the inputs for stability.
x = Lambda(lambda s: K.in_train_phase(s + 1./256 * K.random_uniform(K.shape(s)), s))(x)
for i in range(level):
x = squeeze(x)
for j in range(depth):
actnorm = Actnorm()
permute = Permute(mode='random')
split = Split()
couple = CoupleWrapper(build_basic_model(3*2**(i+1)))
concat = Concat()
inner_layers[0].append(actnorm)
inner_layers[1].append(permute)
inner_layers[2].append(split)
inner_layers[3].append(couple)
inner_layers[4].append(concat)
x = actnorm(x)
x = permute(x)
x1, x2 = split(x)
x1, x2 = couple([x1, x2])
x = concat([x1, x2])
if i < level-1:
split = Split()
condactnorm = CondActnorm()
reshape = Reshape()
outer_layers[0].append(split)
outer_layers[1].append(condactnorm)
outer_layers[2].append(reshape)
x1, x2 = split(x)
x_out = condactnorm([x2, x1])
x_out = reshape(x_out)
x_outs.append(x_out)
x = x1
else:
for _ in outer_layers:
_.append(None)
final_actnorm = Actnorm()
final_concat = Concat()
final_reshape = Reshape()
x = final_actnorm(x)
x = final_reshape(x)
x = final_concat(x_outs+[x])
encoder = Model(x_in, x)
plot_model(encoder, to_file="encoder.png", show_shapes=True)
for l in encoder.layers:
if hasattr(l, 'logdet'):
encoder.add_loss(l.logdet)
encoder.summary()
encoder.compile(loss=lambda y_true,y_pred: 0.5 * K.sum(y_pred**2, 1) + 0.5 * np.log(2*np.pi) * K.int_shape(y_pred)[1],
optimizer=Adam(1e-4))
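# Note (added): the compiled loss is the negative log-likelihood under a
# standard Gaussian prior plus the accumulated log-determinants of the flow
# layers; dividing it by (img_size * img_size * 3 * ln 2) would give the
# bits-per-dimension metric reported in the Glow paper.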
# Build the inverse (generative) model by running every operation in reverse
x_in = Input(shape=K.int_shape(encoder.outputs[0])[1:])
x = x_in
x = final_concat.inverse()(x)
outputs = x[:-1]
x = x[-1]
x = final_reshape.inverse()(x)
x = final_actnorm.inverse()(x)
x1 = x
for i, (split, condactnorm, reshape) in enumerate(list(zip(*outer_layers))[::-1]):  # list() so Python 3 can slice
if i > 0:
x1 = x
x_out = outputs[-i]
x_out = reshape.inverse()(x_out)
x2 = condactnorm.inverse()([x_out, x1])
x = split.inverse()([x1, x2])
    for j, (actnorm, permute, split, couple, concat) in enumerate(list(zip(*inner_layers))[::-1][i*depth: (i+1)*depth]):
x1, x2 = concat.inverse()(x)
x1, x2 = couple.inverse()([x1, x2])
x = split.inverse()([x1, x2])
x = permute.inverse()(x)
x = actnorm.inverse()(x)
x = squeeze.inverse()(x)
decoder = Model(x_in, x)
plot_model(decoder, to_file="decoder.png", show_shapes=True)
def sample(path, std=1):
"""采样查看生成效果(generate samples per epoch)
"""
n = 9
figure = np.zeros((img_size * n, img_size * n, 3))
for i in range(n):
for j in range(n):
decoder_input_shape = (1,) + K.int_shape(decoder.inputs[0])[1:]
z_sample = np.array(np.random.randn(*decoder_input_shape)) * std
x_decoded = decoder.predict(z_sample)
digit = x_decoded[0].reshape(img_size, img_size, 3)
figure[i * img_size: (i + 1) * img_size,
j * img_size: (j + 1) * img_size] = digit
figure = (figure + 1) / 2 * 255
figure = np.clip(figure, 0, 255).astype('uint8')
imageio.imwrite(path, figure)
class Evaluate(Callback):
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, epoch, logs=None):
path = 'samples/test_%s.png' % epoch
sample(path, 0.9)
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
encoder.save_weights('./best_encoder.weights')
elif logs['loss'] > 0 and epoch > 10:
"""在后面,loss一般为负数,一旦重新变成正数,
就意味着模型已经崩溃,需要降低学习率。
In general, loss is less than zero.
If loss is greater than zero again, it means model has collapsed.
We need to reload the best model and lower learning rate.
"""
encoder.load_weights('./best_encoder.weights')
K.set_value(encoder.optimizer.lr, 1e-5)
evaluator = Evaluate()
encoder.fit_generator(data_generator(),
steps_per_epoch=1000,
epochs=1000,
callbacks=[evaluator])
| 28.630631 | 118 | 0.595028 | 895 | 6,356 | 4.064804 | 0.281564 | 0.026938 | 0.006597 | 0.012095 | 0.059373 | 0.041781 | 0.031886 | 0.031886 | 0.031886 | 0.031886 | 0 | 0.030956 | 0.263059 | 6,356 | 221 | 119 | 28.760181 | 0.74573 | 0.071901 | 0 | 0.125749 | 0 | 0 | 0.03674 | 0.012663 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035928 | false | 0 | 0.077844 | 0 | 0.131737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca4a501860d545885c7a68c5914cd3f72741cdda | 488 | py | Python | Py exercises/Palindrome.py | arvindkarir/python-pandas-code | fb3b68f07f0438cd0ef6d7ad669ce78650d884a8 | [
"MIT"
] | null | null | null | Py exercises/Palindrome.py | arvindkarir/python-pandas-code | fb3b68f07f0438cd0ef6d7ad669ce78650d884a8 | [
"MIT"
] | null | null | null | Py exercises/Palindrome.py | arvindkarir/python-pandas-code | fb3b68f07f0438cd0ef6d7ad669ce78650d884a8 | [
"MIT"
] | null | null | null | #Palindrome check for string s
s = ['d', 'o', 'g', 'G', 'o', 'd']  # the "string" is stored as a list of characters
s = [element.lower() for element in s]  # convert each element to lowercase
def isPal(s):
print(' isPal called with', s ) #printing the string
if len(s) <= 1: #zero or one letter case
print('Base case is always true')
return True
else:
        ans = s[0] == s[-1] and isPal(s[1:-1])  # recursive call on the inner slice
print('Return', ans, 'for', s)
return ans
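
# Example call (added): prints True for the list defined above
print(isPal(s))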
| 32.533333 | 84 | 0.608607 | 80 | 488 | 3.7125 | 0.5625 | 0.020202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013477 | 0.239754 | 488 | 14 | 85 | 34.857143 | 0.787062 | 0.340164 | 0 | 0 | 0 | 0 | 0.188742 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca4bf45e859d235d8665ccec531d457472e1d9c8 | 4,653 | py | Python | 05ctc/my_dataset.py | sadahry/python_asr | a9e69867cd4d2e5f21ffa8266e288dd1a0720ea0 | [
"MIT"
] | null | null | null | 05ctc/my_dataset.py | sadahry/python_asr | a9e69867cd4d2e5f21ffa8266e288dd1a0720ea0 | [
"MIT"
] | null | null | null | 05ctc/my_dataset.py | sadahry/python_asr | a9e69867cd4d2e5f21ffa8266e288dd1a0720ea0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Definition of the dataset pipeline (originally a PyTorch Dataset, here built with tf.data)
#
import tensorflow as tf
# Import numpy for numerical operations
import numpy as np
# Import the sys module
import sys
def build_dataset(feat_scp,
label_scp,
feat_mean,
feat_std,
batch_size,
num_tokens,
pad_index=0,
splice=0,
):
    ''' Build a tf.data.Dataset of mini-batches.
    (The original PyTorch version subclassed torch.utils.data.Dataset and
    defined __len__ / __getitem__; here the same logic is expressed as a
    tf.data pipeline.)
    feat_scp:  feature list file
    label_scp: label file
    feat_mean: mean vector of the features
    feat_std:  vector of per-dimension standard deviations of the features
    pad_index: integer used for padding so that all utterances in a batch
               have the same number of frames
    splice:    concatenate the features of the surrounding (splice) frames;
               splice=1 concatenates one frame on each side, tripling the
               feature dimension. splice=0 does nothing.
    '''
    # number of utterances
    num_utts = 0
    # ID of each utterance
    id_list = []
    # list of paths to each utterance's feature file
    feat_list = []
    # list of feature-frame counts per utterance
    feat_len_list = []
    # floor the standard deviation
    # (to avoid division by zero)
    feat_std[feat_std < 1E-10] = 1E-10
    # feature dimensionality
    feat_dim = np.size(feat_mean)
    # labels of each utterance
    label_list = []
    # list of label lengths per utterance
    label_len_list = []
    # maximum number of frames
    max_feat_len = 0
    # maximum label length
    max_label_len = 0
    # read the feature list and the labels line by line,
    # collecting the information as we go
    with open(feat_scp, mode='r') as file_f, \
            open(label_scp, mode='r') as file_l:
        for (line_feats, line_label) in zip(file_f, file_l):
            # split each line on whitespace into a list
            parts_feats = line_feats.split()
            parts_label = line_label.split()
            # error out if the utterance ID (element 0 of parts)
            # differs between features and labels
            if parts_feats[0] != parts_label[0]:
                sys.stderr.write('IDs of feat and '
                                 'label do not match.\n')
                exit(1)
            # append the utterance ID to the list
            id_list.append(parts_feats[0])
            # append the feature-file path to the list
            feat_list.append(parts_feats[1])
            # append the frame count to the list
            feat_len = np.int32(parts_feats[2])
            feat_len_list.append(feat_len)
            # convert the label (written as numbers) to an int numpy array
            label = np.int32(parts_label[1:])
            # append it to the label list
            label_list.append(label)
            # append the label length
            label_len_list.append(np.int32(len(label)))
            # count the utterance
            num_utts += 1

    # maximum number of frames
    max_feat_len = np.max(feat_len_list)
    # maximum label length
    max_label_len = np.max(label_len_list)

    # pad the labels with pad_index so that every label
    # matches the maximum label length
    for n in range(num_utts):
        # number of frames to pad
        # = maximum length - own length
        pad_len = max_label_len - label_len_list[n]
        label_list[n] = np.pad(label_list[n],
                               [0, pad_len],
                               mode='constant',
                               constant_values=pad_index)
dataset = tf.data.Dataset.from_tensor_slices(
(feat_list, label_list, feat_len_list, label_len_list))
@tf.function
def __getitem__(feat_path, label, feat_len, label_len):
        ''' Return one sample.
        In this implementation, batches are built per utterance.
        '''
        feat = getfeat(feat_path, feat_len)
        # return the features, the label and their lengths; the original
        # lengths are included in the inputs because padding and the
        # transforms would otherwise lose them
return {
"feat": feat,
"label": label,
"feat_len": feat_len,
"label_len": label_len,
}
def getfeat(feat_path, feat_len):
        ''' Feature loading, factored out into its own function.
        '''
        # read the feature data from the feature file
        feat = tf.io.read_file(feat_path)
        feat = tf.io.decode_raw(feat, tf.float32)
        # reshape into a (frames x dimensions) array
        feat = tf.reshape(feat, (-1, feat_dim))
        # normalize (standardize) with the mean and standard deviation
        feat = (feat - feat_mean) / feat_std
        # pad the features with pad_index so that every utterance matches
        # the maximum frame count; cast the fill value to float so it can
        # be concatenated with the float32 features
        pad_len = max_feat_len - feat_len
        feat = tf.concat(
            [feat, tf.fill((pad_len, feat_dim), float(pad_index))], 0)
return feat
dataset = (
dataset.map(
__getitem__,num_parallel_calls=tf.data.AUTOTUNE
)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
return dataset
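
# Example usage sketch (added; the file names and statistics are placeholders):
#   dataset = build_dataset('train/feats.scp', 'train/label',
#                           feat_mean, feat_std,
#                           batch_size=32, num_tokens=64)
#   for batch in dataset:
#       feats, labels = batch['feat'], batch['label']
#       feat_lens, label_lens = batch['feat_len'], batch['label_len']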
| 25.565934 | 76 | 0.563507 | 486 | 4,653 | 5.106996 | 0.358025 | 0.042305 | 0.024174 | 0.012087 | 0.029009 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013536 | 0.349022 | 4,653 | 181 | 77 | 25.707182 | 0.805877 | 0.271223 | 0 | 0 | 0 | 0 | 0.022503 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.057471 | 0 | 0.126437 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca4e977a8bbc9ad8ddd91fa8ba9e6bd2a4340a7e | 1,410 | py | Python | aztk/internal/configuration_base.py | Geims83/aztk | 8f8e7b268bdbf82c3ae4ecdcd907077bd6fe69b6 | [
"MIT"
] | 161 | 2017-10-04T08:58:27.000Z | 2022-01-03T13:01:04.000Z | aztk/internal/configuration_base.py | Geims83/aztk | 8f8e7b268bdbf82c3ae4ecdcd907077bd6fe69b6 | [
"MIT"
] | 400 | 2017-09-29T21:52:08.000Z | 2021-01-08T02:48:56.000Z | aztk/internal/configuration_base.py | isabella232/aztk | 6e04372d19661ead6744387edab7beda16e3d928 | [
"MIT"
] | 74 | 2017-10-13T04:41:26.000Z | 2021-12-20T15:56:42.000Z | import yaml
from aztk.error import AztkError, InvalidModelError
class ConfigurationBase:
"""
Base class for any configuration.
Include methods to help with validation
"""
@classmethod
def from_dict(cls, args: dict):
"""
Create a new model from a dict values
The dict is cleaned from null values and passed expanded to the constructor
"""
try:
return cls._from_dict(args)
except (ValueError, TypeError) as e:
pretty_args = yaml.dump(args, default_flow_style=False)
raise AztkError("{0} {1}\n{2}".format(cls.__name__, str(e), pretty_args))
@classmethod
def _from_dict(cls, args: dict):
        clean = {k: v for k, v in args.items() if v}
return cls(**clean)
def validate(self):
raise NotImplementedError("Validate not implemented")
def valid(self):
try:
self.validate()
return True
except AztkError:
return False
def _validate_required(self, attrs):
for attr in attrs:
if not getattr(self, attr):
raise InvalidModelError("{0} missing {1}.".format(self.__class__.__name__, attr))
def _merge_attributes(self, other, attrs):
for attr in attrs:
val = getattr(other, attr)
if val is not None:
setattr(self, attr, val)
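
# Illustrative subclass (added; not part of aztk) showing the intended usage:
#   class ClusterConfiguration(ConfigurationBase):
#       def __init__(self, cluster_id=None, size=None):
#           self.cluster_id = cluster_id
#           self.size = size
#
#       def validate(self):
#           self._validate_required(['cluster_id', 'size'])
#
#   config = ClusterConfiguration.from_dict({'cluster_id': 'c1', 'size': 2})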
| 29.375 | 97 | 0.598582 | 171 | 1,410 | 4.789474 | 0.479532 | 0.029304 | 0.043956 | 0.053724 | 0.126984 | 0.080586 | 0.080586 | 0 | 0 | 0 | 0 | 0.005139 | 0.309929 | 1,410 | 47 | 98 | 30 | 0.836588 | 0.132624 | 0 | 0.193548 | 0 | 0 | 0.044369 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.193548 | false | 0 | 0.064516 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca4fb8eba7e86423c3e317fa09aefd90269c7fd8 | 3,836 | py | Python | scripts/RSS_Reader/rss_reader.py | phoexer/Kelly | 2cccb8bc5197120754714b08226cd4ec1f63fb73 | [
"MIT"
] | null | null | null | scripts/RSS_Reader/rss_reader.py | phoexer/Kelly | 2cccb8bc5197120754714b08226cd4ec1f63fb73 | [
"MIT"
] | null | null | null | scripts/RSS_Reader/rss_reader.py | phoexer/Kelly | 2cccb8bc5197120754714b08226cd4ec1f63fb73 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import feedparser
import pandas as pd
import time
import logging
import csv
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
with open('feed_links.csv', mode='r') as infile:
reader = csv.reader(infile)
urls = {rows[0]:rows[1] for rows in reader}
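
# Expected feed_links.csv layout (inferred from the loop above; the values
# shown are examples only):
#   Topic Name,https://example.com/feed.rss
#   Another Topic,https://example.com/other.rss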
#urls = {}
#urls["Technology"] = 'https://www.google.com/alerts/feeds/04633522030720328702/17953401794911713837'
#urls["Xbox"] = 'https://www.google.com/alerts/feeds/04633522030720328702/9761777365347078087'
#urls["Video Games OR gaming"] = 'https://www.google.com/alerts/feeds/04633522030720328702/9669040427288917831'
#urls["Playstation"] = 'https://www.google.com/alerts/feeds/04633522030720328702/15206837917531560928'
#urls["e sports"] = "https://www.google.com/alerts/feeds/04633522030720328702/8442039481667620914"
#urls["CNN - Technology"] = 'http://rss.cnn.com/rss/edition_technology.rss'
#urls["CNN - Science and Space"] = 'http://rss.cnn.com/rss/edition_space.rss'
#urls["CNN - Africa"] = 'http://rss.cnn.com/rss/edition_africa.rss'
#urls["CNN - World"] = 'http://rss.cnn.com/rss/edition_world.rss'
#urls["Reuters UK: Technology News"] = 'http://feeds.reuters.com/reuters/technologyNews'
#urls["Reuters UK: Top News"] = ''
#feed_name = sys.argv[1]
#url = sys.argv[2]
db = 'data/feeds.csv'
#checks every hour
sleep_time = 60 * 60 # 1/60 * 3600 * 1000
#
# function to get the current time
#
def current_time_millis():
return int(round(time.time() * 1000))
def read_file(filepath):
    """Reads a CSV file and returns its non-empty rows as a list."""
    s = []
    with open(filepath, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            if len(row) > 0:
                s.append(row)
    return s
def load_lookup():
"""
Reads the data/feeds.csv file and loads all values in it into Pandas DataFrame lookup.
If the file does not exist it returns an empty DataFrame.
"""
lookup = None
try:
lookup = pd.read_csv(db, names = ["stamp", "title", "url", "summary","topic"], encoding='utf-8')
except:
logging.error("Failed to read file: ./data/feeds.csv")
if lookup is None:
lookup = pd.DataFrame(columns = ["stamp", "title", "url", "summary","topic"])
return lookup
def post_is_in_db(title):
"""Checks if string value `title` exists in the 'titles' column of lookup"""
    try:
        return title in lookup.title.values
    except Exception:
        logging.error("Failed to look up title in the feed database")
        return False
while True:
lookup = load_lookup()
logging.debug("Lookup:\t" + str(len(lookup)))
posts_to_save = []
current_timestamp = current_time_millis()
for url in urls:
logging.debug("Topic:\t" + url)
#
# get the feed data from the url
#
feed = feedparser.parse(urls[url])
for post in feed.entries:
# if post is already in the database, skip it
# TODO check the time
row = [None] * 5
row[0] = str(current_timestamp)
row[1] = post.title
row[2] = post.link
row[3] = post.summary
row[4] = url
if not post_is_in_db(post.title):
posts_to_save.append(row)
logging.debug("New Posts to save:\t" + str(len(posts_to_save)))
    if len(posts_to_save) > 0:
        df = pd.DataFrame(posts_to_save, columns=["stamp", "title", "url", "summary", "topic"])
        # DataFrame.append was removed in pandas 2.0; concat is the replacement
        lookup = pd.concat([lookup, df])
        # index=False keeps the CSV at five columns so load_lookup can re-read it
        lookup.to_csv(db, header=False, index=False, sep=',', encoding='utf-8')
logging.debug("Sleeping")
lookup = None
posts_to_save = None
time.sleep(sleep_time) | 30.444444 | 112 | 0.637643 | 525 | 3,836 | 4.584762 | 0.329524 | 0.020357 | 0.03199 | 0.035314 | 0.174907 | 0.16452 | 0.099709 | 0 | 0 | 0 | 0 | 0.077102 | 0.215589 | 3,836 | 126 | 113 | 30.444444 | 0.722832 | 0.395464 | 0 | 0.09375 | 0 | 0 | 0.113182 | 0 | 0 | 0 | 0 | 0.007937 | 0 | 1 | 0.0625 | false | 0 | 0.109375 | 0.015625 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca52ac0d4f6eff14369de57d20140633d902fe4f | 2,269 | py | Python | models/Update.py | C3atUofU/Hierarchical-SGD | ecc0f25065f78e70ed8deff7dfc9809331e19f21 | [
"MIT"
] | null | null | null | models/Update.py | C3atUofU/Hierarchical-SGD | ecc0f25065f78e70ed8deff7dfc9809331e19f21 | [
"MIT"
] | null | null | null | models/Update.py | C3atUofU/Hierarchical-SGD | ecc0f25065f78e70ed8deff7dfc9809331e19f21 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
class DatasetSplit(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return image, label
class LocalUpdate(object):
def __init__(self, args, dataset=None, idxs=None, iters=None, nums=None):
self.args = args
self.loss_func = nn.CrossEntropyLoss()
self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), batch_size=self.args.local_bs, shuffle=True, num_workers=args.num_workers)
self.iters = iters
self.nums = nums
def train(self, net):
net.train()
#compute number of epochs to run
num_batch = len(self.ldr_train)
eps = int(self.iters/num_batch)
rem_iters = self.iters % num_batch
        # train and update
        optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr,
                                    momentum=self.args.momentum)
        # full passes over the local data
        for ep in range(eps):
            for batch_idx, (images, labels) in enumerate(self.ldr_train):
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                log_probs = net(images)
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
        # partial pass for the remaining local iterations
        if rem_iters != 0:
            count = 0  # counts the remaining local iterations
            for batch_idx, (images, labels) in enumerate(self.ldr_train):
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                log_probs = net(images)
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
                count += 1
                if count == rem_iters:
                    break
return net.state_dict()
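
# Example local-update round (added; `args`, `train_set`, `user_idxs` and
# `global_net` are placeholders for objects built elsewhere in the script):
#   local = LocalUpdate(args, dataset=train_set, idxs=user_idxs, iters=50)
#   w_local = local.train(copy.deepcopy(global_net).to(args.device))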
| 33.865672 | 139 | 0.585721 | 278 | 2,269 | 4.629496 | 0.363309 | 0.055944 | 0.037296 | 0.049728 | 0.28749 | 0.28749 | 0.28749 | 0.28749 | 0.28749 | 0.28749 | 0 | 0.004502 | 0.314676 | 2,269 | 66 | 140 | 34.378788 | 0.823151 | 0.070075 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.122449 | 0.020408 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca5b47ff066ef55e1d1779346f934318a0c7cf47 | 1,996 | py | Python | 2 - Code/210924 - DQN Extensions #1/01 - DQN Basic.py | utilForever/2021-HYU-HAI-RLBootCamp | 6ec1c405a32b03903c6d73d7b882ef07610eafe4 | [
"MIT"
] | 31 | 2021-04-08T15:21:56.000Z | 2022-03-15T08:18:33.000Z | 2 - Code/210929 - DQN Extensions #2/01 - DQN Basic.py | utilForever/2021-HYU-HAI-RLBootCamp | 6ec1c405a32b03903c6d73d7b882ef07610eafe4 | [
"MIT"
] | null | null | null | 2 - Code/210929 - DQN Extensions #2/01 - DQN Basic.py | utilForever/2021-HYU-HAI-RLBootCamp | 6ec1c405a32b03903c6d73d7b882ef07610eafe4 | [
"MIT"
] | null | null | null | import argparse
import gym
import ptan
import random
import torch
import torch.optim as optim
from ignite.engine import Engine
from lib import dqn_model, epsilon_tracker, hyper_params, utils
if __name__ == "__main__":
random.seed(hyper_params.SEED)
torch.manual_seed(hyper_params.SEED)
params = hyper_params.HYPER_PARAMS['pong']
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable CUDA")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = gym.make(params.env_name)
env = ptan.common.wrappers.wrap_dqn(env)
env.seed(hyper_params.SEED)
net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
target_net = ptan.agent.TargetNet(net)
selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params.epsilon_start)
    tracker = epsilon_tracker.EpsilonTracker(selector, params)  # avoid shadowing the imported module
agent = ptan.agent.DQNAgent(net, selector, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params.gamma)
buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=params.replay_size)
optimizer = optim.Adam(net.parameters(), lr=params.learning_rate)
def process_batch(engine_for_batch, batch):
optimizer.zero_grad()
loss_v = utils.calc_loss_dqn(batch, net, target_net.target_model, gamma=params.gamma, device=device)
loss_v.backward()
optimizer.step()
        tracker.frame(engine_for_batch.state.iteration)
if engine_for_batch.state.iteration % params.target_net_sync == 0:
target_net.sync()
return {
"loss": loss_v.item(),
"epsilon": selector.epsilon,
}
engine = Engine(process_batch)
utils.setup_ignite(engine, params, exp_source, "01_DQN_Baseline")
engine.run(utils.batch_generator(buffer, params.replay_initial, params.batch_size))
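    # Notes (added): target_net.sync() copies the online weights into the
    # target network every params.target_net_sync iterations, which stabilizes
    # the bootstrapped Q-targets; EpsilonTracker anneals the exploration rate
    # once per frame.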
| 32.721311 | 108 | 0.725952 | 257 | 1,996 | 5.400778 | 0.392996 | 0.04755 | 0.032421 | 0.041066 | 0.040346 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001806 | 0.167836 | 1,996 | 60 | 109 | 33.266667 | 0.833835 | 0 | 0 | 0 | 0 | 0 | 0.036072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.190476 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca60b53e49cf1b6bb9d51a2399b1a6d7c2de70cd | 1,487 | py | Python | exercicio-05.py | gabrilu/Atividade | 3c71113099e827913e4a560ecfd029706ba90a00 | [
"Apache-2.0"
] | null | null | null | exercicio-05.py | gabrilu/Atividade | 3c71113099e827913e4a560ecfd029706ba90a00 | [
"Apache-2.0"
] | null | null | null | exercicio-05.py | gabrilu/Atividade | 3c71113099e827913e4a560ecfd029706ba90a00 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# The data are the grain-size distribution (particle size) of a marine sediment core;
# Negative y values give the depth in the stratigraphic column in metres, with y = 0 at the top of the core (the seafloor surface);
# x values give the particle size on the phi scale, where larger phi values indicate smaller particles;
# z values give the percentage (or volume) of a given size fraction (x) at a given depth (y).
# The blank area in the plot corresponds to a sample with a percentage far above 14.5% (an outlier)
x = pd.read_csv("C:\\Users\\xavie\\Desktop\\fracao.csv", header = 0) # Carregando csv contendo os valores das frações granulométricas (phi)
y = pd.read_csv("C:\\Users\\xavie\\Desktop\\prof.csv", header = 0) # Carregando csv contendo os valores de profundidade do testemunho (m)
z = pd.read_csv("C:\\Users\\xavie\\Desktop\\granulometria.csv", header = 0) # Carregando os valores de porcentagem por peso de peneiramento (Wt. %)
# Obs: Modificar o diretório dos arquivos
X, Y = np.meshgrid(x, y*(-1))  # Grid x and y, multiplying y by -1 to orient the stratigraphic column from top to base
plt.contourf(X, Y, z, np.arange(0, 14.5, 1), cmap='viridis')  # Contour levels over the 0 - 14.5% range
plt.title('Particle Size Contour Plot')
plt.xlabel('phi')
plt.ylabel('Depth (m)')
clb = plt.colorbar()
clb.ax.set_title('Wt. (%)')
plt.show()
| 61.958333 | 147 | 0.741762 | 244 | 1,487 | 4.504098 | 0.491803 | 0.007279 | 0.024568 | 0.027298 | 0.146497 | 0.146497 | 0.146497 | 0.072793 | 0 | 0 | 0 | 0.014196 | 0.147276 | 1,487 | 23 | 148 | 64.652174 | 0.852524 | 0.635508 | 0 | 0 | 0 | 0 | 0.316981 | 0.218868 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca6399fda024ff3ca6fb6ffc6995041a2c15a6d0 | 1,644 | py | Python | 2021/04/day_04.py | mfep/advent-of-code-2020 | 99ba3d68383f5a5e15ec876a9915082f479b8ee4 | [
"MIT"
] | 2 | 2020-12-05T18:21:14.000Z | 2020-12-08T19:37:51.000Z | 2021/04/day_04.py | mfep/advent-of-code | 99ba3d68383f5a5e15ec876a9915082f479b8ee4 | [
"MIT"
] | null | null | null | 2021/04/day_04.py | mfep/advent-of-code | 99ba3d68383f5a5e15ec876a9915082f479b8ee4 | [
"MIT"
] | null | null | null | class Board(object):
def __init__(self, lines):
assert(len(lines) == 5)
self.data = []
for line in lines:
self.data += [int(x) for x in line.split()]
assert(len(self.data) == 25)
self.drawn = [False for _ in self.data]
def score(self, drawn):
return sum([0 if self.drawn[idx] else self.data[idx] for idx in range(25)]) * drawn
def update(self, x):
try:
idx = self.data.index(x)
self.drawn[idx] = True
row = idx // 5
col = idx % 5
row_indices = range(5 * row, 5 * row + 5)
col_indices = range(col, 25, 5)
if sum(map(lambda idx: self.drawn[idx], row_indices)) == 5 \
or sum(map(lambda idx: self.drawn[idx], col_indices)) == 5:
return self.score(x)
else:
return None
except ValueError:
return None
with open('day_04.txt') as f:
lines = f.readlines()
num_boards = (len(lines) - 1) // 6
deck = [int(x) for x in lines[0].split(',')]
def part1():
boards = []
for i in range(num_boards):
boards.append(Board(lines[2+6*i:1+6*(i+1)]))
for drawn in deck:
for board in boards:
score = board.update(drawn)
            if score is not None:  # a score of 0 is a valid win
print(score)
return
def part2():
boards = []
for i in range(num_boards):
boards.append(Board(lines[2+6*i:1+6*(i+1)]))
for drawn in deck:
boards_to_remove = []
for board in boards:
score = board.update(drawn)
            if score is not None:  # a score of 0 is a valid win
if len(boards) == 1:
print(score)
return
boards_to_remove.append(board)
while len(boards) > 1 and boards_to_remove:
boards.remove(boards_to_remove.pop(0))
part1()
part2()
| 24.537313 | 87 | 0.583333 | 254 | 1,644 | 3.692913 | 0.255906 | 0.051173 | 0.051173 | 0.017058 | 0.319829 | 0.298507 | 0.298507 | 0.240938 | 0.240938 | 0.240938 | 0 | 0.03172 | 0.27129 | 1,644 | 66 | 88 | 24.909091 | 0.751252 | 0 | 0 | 0.357143 | 0 | 0 | 0.006691 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.089286 | false | 0 | 0 | 0.017857 | 0.214286 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca63c85b7dd719215770c89a01e560ca42f2a093 | 2,944 | py | Python | test/test_database_variable.py | sensiblecodeio/csv-to-cantabular-metadata-2021 | 32bad86f5acaec76af6538bdeff89fc78575c8dd | [
"Apache-2.0"
] | null | null | null | test/test_database_variable.py | sensiblecodeio/csv-to-cantabular-metadata-2021 | 32bad86f5acaec76af6538bdeff89fc78575c8dd | [
"Apache-2.0"
] | null | null | null | test/test_database_variable.py | sensiblecodeio/csv-to-cantabular-metadata-2021 | 32bad86f5acaec76af6538bdeff89fc78575c8dd | [
"Apache-2.0"
] | null | null | null | import unittest.mock
import unittest
import os
import pathlib
from ons_csv_to_ctb_json_load import Loader
from helper_funcs import conditional_mock_open, build_test_file
HEADERS = ['Id', 'Database_Mnemonic', 'Variable_Mnemonic', 'Version', 'Lowest_Geog_Variable_Flag']
REQUIRED_FIELDS = {'Variable_Mnemonic': 'VAR1',
'Database_Mnemonic': 'DB1',
'Id': '1',
'Version': '1'}
INPUT_DIR = os.path.join(pathlib.Path(__file__).parent.resolve(), 'testdata')
FILENAME = os.path.join(INPUT_DIR, 'Database_Variable.csv')
class TestDatabaseVariable(unittest.TestCase):
def run_test(self, rows, expected_error):
with unittest.mock.patch('builtins.open', conditional_mock_open('Database_Variable.csv',
read_data = build_test_file(HEADERS, rows))):
with self.assertRaisesRegex(ValueError, expected_error):
Loader(INPUT_DIR, None).load_database_to_variables(['DB1', 'DB2', 'DB3'])
def test_required_fields(self):
for field in REQUIRED_FIELDS:
with self.subTest(field=field):
row = REQUIRED_FIELDS.copy()
row[field] = ''
self.run_test([row], f'^Reading {FILENAME}:2 no value supplied for required field {field}$')
def test_invalid_values(self):
for field in ['Variable_Mnemonic', 'Database_Mnemonic', 'Lowest_Geog_Variable_Flag']:
with self.subTest(field=field):
row = REQUIRED_FIELDS.copy()
row[field] = 'X'
self.run_test([row], f'^Reading {FILENAME}:2 invalid value X for {field}$')
def test_duplicate_entry(self):
self.run_test(
[REQUIRED_FIELDS, REQUIRED_FIELDS],
f'^Reading {FILENAME}:3 duplicate value combo VAR1/DB1 for Variable_Mnemonic/Database_Mnemonic$')
def test_lowest_geog_on_non_geo_var(self):
row = REQUIRED_FIELDS.copy()
row['Lowest_Geog_Variable_Flag'] = 'Y'
self.run_test(
[row],
f'^Reading {FILENAME} Lowest_Geog_Variable_Flag set on non-geographic variable VAR1 for database DB1$')
def test_duplicate_lowest_geog(self):
self.run_test([{'Variable_Mnemonic': 'GEO1', 'Database_Mnemonic': 'DB1', 'Id': '1',
'Version': '1', 'Lowest_Geog_Variable_Flag': 'Y'},
{'Variable_Mnemonic': 'GEO2', 'Database_Mnemonic': 'DB1', 'Id': '1',
'Version': '1', 'Lowest_Geog_Variable_Flag': 'Y'}],
f'^Reading {FILENAME} Lowest_Geog_Variable_Flag set on GEO2 and GEO1 for database DB1$')
def test_no_lowest_geog_var(self):
row = REQUIRED_FIELDS.copy()
row['Variable_Mnemonic'] = 'GEO1'
self.run_test(
[row],
f'^Reading {FILENAME} Lowest_Geog_Variable_Flag not set on any geographic variable for database DB1$')
if __name__ == '__main__':
unittest.main()
| 42.666667 | 115 | 0.638247 | 354 | 2,944 | 5.002825 | 0.271186 | 0.062112 | 0.08131 | 0.099379 | 0.330322 | 0.293619 | 0.293619 | 0.241671 | 0.206663 | 0.17956 | 0 | 0.012102 | 0.242188 | 2,944 | 68 | 116 | 43.294118 | 0.781712 | 0 | 0 | 0.2 | 0 | 0 | 0.324049 | 0.094429 | 0 | 0 | 0 | 0 | 0.018182 | 1 | 0.127273 | false | 0 | 0.109091 | 0 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca651fc6921278187b2a3bb6efc946859def4b2c | 705 | py | Python | reefbot/test/TestStillCaptureModule.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
] | null | null | null | reefbot/test/TestStillCaptureModule.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
] | null | null | null | reefbot/test/TestStillCaptureModule.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""Sends a message that is read by the StillCaptureModule."""
import roslib; roslib.load_manifest('reefbot')
import rospy
from reefbot_msgs.msg import ImageCaptured
if __name__ == "__main__":
rospy.init_node('TestStillCaptureModule')
publisher = rospy.Publisher(
rospy.get_param('still_image_topic', 'still_image'),
ImageCaptured,
tcp_nodelay=True, latch=False)
request = ImageCaptured()
request.image_id = 23
request.image.height = 16
request.image.width = 20
    request.image.data = list(range(16*20))  # list() so this also works on Python 3
    request.image.encoding = "8UC1"
    request.image.step = 20  # 8UC1 is 1 byte per pixel, so step = width * 1 = 20
r = rospy.Rate(1)
while not rospy.is_shutdown():
publisher.publish(request)
r.sleep()
| 25.178571 | 61 | 0.723404 | 93 | 705 | 5.290323 | 0.645161 | 0.146341 | 0.056911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026846 | 0.15461 | 705 | 27 | 62 | 26.111111 | 0.798658 | 0.102128 | 0 | 0 | 0 | 0 | 0.110048 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca653894731570c9c358faadfe28ba24fca6a793 | 4,924 | py | Python | Python Tkinter All List III Save and Open ToDo Lists/SaveandOpe ToD Lists.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 20 | 2020-08-19T23:27:01.000Z | 2022-02-03T12:02:17.000Z | Python Tkinter All List III Save and Open ToDo Lists/SaveandOpe ToD Lists.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 1 | 2021-04-10T18:06:05.000Z | 2021-04-10T18:06:05.000Z | Python Tkinter All List III Save and Open ToDo Lists/SaveandOpe ToD Lists.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 2 | 2020-12-03T19:35:36.000Z | 2021-11-10T14:58:39.000Z | ############################################################################
# Python Tkinter All List III Save and Open ToDo Lists
# Python Tkinter All List III Guardar y abrir listas de tareas pendientes
############################################################################
from tkinter import *
from tkinter.font import Font
from tkinter import filedialog
import pickle
root = Tk()
root.title("Python Tkinter All List III Save and Open ToDo Lists")
root.iconbitmap("Python Tkinter All List III Save and Open ToDo Lists/icons/drink.ico")
root.geometry('500x500')
# Define our Font
my_font = Font(
family="Brush Script MT",
size =30,
weight="bold")
my_frame = Frame(root)
my_frame.pack(pady=10)
# Create listbox
my_list = Listbox(my_frame,
font=my_font,
width=25,
height=5,
bg="SystemButtonFace",
bd=0,
fg="#464646",
#Seleccion con color
highlightthickness=0,
selectbackground="#a6a6a6",
activestyle="none")
my_list.pack(side=LEFT, fill=BOTH)
# Create dummy list
stuff = ["Walk the Dog", "Buy Griceries", "Take a Nap", "Learn Tkinter", "Rule the wold"]
# add dummy list to list box
for item in stuff:
my_list.insert(END, item)
# Create scrollbar
my_scrollbar = Scrollbar(my_frame)
my_scrollbar.pack(side=RIGHT, fill=BOTH)
# add scrollbar
my_list.config(yscrollcommand=my_scrollbar.set)
my_scrollbar.config(command=my_list.yview)
# create entry box to add items to the list
my_entry = Entry(root, font=("Helvetica", 24),width=26)
my_entry.pack(pady=20)
# Create a button frame
button_frame = Frame(root)
button_frame.pack(pady=20)
# Functions
def delete_item():
my_list.delete(ANCHOR)
def add_item():
my_list.insert(END, my_entry.get())
my_entry.delete(0, END)
def cross_off_item():
# Cross off item
my_list.itemconfig(
my_list.curselection(),
fg="#dedede")
# Get rid of selection bar
my_list.select_clear(0, END)
def uncross_item():
    # Uncross the item
my_list.itemconfig(
my_list.curselection(),
fg="#464646")
# Get rid of selection bar
my_list.select_clear(0, END)
# Delete crossed-off items
def delete_crossed():
count = 0
while count < my_list.size():
if my_list.itemcget(count, "fg") == "#dedede":
my_list.delete(my_list.index(count))
else:
count += 1
def save_list():
file_name = filedialog.asksaveasfilename(
initialdir="Python Tkinter All List III Save and Open ToDo Lists/data",
title="Save File",
filetypes=(
("Dat Files", "*.dat"),
("All Files", "*.*"))
)
if file_name:
if file_name.endswith(".dat"):
pass
else:
file_name = f'{file_name}.dat'
        # Delete crossed off items before save
        delete_crossed()
        # Grab all the stuff from the list
        stuff = my_list.get(0, END)
        # Write the list out with pickle (the with-block also closes the file)
        with open(file_name, 'wb') as output_file:
            pickle.dump(stuff, output_file)
def open_list():
    file_name = filedialog.askopenfilename(
        initialdir="Python Tkinter All List III Save and Open ToDo Lists/data",
        title="Open File",
        filetypes=(
            ("Dat Files", "*.dat"),
            ("All Files", "*.*"))
    )
    if file_name:
        # Delete currently open list
        my_list.delete(0, END)
        # Read the data back with pickle (the with-block also closes the file)
        with open(file_name, 'rb') as input_file:
            stuff = pickle.load(input_file)
        # Put the stuff onto the screen
        for item in stuff:
            my_list.insert(END, item)
def clear_list():
my_list.delete(0, END)
# Create Menu
my_menu = Menu(root)
root.config(menu=my_menu)
# Add items to the menu
file_menu = Menu(my_menu, tearoff=False)
my_menu.add_cascade(label="File", menu=file_menu)
# add dropdown items
file_menu.add_command(label="Save List", command=save_list)
file_menu.add_command(label="Open List", command=open_list)
file_menu.add_separator()
file_menu.add_command(label="Clear List", command=clear_list)
# add some buttons
delete_button = Button(button_frame, text="Delete Item", command=delete_item)
add_button = Button(button_frame, text="Add Item", command=add_item)
cross_off_button = Button(button_frame, text="Cross off Item", command=cross_off_item)
uncross_button = Button(button_frame, text="Uncross Item", command=uncross_item)
delete_crossed_button = Button(button_frame, text="Delete Crossed", command=delete_crossed)
delete_button.grid(row=0, column=0)
add_button.grid(row=0, column=1, padx=20)
cross_off_button.grid(row=0, column=2)
uncross_button.grid(row=0, column=3, padx=20)
delete_crossed_button.grid(row=0, column=4)
root.mainloop() | 27.355556 | 91 | 0.647035 | 689 | 4,924 | 4.467344 | 0.256894 | 0.048733 | 0.031189 | 0.038986 | 0.402859 | 0.305718 | 0.27128 | 0.27128 | 0.27128 | 0.221897 | 0 | 0.015951 | 0.210601 | 4,924 | 180 | 92 | 27.355556 | 0.77592 | 0.140536 | 0 | 0.321739 | 0 | 0 | 0.143175 | 0.005184 | 0 | 0 | 0 | 0.005556 | 0 | 1 | 0.069565 | false | 0.008696 | 0.034783 | 0 | 0.104348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
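# Note (added): lists are persisted with pickle, so the .dat files are
# Python-specific binary data rather than human-readable text.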
ca664f99426c4233bb183526150ff77bb24145d7 | 848 | py | Python | homeassistant/components/sleepiq/coordinator.py | d4rk1/core | 52ca1a3d475fdd96e6998d8c8b0be9a423b4dd06 | [
"Apache-2.0"
] | 1 | 2022-02-20T14:35:42.000Z | 2022-02-20T14:35:42.000Z | homeassistant/components/sleepiq/coordinator.py | d4rk1/core | 52ca1a3d475fdd96e6998d8c8b0be9a423b4dd06 | [
"Apache-2.0"
] | 4 | 2022-03-02T07:18:01.000Z | 2022-03-31T07:09:30.000Z | homeassistant/components/sleepiq/coordinator.py | d4rk1/core | 52ca1a3d475fdd96e6998d8c8b0be9a423b4dd06 | [
"Apache-2.0"
] | null | null | null | """Coordinator for SleepIQ."""
from datetime import timedelta
import logging
from asyncsleepiq import AsyncSleepIQ
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
UPDATE_INTERVAL = timedelta(seconds=60)
class SleepIQDataUpdateCoordinator(DataUpdateCoordinator[dict[str, dict]]):
"""SleepIQ data update coordinator."""
def __init__(
self,
hass: HomeAssistant,
client: AsyncSleepIQ,
username: str,
) -> None:
"""Initialize coordinator."""
super().__init__(
hass,
_LOGGER,
name=f"{username}@SleepIQ",
update_method=client.fetch_bed_statuses,
update_interval=UPDATE_INTERVAL,
)
self.client = client
| 25.69697 | 75 | 0.676887 | 77 | 848 | 7.181818 | 0.506494 | 0.075949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003091 | 0.237028 | 848 | 32 | 76 | 26.5 | 0.851623 | 0.095519 | 0 | 0 | 0 | 0 | 0.023968 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.227273 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca6e3b8ba437468f1d2e6ebc7b338d1c4822e773 | 311 | py | Python | launcher.py | Bolinooo/Algorithms | 08b1e94eaa483845eb7611eef6192a047e04711f | [
"MIT"
] | null | null | null | launcher.py | Bolinooo/Algorithms | 08b1e94eaa483845eb7611eef6192a047e04711f | [
"MIT"
] | null | null | null | launcher.py | Bolinooo/Algorithms | 08b1e94eaa483845eb7611eef6192a047e04711f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = "Bolinooo"
def main():
L1 = [i for i in range(1, 10)]
L2 = [i * 2 for i in range(1, 10)]
print("Step 1: Import the correct algorithm")
print("Step 2: Try out the correct algorithm using the above lists or your own")
if __name__ == "__main__":
main()
| 19.4375 | 84 | 0.623794 | 51 | 311 | 3.568627 | 0.647059 | 0.043956 | 0.065934 | 0.120879 | 0.153846 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0.046809 | 0.244373 | 311 | 15 | 85 | 20.733333 | 0.72766 | 0.064309 | 0 | 0 | 0 | 0 | 0.424138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca73356a167a495ef7f15554e401d4c020b25f47 | 822 | py | Python | codes/day9_task1.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | 1 | 2021-12-07T10:54:48.000Z | 2021-12-07T10:54:48.000Z | codes/day9_task1.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | null | null | null | codes/day9_task1.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | null | null | null | """
Advent Of Code 2021
Day 9
Date: 09-12-2021
Site: https://adventofcode.com/2021/day/9
Author: Tayyrov
"""
def isValid(r, c):
return 0 <= c < cols and 0 <= r < rows
with open('../input_files/day9_input', 'r') as input_file:
    matrix = input_file.readlines()
rows = len(matrix)
cols = len(matrix[0].strip())
directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
ans = 0
for r in range(rows):
for c in range(cols):
point = matrix[r][c]
all_good = True
for x, y in directions:
nr, nc = r + x, c + y
if not isValid(nr, nc):
continue
if matrix[nr][nc] <= point:
all_good = False
if all_good:
ans += int(point)+1
print(f"The sum of the risk levels of all low points is {ans}")
| 22.216216 | 64 | 0.519465 | 125 | 822 | 3.36 | 0.488 | 0.014286 | 0.038095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058608 | 0.335766 | 822 | 36 | 65 | 22.833333 | 0.710623 | 0.121655 | 0 | 0 | 0 | 0 | 0.116519 | 0.036873 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0.047619 | 0.095238 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca73616e330f62bf07e4bc8f69070d118dc75955 | 6,532 | py | Python | scripts/demo_taskonomy.py | jozhang97/Side-tuning | dea345691fb7ee0230150fe56ddd644efdffa6ac | [
"MIT"
] | 56 | 2020-01-12T05:45:59.000Z | 2022-03-17T15:04:15.000Z | scripts/demo_taskonomy.py | jozhang97/Side-tuning | dea345691fb7ee0230150fe56ddd644efdffa6ac | [
"MIT"
] | 7 | 2020-01-28T23:14:45.000Z | 2022-02-10T01:56:48.000Z | scripts/demo_taskonomy.py | jozhang97/Side-tuning | dea345691fb7ee0230150fe56ddd644efdffa6ac | [
"MIT"
] | 2 | 2020-02-29T14:51:23.000Z | 2020-03-07T03:23:27.000Z | import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
from evkit.utils.losses import weighted_l1_loss, weighted_l2_loss, softmax_cross_entropy, dense_cross_entropy
from tlkit.models.lifelong_framework import LifelongSidetuneNetwork
from tlkit.data.datasets.taskonomy_dataset import get_lifelong_dataloaders
import tnt.torchnet as tnt
# Determine what device to use
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Training on device:', device)
# Define what tasks to consider
tasks = ['principal_curvature', # Note: In the rest of the codebase, this is refered to as 'curvature'
'segment_semantic', 'reshading', 'keypoints3d', 'keypoints2d',
'edge_texture', 'edge_occlusion', 'depth_zbuffer',
'depth_euclidean', 'normal', 'class_object', 'rgb']
task_specific_transfer_kwargs = [
{'out_channels': 2, 'is_decoder_mlp': False}, # curvature
{'out_channels': 18, 'is_decoder_mlp': False}, # segment_semantic
{'out_channels': 1, 'is_decoder_mlp': False}, # reshading
{'out_channels': 1, 'is_decoder_mlp': False}, # keypoints3d
{'out_channels': 1, 'is_decoder_mlp': False}, # keypoints2d
{'out_channels': 1, 'is_decoder_mlp': False}, # edge_texture
{'out_channels': 1, 'is_decoder_mlp': False}, # edge_occlusion
{'out_channels': 1, 'is_decoder_mlp': False}, # depth_zbuffer
{'out_channels': 1, 'is_decoder_mlp': False}, # depth_euclidean
{'out_channels': 3, 'is_decoder_mlp': False}, # normal
{'out_channels': 1000, 'is_decoder_mlp': True}, # class_object
{'out_channels': 3, 'is_decoder_mlp': False}, # rgb
]
loss_fns = [weighted_l2_loss, softmax_cross_entropy, weighted_l1_loss,
weighted_l1_loss, weighted_l1_loss, weighted_l1_loss, weighted_l1_loss, weighted_l1_loss,
weighted_l1_loss, weighted_l1_loss, dense_cross_entropy, weighted_l1_loss]
# Set up Model
model = LifelongSidetuneNetwork(
base_class='TaskonomyEncoder',
base_kwargs={ 'eval_only': True, 'normalize_outputs': False },
base_weights_path='../side-tuning/side-tuning/assets/pytorch/curvature_encoder.dat',
side_class='FCN5',
side_kwargs={ 'eval_only': False, 'normalize_outputs': False },
side_weights_path='../side-tuning/side-tuning/assets/pytorch/distillation/curvature-distilled.pth',
task_specific_transfer_kwargs=task_specific_transfer_kwargs,
transfer_class='PreTransferedDecoder',
transfer_kwargs={
'transfer_class': 'TransferConv3',
'transfer_weights_path': None,
'transfer_kwargs': {'n_channels': 8, 'residual': True},
'decoder_class': 'TaskonomyDecoder',
'decoder_weights_path': None,
'decoder_kwargs': {'eval_only': False},
},
merge_method='merge_operators.Alpha',
dataset='taskonmy',
tasks=range(len(tasks)),
)
model.to(device)
# Prepare Dataloaders
dataloaders = get_lifelong_dataloaders(
data_path='../side-tuning/taskonomy-sample-model-1',
sources = [['rgb']] * len(tasks),
targets=[[t] for t in tasks],
masks=[False] * len(tasks),
epochs_per_task=3,
split=None,
batch_size=16,
batch_size_val=16,
num_workers=8,
max_images_per_task=100,
)
dl_train, dl_val = dataloaders['train'], dataloaders['val']
# Set up optimizer (in general, make sure to set weight decay for alpha to 0!)
optimizer = torch.optim.Adam(
[
{'params': [param for name, param in model.named_parameters() if 'merge_operator' in name or 'context' in name or 'alpha' in name], 'weight_decay': 0.0},
{'params': [param for name, param in model.named_parameters() if 'merge_operator' not in name and 'context' not in name and 'alpha' not in name]},
],
lr=1e-4, weight_decay=2e-6
)
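# (Added note) Rationale for the parameter split above: weight decay on the
# merge/alpha parameters would shrink the blending coefficient toward zero,
# biasing the model toward ignoring one of the two networks, so those
# parameters are exempted from decay.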
# Set up logging
mlog = tnt.logger.TensorboardMeterLogger(
env='demo',
log_dir='/tmp/taskonomy_demo/tensorboards',
plotstylecombined=True
)
for task in range(len(tasks)):
mlog.add_meter(f'losses/task_{task}', tnt.meter.ValueSummaryMeter())
# Training loop
print('Starting training')
model.train(True)
start_time = time.time()
seen = set()
for epoch in range(len(tasks)):
for task_idx, batch_tuple in tqdm(dl_train, desc="Epoch " + str(epoch) + " (Train)"):
# Prepare for new task
old_size = len(seen)
seen.add(task_idx)
if len(seen) > old_size:
model.start_task(task_idx, train=True) # important to stop gradients from flowing to other tasks
# Process Batch
batch_tuple = [elm.to(device) for elm in batch_tuple]
x, label = batch_tuple[0], batch_tuple[1]
x = F.interpolate(x, 256)
label = F.interpolate(label, 256) if len(label.shape) == 4 else label
# Forward
pred = model(x, task_idx=task_idx)
label = label[:,:pred.shape[1],:,:] if len(label.shape) == 4 else label # data may be duplicated
loss = loss_fns[task_idx](pred, label)['total']
# Backward
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=1.0)
optimizer.step()
# Log
mlog.update_meter(loss.item()/x.shape[0], meters={f'losses/task_{task_idx}'}, phase='train')
meter_dict = mlog.peek_meter('train')
print('Finished training in:', time.time() - start_time, 'seconds. \nResults:')
for task_idx in range(len(tasks)):
print(f'\t Task {task_idx} Train Loss:', meter_dict[f'losses/task_{task_idx}'].item())
# Validate model on held-out data
model.to(device)
model.train(False)
for task_idx, batch_tuple in tqdm(dl_val, desc="Validation"):
# Process Batch
batch_tuple = [elm.to(device) for elm in batch_tuple]
x, label = batch_tuple[0], batch_tuple[1]
x = F.interpolate(x, 256)
label = F.interpolate(label, 256) if len(label.shape) == 4 else label
# Forward
pred = model(x, task_idx=task_idx)
label = label[:,:pred.shape[1],:,:] if len(label.shape) == 4 else label # data may be duplicated
loss = loss_fns[task_idx](pred, label)['total']
# Log
mlog.update_meter(loss.item()/x.shape[0], meters={f'losses/task_{task_idx}'}, phase='val')
meter_dict = mlog.peek_meter('val')
print('Results (Validation): ')
for task_idx in range(len(tasks)):
print(f'\t Task {task_idx} Val Loss:', meter_dict[f'losses/task_{task_idx}'].item())
# Save checkpoint
print('Saving model to /tmp/taskonomy_demo/model.pth')
torch.save(model.state_dict(), '/tmp/taskonomy_demo/model.pth') | 39.829268 | 161 | 0.686467 | 903 | 6,532 | 4.739756 | 0.262458 | 0.029439 | 0.033645 | 0.043692 | 0.389019 | 0.361449 | 0.346028 | 0.31215 | 0.247196 | 0.230841 | 0 | 0.014517 | 0.177434 | 6,532 | 164 | 162 | 39.829268 | 0.782058 | 0.098132 | 0 | 0.219512 | 0 | 0 | 0.242916 | 0.068283 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.065041 | 0 | 0.065041 | 0.056911 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca74d344cab07cbfd869efaea460c4c5bc949315 | 316 | py | Python | exercises/de/exc_02_05_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | 2 | 2020-07-07T01:46:37.000Z | 2021-04-20T03:19:43.000Z | exercises/de/exc_02_05_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | null | null | null | exercises/de/exc_02_05_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | null | null | null | import spacy
nlp = spacy.blank("de")
# Import the Doc class
from ____ import ____
# Expected text: "Na, alles klar?"
words = ["Na", ",", "alles", "klar", "?"]
spaces = [____, ____, ____, ____, ____]
# Create a Doc from the words and spaces
doc = ____(____, ____=____, ____=____)
print(doc.text)
| 21.066667 | 50 | 0.674051 | 34 | 316 | 4.735294 | 0.735294 | 0.086957 | 0.136646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167722 | 316 | 14 | 51 | 22.571429 | 0.612167 | 0.344937 | 0 | 0 | 0 | 0 | 0.073892 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca74e85f36d8815a258660b91b00fe2163b98715 | 2,602 | py | Python | un_contre_un.py | ForrayGabriel/polychess | 6555d32d2526d79c1de1c264d8c42c48bee5d412 | [
"MIT"
] | 1 | 2021-11-26T10:08:39.000Z | 2021-11-26T10:08:39.000Z | un_contre_un.py | ForrayGabriel/polychess | 6555d32d2526d79c1de1c264d8c42c48bee5d412 | [
"MIT"
] | null | null | null | un_contre_un.py | ForrayGabriel/polychess | 6555d32d2526d79c1de1c264d8c42c48bee5d412 | [
"MIT"
] | 2 | 2020-12-10T08:07:05.000Z | 2021-01-07T15:26:04.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 11:00:53 2020
@author: Gabriel
"""
import chess
import chess.svg
import random
# Ask the user for a move in the console and return it as a chess.Move
def recup_move():
case_dep = input("Case départ :")
case_arr = input("Case arrivée :")
move = chess.Move.from_uci(case_dep+case_arr)
while move not in board.legal_moves:
print("Mouvement illégal, merci de recommencer")
case_dep = input("Case départ :")
case_arr = input("Case arrivée :")
move = chess.Move.from_uci(case_dep+case_arr)
return move
# Return a random move from the list of legal moves
def random_move():
    moves = list(board.legal_moves)
    return random.choice(moves)
choix = input("Choix du mode de jeu :\n1 - Joueur contre Joueur\n2 - Joueur contre ordinateur aléatoire\n3 - Ordinateur contre lui même en aléatoire\n")
#set the board to its initial position
#corresponding to: rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1
board = chess.Board()
# Play a human-vs-human game
def un_contre_un():
white = True
print(board)
while board.is_game_over() == False:
if white :
print("\nWhite to play :")
white = False
board.push(recup_move())
else :
print("\nBlack to play :")
white = True
board.push(recup_move())
print(board)
# Play against a bot that moves at random
def bot_random():
white = True
print(board)
while board.is_game_over() == False:
if white :
print("\nWhite to play :")
white = False
board.push(recup_move())
else :
print("\nBlack to play :")
white = True
board.push(random_move())
print(board)
# Pit two randomly-moving bots against each other
def autoplay_random():
white = True
print(board)
while board.is_game_over() == False:
if white :
print("\nWhite to play :")
white = False
board.push(random_move())
else :
print("\nBlack to play :")
white = True
board.push(random_move())
input("Appuyer sur entrée pour continuer")
print(board)
if choix == "1":
un_contre_un()
elif choix == "2":
bot_random()
elif choix == "3":
autoplay_random()
else :
print("Choix non valide") | 27.680851 | 152 | 0.609147 | 345 | 2,602 | 4.495652 | 0.373913 | 0.034816 | 0.042553 | 0.03675 | 0.411348 | 0.3804 | 0.3804 | 0.3804 | 0.3804 | 0.3804 | 0 | 0.01454 | 0.286318 | 2,602 | 94 | 153 | 27.680851 | 0.820679 | 0.197156 | 0 | 0.623188 | 0 | 0.014493 | 0.184096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0 | 0.043478 | 0 | 0.144928 | 0.202899 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca77263261ea32fe43d682889ede3057c2714eb1 | 953 | py | Python | pyretrommo/player.py | snwhd/pyretrommo | fabb523d9b4385ed8a1ff0b2ac787cc5d88a23b7 | [
"blessing"
] | 1 | 2021-11-25T09:33:30.000Z | 2021-11-25T09:33:30.000Z | pyretrommo/player.py | snwhd/pyretrommo | fabb523d9b4385ed8a1ff0b2ac787cc5d88a23b7 | [
"blessing"
] | null | null | null | pyretrommo/player.py | snwhd/pyretrommo | fabb523d9b4385ed8a1ff0b2ac787cc5d88a23b7 | [
"blessing"
] | null | null | null | #!/usr/bin/env python3
from typing import (
Tuple,
)
from .character import Character
from .gen.player_class import PlayerClass
from .gen.player_stats import STATS_BY_PLAYER_CLASS
from .gen.equipment import GearType
from .stats import Stats
class Player(Character):
def __init__(
self,
username: str,
level: int,
player_class: PlayerClass,
gear: GearType,
boosts: Stats,
) -> None:
self.level = level
self.player_class = player_class
self.gear = gear
self.boosts = boosts
stats = self.calculate_stats()
abilities = PlayerClass.get_abilities(player_class, level)
super().__init__(username, stats, abilities)
def calculate_stats(self) -> Stats:
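        # Base stats come from the class/level lookup table; each gear piece
        # and any stat boosts are then added on top.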
stats = Stats(*STATS_BY_PLAYER_CLASS[self.player_class][self.level])
for gear in self.gear:
stats += gear
stats += self.boosts
return stats
| 25.078947 | 76 | 0.645331 | 112 | 953 | 5.276786 | 0.303571 | 0.1489 | 0.076142 | 0.060914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001437 | 0.269675 | 953 | 37 | 77 | 25.756757 | 0.847701 | 0.022036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca77ed517dcb7e292e04556306984fb49fef89ce | 8,361 | py | Python | m2wsgi/io/gevent.py | rfk/m2wsgi | ae3d054ba8594dd286c82351325d88a0af0245aa | [
"MIT"
] | 1 | 2015-11-08T11:31:20.000Z | 2015-11-08T11:31:20.000Z | m2wsgi/io/gevent.py | ged/m2wsgi | 669ad356ee69ab1dffb28fb35dd7bde415999209 | [
"MIT"
] | 3 | 2018-02-25T02:16:32.000Z | 2018-02-25T02:16:41.000Z | m2wsgi/io/gevent.py | rfk/m2wsgi | ae3d054ba8594dd286c82351325d88a0af0245aa | [
"MIT"
] | 1 | 2021-11-29T00:01:00.000Z | 2021-11-29T00:01:00.000Z | """
m2wsgi.io.gevent: gevent-based I/O module for m2wsgi
=====================================================
This module provides subclasses of m2wsgi.WSGIHandler and related classes
that are specifically tuned for running under gevent. You can import
and use the classes directory from here, or you can select this module
when launching m2wsgi from the command-line::
m2wsgi --io=gevent dotted.app.name tcp://127.0.0.1:9999
You will need the gevent_zeromq package from here:
https://github.com/traviscline/gevent-zeromq
"""
# Copyright (c) 2011, Ryan Kelly.
# All rights reserved; available under the terms of the MIT License.
from __future__ import absolute_import
from m2wsgi.util import fix_absolute_import
fix_absolute_import(__file__)
import os

from m2wsgi.io import base
import gevent
import gevent.monkey
import gevent.event
import gevent.core
import gevent.hub
import gevent_zeromq
from gevent_zeromq import zmq
import zmq.core.poll as zmq_poll
if hasattr(zmq, '_Context'):
ZContext = zmq._Context
else:
ZContext = zmq.Context
if hasattr(zmq, '_Socket'):
ZSocket = zmq._Socket
else:
ZSocket = zmq.Socket
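# gevent_zeromq renamed its wrapper base classes between releases; the hasattr
# checks above pick whichever name this version exposes.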
def monkey_patch():
"""Hook to monkey-patch the interpreter for this IO module.
This calls the standard gevent monkey-patching routines. Don't worry,
it's not called by default unless you're running from the command line.
"""
gevent.monkey.patch_all()
gevent_zeromq.monkey_patch()
# Patch signal module for gevent compatability.
# Courtesy of http://code.google.com/p/gevent/issues/detail?id=49
import signal
_orig_signal = signal.signal
def gevent_signal_wrapper(signum,*args,**kwds):
handler = signal.getsignal(signum)
if callable(handler):
handler(signum,None)
def gevent_signal(signum,handler):
_orig_signal(signum,handler)
return gevent.hub.signal(signum,gevent_signal_wrapper,signum)
signal.signal = gevent_signal
# The BaseConnection recv logic is based on polling, but I can't get
# gevent polling on multiple sockets to work correctly.
# Instead, we simulate polling on each socket individually by reading an item
# and keeping it in a local buffer.
#
# Ideally I would juse use the _wait_read() method on gevent-zmq sockets,
# but this seems to cause hangs for me. Still investigating.
class _Context(ZContext):
def socket(self,socket_type):
if self.closed:
raise zmq.ZMQError(zmq.ENOTSUP)
return _Socket(self,socket_type)
def term(self):
# This seems to be needed to let other greenthreads shut down.
# Omit it, and the SIGHUP handler gets "bad file descriptor" errors.
gevent.sleep(0.1)
return super(_Context,self).term()
class _Socket(ZSocket):
def __init__(self,*args,**kwds):
self._polled_recv = None
super(_Socket,self).__init__(*args,**kwds)
# This blockingly-reads a message from the socket, but stores
# it in a buffer rather than returning it.
def _recv_poll(self,flags=0,copy=True,track=False):
if self._polled_recv is None:
self._polled_recv = super(_Socket,self).recv(flags,copy,track)
# This uses the buffered result if available, or polls otherwise.
def recv(self,flags=0,copy=True,track=False):
v = self._polled_recv
while v is None:
self._recv_poll(flags,copy=copy,track=track)
v = self._polled_recv
self._polled_recv = None
return v
zmq.Context = _Context
zmq.Socket = _Socket
class Client(base.Client):
__doc__ = base.Client.__doc__
class Request(base.Request):
    __doc__ = base.Request.__doc__
class ConnectionBase(base.ConnectionBase):
__doc__ = base.ConnectionBase.__doc__ + """
This ConnectionBase subclass is designed for use with gevent. It uses
the monkey-patched zmq module from gevent and spawns a number of green
threads to manage non-blocking IO and interrupts.
"""
ZMQ_CTX = zmq.Context()
# A blocking zmq.core.poll doesn't play nice with gevent.
# Instead we read from each socket in a separate greenthread, and keep
# the results in a local buffer so they don't get lost. An interrupt
# then just kills all the currently-running threads.
def __init__(self):
super(ConnectionBase,self).__init__()
self.poll_threads = []
def _poll(self,sockets,timeout=None):
# If there's anything available non-blockingly, just use it.
(ready,_,error) = zmq_poll.select(sockets,[],sockets,timeout=0)
if ready:
return ready
if error:
return []
if timeout == 0:
return []
# Spawn a greenthread to poll-recv from each socket.
ready = []
threads = []
res = gevent.event.Event()
for sock in sockets:
threads.append(gevent.spawn(self._do_poll,sock,ready,res,timeout))
self.poll_threads.append((res,threads))
# Wait for one of them to return, or for an interrupt.
try:
res.wait()
finally:
gevent.killall(threads)
gevent.joinall(threads)
return ready
def _do_poll(self,sock,ready,res,timeout):
if timeout is None:
sock._recv_poll()
else:
with gevent.Timeout(timeout,False):
sock._recv_poll()
ready.append(sock)
if not res.is_set():
res.set()
def _interrupt(self):
for (res,threads) in self.poll_threads:
gevent.killall(threads)
if not res.is_set():
res.set()
class Connection(base.Connection,ConnectionBase):
__doc__ = base.Connection.__doc__ + """
This Connection subclass is designed for use with gevent. It uses the
monkey-patched zmq module from gevent and spawns a number of green
threads to manage non-blocking IO and interrupts.
"""
class DispatcherConnection(base.DispatcherConnection,ConnectionBase):
__doc__ = base.DispatcherConnection.__doc__ + """
This DispatcherConnection subclass is designed for use with gevent. It
uses the monkey-patched zmq module from gevent and spawns a number of
green threads to manage non-blocking IO and interrupts.
"""
class StreamingUploadFile(base.StreamingUploadFile):
__doc__ = base.StreamingUploadFile.__doc__ + """
This StreamingUploadFile subclass is designed for use with gevent. It
uses uses gevent.sleep() instead of time.sleep().
"""
def _wait_for_data(self):
curpos = self.fileobj.tell()
cursize = os.fstat(self.fileobj.fileno()).st_size
while curpos >= cursize:
gevent.sleep(0.01)
cursize = os.fstat(self.fileobj.fileno()).st_size
class Handler(base.Handler):
__doc__ = base.Handler.__doc__ + """
This Handler subclass is designed for use with gevent. It spawns a
a new green thread to handle each incoming request.
"""
ConnectionClass = Connection
def __init__(self,*args,**kwds):
super(Handler,self).__init__(*args,**kwds)
# We need to count the number of inflight requests, so the
# main thread can wait for them to complete when shutting down.
self._num_inflight_requests = 0
self._all_requests_complete = gevent.event.Event()
def handle_request(self,req):
self._num_inflight_requests += 1
if self._num_inflight_requests >= 1:
self._all_requests_complete.clear()
@gevent.spawn
def do_handle_request():
try:
self.process_request(req)
finally:
self._num_inflight_requests -= 1
if self._num_inflight_requests == 0:
self._all_requests_complete.set()
def wait_for_completion(self):
if self._num_inflight_requests > 0:
self._all_requests_complete.wait()
class WSGIResponder(base.WSGIResponder):
__doc__ = base.WSGIResponder.__doc__
class WSGIHandler(base.WSGIHandler,Handler):
__doc__ = base.WSGIHandler.__doc__ + """
This WSGIHandler subclass is designed for use with gevent. It spawns a
a new green thread to handle each incoming request.
"""
ResponderClass = WSGIResponder
StreamingUploadClass = StreamingUploadFile
| 32.917323 | 78 | 0.672527 | 1,093 | 8,361 | 4.945105 | 0.26441 | 0.011656 | 0.015541 | 0.023312 | 0.207956 | 0.20074 | 0.20074 | 0.171878 | 0.158187 | 0.142091 | 0 | 0.005972 | 0.238967 | 8,361 | 253 | 79 | 33.047431 | 0.84347 | 0.257266 | 0 | 0.240741 | 0 | 0 | 0.170185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104938 | false | 0 | 0.080247 | 0 | 0.382716 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca783de6ff83c65ac31c3c3f5577a1372db396ed | 8,584 | py | Python | examples/text_buffer.py | DT-was-an-ET/explorer-hat | 9dd8624a094b9a7663fbcbb95be72fdb946eecc7 | [
"MIT"
] | null | null | null | examples/text_buffer.py | DT-was-an-ET/explorer-hat | 9dd8624a094b9a7663fbcbb95be72fdb946eecc7 | [
"MIT"
] | null | null | null | examples/text_buffer.py | DT-was-an-ET/explorer-hat | 9dd8624a094b9a7663fbcbb95be72fdb946eecc7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# for use with Python 3
# text_buffer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# Standard library imports
from datetime import datetime
from shutil import copyfile
from sys import exit as sys_exit
import os
# Third party imports
# None
# Local application imports
from utility import pr,make_time_text,send_by_ftp
from buffer_log import class_buffer_log
class class_text_buffer(object):
	# Rotating (ring) buffer of log lines.
	# Initialise with the list of column headings and a config object;
	# get_line_dta(key) returns the line `key` positions behind the current insert point.
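	# Minimal usage sketch (assumes `config` carries the attributes used below,
	# e.g. prog_name, text_buffer_length, log_buffer_flag):
	#   buf = class_text_buffer(["Temp", "Humidity"], config)
	#   buf.line_values = ["21.5", "40"]
	#   buf.pr(True, 1, datetime.now(), 30)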
def __init__(self,headings,config):
#initialization
self.__config = config
print(" Buffer Init for : ",self.__config.prog_name," with a size of : ",self.__config.text_buffer_length, " and width of : ", len(headings) + 1, " including time stamp")
if not os.path.exists('log'):
os.makedirs('log')
		self.__source_ref = 0 # used to avoid processing repeated messages from the same source
self.__width = len(headings) + 1
self.line_values = ["not set"]*(len(headings))
self.__dta = [ [ None for di in range(self.__width+1) ] for dj in range(self.__config.text_buffer_length+1) ]
self.__size = 0
self.__posn = self.__config.text_buffer_length-1
#**********************************************
#self.__headings = ["Time"]
#for hdg_ind in range(0,self.__width-1):
# #print(hdg_ind,headings[hdg_ind])
# self.__headings.append(headings[hdg_ind])
#print(self.__headings)
#**************************************************
self.__headings = headings
self.__pr_values = ["text"] * self.__width
self.__html_filename = config.prog_name + "_log.html"
self.__html_filename_save_as = config.prog_path + self.__html_filename
self.__www_filename = config.local_dir_www + self.__html_filename
self.__ftp_creds = config.ftp_creds_filename
self.__send_html_count = 0
if self.__config.log_buffer_flag:
self.__send_log_count = 0
self.__log = class_buffer_log(config)
def size(self):
return self.__config.text_buffer_length
def update_buffer(self,values,appnd,ref):
#append a line of info at the current position plus 1
# print("Update Buffer appnd and ref are : ",appnd,ref)
if appnd + (self.__source_ref != ref):
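		# Truthy when appending a new line OR when the message source changed;
		# otherwise the current line is overwritten in place.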
#we adding message and incrementing posn
if self.__size < self.__config.text_buffer_length-1 :
self.__size += 1
if self.__posn == self.__config.text_buffer_length-1:
# last insert was at the end so go back to beginning@@
self.__posn = 0
else:
# first increment insert position by one
self.__posn += 1
# then insert a line full of values
self.__source_ref = ref
else:
self.__source_ref = ref
if len(values) > self.__width :
print("Width Error for :",self.__config.prog_name, len(values) , self.__width, values)
			sys_exit()
for i in range(0,len(values)):
self.__dta[self.__posn][i] = values[i]
#print("Buffer updated and log buffer flag is : ",self.__config.log_buffer_flag)
if self.__config.log_buffer_flag:
self.__log.log_to_file(self.__headings,values)
#self.__log.copy_log_to_www(False)
#send log file to website configevery ten scans
# *************************
#if self.__send_log_count > 10:
#print("Sending log file by FTP")
# self.__log.send_log_by_ftp(False,self.__config.log_directory,self.__config.ftp_timeout)
# self.__send_log_count = 0
#else:
#print("not FTP This Time count is : ",self.__send_log_count)
# self.__send_log_count += 1
def get_line_dta(self, key):
#return stored element from position relative to current insert position in buffer
line_dta = [" - "]*self.__width
if (self.__posn-key) > -1:
# need to take values from the area before current insert position
for i in range(self.__width):
line_dta[i] = self.__dta[self.__posn-key][i]
return(line_dta)
else:
# need to take values from after current insert position
for i in range(self.__width):
#following two lines used too debug the calc to get the lower part of status file
#print("That Calc key,self.__size,self.__config.text_buffer_length, self.__posn-key,key sum",
# key,self.__size,self.__config.text_buffer_length, self.__posn-key,(self.__posn-key),self.(self.__config.text_buffer_length + (self.__posn-key))
line_dta[i] = self.__dta[self.__config.text_buffer_length + (self.__posn-key)][i]
return(line_dta)
def get_dta(self):
# get all the data inserted so far, or the whole buffer
all_data = [ [ None for di in range(self.__width+1) ] for dj in range(self.__config.text_buffer_length+1) ]
for ind in range(0,self.__size):
line_dta = self.get_line_dta(ind)
# Following line for debug data from Buffer
# print("get_dta >>>",ind,line_dta)
for i in range(len(line_dta)):
all_data[ind][i] = line_dta[i]
return(all_data)
def just_log(self,appnd,ref,log_time,refresh_interval):
self.__log.log_to_file(self.__headings,self.line_values)
def pr(self,appnd,ref,log_time,refresh_interval):
here = "buffer.pr for " + self.__config.prog_name
make_values = [" -- "]*self.__width
prtime = datetime.now()
for_screen = log_time.strftime('%d/%m/%Y %H:%M:%S')
# following alternative will show more resolution for fractions of a second
# for_screen = log_time.strftime('%d/%m/%Y %H:%M:%S.%f')
make_values[0] = for_screen
file_start = """<head>
<meta http-equiv="refresh" content="""
file_start = file_start + str(refresh_interval)
file_start = file_start + """ />
</head>
<caption>Rotating Buffer Display</caption>"""
tbl_start = """ <p>
<table style="float: left;" border="1">
<tbody>"""
tbl_start_line = """<tr>"""
tbl_end_line = """</tr>"""
tbl_start_col = """<td>"""
tbl_end_col= """</td>"""
tbl_end = """</tbody>
</table>"""
file_end = """
</body>
</html>"""
try:
for i in range(0,self.__width -1):
make_values[i+1] = str(self.line_values[i])
for_screen = for_screen + " " + str(self.line_values[i])
except:
print("Error in make values in ...buffer.pr for : ",self.__config.prog_name)
print("i,values,len(self.line_value>s),self.__width",i,self.line_values,len(self.line_values),self.__width)
sys_exit()
# print to screen and to status log and update html file
if appnd:
print(" appending : " + self.__config.prog_name + " : " + for_screen)
else:
print("not appending : " + self.__config.prog_name + " : " + for_screen)
self.update_buffer(make_values,appnd,ref)
with open(self.__html_filename,'w') as htmlfile:
htmlfile.write(file_start)
htmlfile.write("<p>" + self.__html_filename + " : " + make_time_text(datetime.now()) + "</p>\n<p>")
htmlfile.write(tbl_start + tbl_start_line)
for ind in range(0,len(self.__headings)):
htmlfile.write(tbl_start_col + self.__headings[ind] + tbl_end_col)
htmlfile.write(tbl_end_line)
buffer_dta = self.get_dta()
for ind in range(self.__size):
htmlfile.write(tbl_start_line)
for i in range(self.__width):
htmlfile.write(tbl_start_col + str(buffer_dta[ind][i]) + tbl_end_col)
htmlfile.write(tbl_end_line)
htmlfile.write(tbl_end)
htmlfile.write(file_end)
copyfile(self.__html_filename, self.__www_filename)
# To debug FTP change end of following line to " = True"
if self.__send_html_count >= 3:
FTP_dbug_flag = False
ftp_result = send_by_ftp(FTP_dbug_flag,self.__ftp_creds, self.__html_filename_save_as, self.__html_filename,"",self.__config.ftp_timeout)
for pres_ind in range(0,len(ftp_result)):
pr(FTP_dbug_flag,here, str(pres_ind) + " : ", ftp_result[pres_ind])
self.__send_html_count = 0
else:
self.__send_html_count += 1
return
| 37.484716 | 174 | 0.681034 | 1,279 | 8,584 | 4.241595 | 0.218921 | 0.04424 | 0.028387 | 0.040553 | 0.299908 | 0.21788 | 0.180277 | 0.114101 | 0.067834 | 0.052719 | 0 | 0.007333 | 0.189772 | 8,584 | 228 | 175 | 37.649123 | 0.772682 | 0.339585 | 0 | 0.139535 | 0 | 0 | 0.093145 | 0.008197 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054264 | false | 0 | 0.046512 | 0.007752 | 0.124031 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca783e77d01b6a65f34fba8804bdd660d6dfe9a6 | 886 | py | Python | repos/build_pipeline/lambdas/extract_metrics/extract_metrics.py | roshansthomas/amazon-sagemaker-mlops-with-featurestore-and-datawrangler | 0159df4e04fb6848c2e34b8597f7601630f9bdc1 | [
"MIT-0"
] | null | null | null | repos/build_pipeline/lambdas/extract_metrics/extract_metrics.py | roshansthomas/amazon-sagemaker-mlops-with-featurestore-and-datawrangler | 0159df4e04fb6848c2e34b8597f7601630f9bdc1 | [
"MIT-0"
] | null | null | null | repos/build_pipeline/lambdas/extract_metrics/extract_metrics.py | roshansthomas/amazon-sagemaker-mlops-with-featurestore-and-datawrangler | 0159df4e04fb6848c2e34b8597f7601630f9bdc1 | [
"MIT-0"
] | null | null | null |
"""
This Lambda parses the output of ModelQualityStep to extract the value of a specific metric
"""
import json
import boto3
sm_client = boto3.client("sagemaker")
s3 = boto3.resource('s3')
def lambda_handler(event, context):
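    # Example event shape (illustrative values):
    #   {"model_quality_report_uri": "s3://my-bucket/reports/quality.json",
    #    "metric_name": "accuracy"}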
# model quality report URI
model_quality_report_uri = event['model_quality_report_uri']
metric_name = event['metric_name']
o = s3.Object(*split_s3_path(model_quality_report_uri))
    metrics = json.load(o.get()['Body'])
return {
"statusCode": 200,
"body": json.dumps(f"{metric_name} extracted"),
"metric_value": json.dumps(metrics['binary_classification_metrics'][metric_name]['value'])
}
def split_s3_path(s3_path):
path_parts=s3_path.replace("s3://","").split("/")
bucket=path_parts.pop(0)
key="/".join(path_parts)
return bucket, key
| 26.058824 | 98 | 0.673815 | 119 | 886 | 4.789916 | 0.453782 | 0.084211 | 0.126316 | 0.147368 | 0.05614 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02069 | 0.181716 | 886 | 33 | 99 | 26.848485 | 0.765517 | 0.132054 | 0 | 0 | 0 | 0 | 0.189474 | 0.069737 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca7aaf0d27786d0d1b6110d5b7821183f0fa08f7 | 2,383 | py | Python | main.py | Plouffi/Feh-Pass-Reviews | 0e4722da7f752a1b82ef7a65f43f7a76389f22b3 | [
"MIT"
] | 3 | 2020-02-18T20:40:11.000Z | 2020-03-16T15:05:13.000Z | main.py | Plouffi/Feh-Pass-Reviews | 0e4722da7f752a1b82ef7a65f43f7a76389f22b3 | [
"MIT"
] | null | null | null | main.py | Plouffi/Feh-Pass-Reviews | 0e4722da7f752a1b82ef7a65f43f7a76389f22b3 | [
"MIT"
] | null | null | null | from data_manager import DataManager, plot_fehpass_mention, plot_score_distribution
from scraper import Scraper
from datetime import date as dt
from cloud import Cloud
import sys
import json
def load_config():
with open("./ressources/config.json") as f:
config = json.load(f)
return config
def save_last_import():
with open("./ressources/copy.txt", "w") as file:
file.write(dt.today().strftime('%d/%m/%Y'))
def get_reviews():
"""
Method to fecth reviews from the Playstore
"""
config = load_config()
dm = DataManager(config)
s = Scraper(config)
reviews = s.get_reviews(config["feh_release_date"])
dm.insert_reviews(reviews)
dm.export()
def compute_data():
"""
Method to compute statistics from reviews data
"""
config = load_config()
dm = DataManager(config)
try:
# load data review in export file (path in config)
dm.load()
# computing stats
score_distribution, review_distribution = dm.compute_score_distribution()
means = {
"Cumulative mean": dm.compute_cumulative_mean(),
"Rolling average (1month)": dm.compute_rolling_mean()
}
review_with_mention, review_in_en, review_with_mention_1star = dm.compute_fehpass_mention()
# display stats
plot_score_distribution(score_distribution, review_distribution)
dm.plot_res(means)
dm.plot_overall_stats()
plot_fehpass_mention(review_with_mention, review_in_en, review_with_mention_1star)
except FileNotFoundError:
print(f"File '{config['export_path']}' not found. Launch scrapping process to create it")
def make_wordcloud():
"""
Method to generate a word cloud. Images are save in "ressources" directory.
"""
config = load_config()
dm = DataManager(config)
try:
# load data in export file (path in config)
dm.load()
reviews_before_fp = dm.get_reviews_as_dict(language="en", period="before")
reviews_after_fp = dm.get_reviews_as_dict(language="en", period="after")
cloud = Cloud(alpha=10.)
cloud.load_reviews(reviews_before_fp, reviews_after_fp)
cloud.cloud()
except FileNotFoundError:
print(f"File '{config['export_path']}' not found. Please check provided path is a valid path.")
def main():
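	# Dispatch on the first CLI argument: get_reviews | compute_data | make_wordcloud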
	process = sys.argv[1] if len(sys.argv) > 1 else ""
if process == "get_reviews":
get_reviews()
elif process == "compute_data":
compute_data()
elif process == "make_wordcloud":
make_wordcloud()
else:
print("No process provided")
if __name__ == "__main__":
main()
| 25.351064 | 97 | 0.738984 | 332 | 2,383 | 5.057229 | 0.328313 | 0.035736 | 0.0405 | 0.032162 | 0.324598 | 0.280524 | 0.259678 | 0.259678 | 0.223943 | 0.126266 | 0 | 0.002946 | 0.145195 | 2,383 | 93 | 98 | 25.623656 | 0.821306 | 0.120436 | 0 | 0.196721 | 0 | 0 | 0.170708 | 0.046072 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098361 | false | 0.04918 | 0.114754 | 0 | 0.229508 | 0.04918 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca7bcebe8a789ca0d1b9184a805dc93a7fdf5be0 | 4,679 | py | Python | simulation-ros/src/turtlebot2i/turtlebot2i_safety/src/two_robots.py | EricssonResearch/scott-eu | aad7fd2f767a3c5e7d89223a593fd979ad596db3 | [
"Apache-2.0"
] | 19 | 2017-06-29T07:41:26.000Z | 2021-11-03T18:48:48.000Z | simulation-ros/src/turtlebot2i/turtlebot2i_safety/src/two_robots.py | EricssonResearch/scott-eu | aad7fd2f767a3c5e7d89223a593fd979ad596db3 | [
"Apache-2.0"
] | 175 | 2017-06-29T09:37:43.000Z | 2021-07-09T12:55:28.000Z | simulation-ros/src/turtlebot2i/turtlebot2i_safety/src/two_robots.py | EricssonResearch/scott-eu | aad7fd2f767a3c5e7d89223a593fd979ad596db3 | [
"Apache-2.0"
] | 8 | 2017-10-31T08:53:12.000Z | 2021-07-21T06:14:43.000Z | #!/usr/bin/env python
"""
Edited from navigation.py in turtlebot2i_navigation module
"""
import rospy
import actionlib
from nav_msgs.msg import Odometry
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import geometry_msgs.msg
from sensor_msgs.msg import LaserScan
from tf.transformations import quaternion_from_euler
import math
from std_msgs.msg import Float64
from turtlebot2i_safety.msg import SafetyZone
import numpy as np
from kobuki_msgs.msg import BumperEvent
def init_var():
#Here we initialize the global variables.
    orientation = quaternion_from_euler(0, 0, 0) # (roll, pitch, yaw) -> returns an [x, y, z, w] array
global goal1, goal2
goal1 = MoveBaseGoal()
goal1.target_pose.header.frame_id = "map"
goal1.target_pose.header.stamp = rospy.Time.now()
goal1.target_pose.pose.position.x = -1.0
goal1.target_pose.pose.position.y = -2.5
goal1.target_pose.pose.position.z = 0.063 #1.34851861
goal1.target_pose.pose.orientation.x=0.0
goal1.target_pose.pose.orientation.y=0.0
goal1.target_pose.pose.orientation.z=orientation[2]
goal1.target_pose.pose.orientation.w=orientation[3]
goal2 = MoveBaseGoal()
goal2.target_pose.header.frame_id = "map"
goal2.target_pose.header.stamp = rospy.Time.now()
goal2.target_pose.pose.position.x = -4.5
goal2.target_pose.pose.position.y = 3.0
goal2.target_pose.pose.position.z = 0.063 #1.34851861
goal2.target_pose.pose.orientation.x=0.0
goal2.target_pose.pose.orientation.y=0.0
goal2.target_pose.pose.orientation.z=orientation[2]
goal2.target_pose.pose.orientation.w=orientation[3]
global client1, client2
client1 = actionlib.SimpleActionClient('turtlebot2i/move_base', MoveBaseAction)
client2 = actionlib.SimpleActionClient('turtlebot2i_0/move_base', MoveBaseAction)
global rob1goal, rob2goal
rob1goal = True
rob2goal = True
global time_start, prev_pos, init_pos, pose_cb_count, travelled_distance, sum_distance_to_goal
time_start = rospy.get_time()
init_pos = geometry_msgs.msg.Point()
prev_pos = geometry_msgs.msg.Point()
curr_pos = geometry_msgs.msg.Point()
pose_cb_count = 0
travelled_distance = 0.0 #the less the better
sum_distance_to_goal = 0.0 #the less the better
def move_to_goal(goal, client):
client.wait_for_server()
client.send_goal(goal)
print("Goal position is sent! waiting the robot to finish....")
wait = client.wait_for_result(timeout=rospy.Duration(1200.0)) #timeout in seconds
if not wait:
rospy.logerr("Action server not available or timeout!")
rospy.signal_shutdown("Action server not available!")
def distance2D(pos1, pos2):
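    # Planar Euclidean distance; z is ignored since the robots drive on a flat floor.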
return math.sqrt((pos1.x - pos2.x)**2 + (pos1.y - pos2.y)**2)
def init_subscription():
time_start = rospy.get_time()
rospy.Subscriber('/turtlebot2i/sensors/global_pose', geometry_msgs.msg.PoseStamped, update_pose_callback1)
rospy.Subscriber('/turtlebot2i_0/sensors/global_pose', geometry_msgs.msg.PoseStamped, update_pose_callback2)
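# Each robot shuttles between goal1 and goal2: once within 0.2 m of its current
# target, its pose callback cancels all goals and swaps to the other target.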
def update_pose_callback1(data):
global rob1goal
if rob1goal:
if distance2D(data.pose.position, goal1.target_pose.pose.position) < 0.2: #check distance to goal
print("goal reached!a")
client1.cancel_all_goals()
client1.send_goal(goal2)
rob1goal = False
else:
if distance2D(data.pose.position, goal2.target_pose.pose.position) < 0.2: #check distance to goal
print("goal reached!b")
client1.cancel_all_goals()
client1.send_goal(goal1)
rob1goal = True
def update_pose_callback2(data):
global rob2goal
if rob2goal:
if distance2D(data.pose.position, goal2.target_pose.pose.position) < 0.2: #check distance to goal
print("goal reached!c")
client2.cancel_all_goals()
client2.send_goal(goal1)
rob2goal = False
else:
if distance2D(data.pose.position, goal1.target_pose.pose.position) < 0.2: #check distance to goal
print("goal reached!d")
client2.cancel_all_goals()
client2.send_goal(goal2)
rob2goal = True
if __name__ == '__main__':
try:
rospy.init_node('two_robots_py')
init_var()
init_subscription()
client1.wait_for_server()
client2.wait_for_server()
print("Both clients are ready, sending the goals")
client1.send_goal(goal1)
client2.send_goal(goal2)
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("Navigation test finished.")
| 36.554688 | 112 | 0.70421 | 632 | 4,679 | 5.023734 | 0.259494 | 0.069291 | 0.07937 | 0.069291 | 0.450709 | 0.360945 | 0.333228 | 0.171969 | 0.171969 | 0.115906 | 0 | 0.040936 | 0.195982 | 4,679 | 127 | 113 | 36.84252 | 0.80303 | 0.06775 | 0 | 0.190476 | 0 | 0 | 0.087598 | 0.025357 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.114286 | 0.009524 | 0.180952 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca7e72ac8eca8a96798ab1b879172ea0ca0060fa | 7,333 | py | Python | python-flask-server/openapi_server/models/operation_filter_kgraph_top_n_parameters.py | broadinstitute/genetics-kp-dev | 902a153a33942ba5d224c129db0ae58562927085 | [
"MIT"
] | null | null | null | python-flask-server/openapi_server/models/operation_filter_kgraph_top_n_parameters.py | broadinstitute/genetics-kp-dev | 902a153a33942ba5d224c129db0ae58562927085 | [
"MIT"
] | 8 | 2021-06-14T18:10:53.000Z | 2022-03-23T18:30:10.000Z | python-flask-server/openapi_server/models/operation_filter_kgraph_top_n_parameters.py | broadinstitute/genetics-kp-dev | 902a153a33942ba5d224c129db0ae58562927085 | [
"MIT"
] | 1 | 2022-02-22T21:24:58.000Z | 2022-02-22T21:24:58.000Z | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class OperationFilterKgraphTopNParameters(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, edge_attribute=None, max_edges=50, keep_top_or_bottom='top', qedge_keys=None, qnode_keys=None): # noqa: E501
"""OperationFilterKgraphTopNParameters - a model defined in OpenAPI
:param edge_attribute: The edge_attribute of this OperationFilterKgraphTopNParameters. # noqa: E501
:type edge_attribute: str
:param max_edges: The max_edges of this OperationFilterKgraphTopNParameters. # noqa: E501
:type max_edges: int
:param keep_top_or_bottom: The keep_top_or_bottom of this OperationFilterKgraphTopNParameters. # noqa: E501
:type keep_top_or_bottom: str
:param qedge_keys: The qedge_keys of this OperationFilterKgraphTopNParameters. # noqa: E501
:type qedge_keys: List[str]
:param qnode_keys: The qnode_keys of this OperationFilterKgraphTopNParameters. # noqa: E501
:type qnode_keys: List[str]
"""
self.openapi_types = {
'edge_attribute': str,
'max_edges': int,
'keep_top_or_bottom': str,
'qedge_keys': List[str],
'qnode_keys': List[str]
}
self.attribute_map = {
'edge_attribute': 'edge_attribute',
'max_edges': 'max_edges',
'keep_top_or_bottom': 'keep_top_or_bottom',
'qedge_keys': 'qedge_keys',
'qnode_keys': 'qnode_keys'
}
self._edge_attribute = edge_attribute
self._max_edges = max_edges
self._keep_top_or_bottom = keep_top_or_bottom
self._qedge_keys = qedge_keys
self._qnode_keys = qnode_keys
@classmethod
def from_dict(cls, dikt) -> 'OperationFilterKgraphTopNParameters':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The OperationFilterKgraphTopN_parameters of this OperationFilterKgraphTopNParameters. # noqa: E501
:rtype: OperationFilterKgraphTopNParameters
"""
return util.deserialize_model(dikt, cls)
@property
def edge_attribute(self):
"""Gets the edge_attribute of this OperationFilterKgraphTopNParameters.
The name of the edge attribute to filter on. # noqa: E501
:return: The edge_attribute of this OperationFilterKgraphTopNParameters.
:rtype: str
"""
return self._edge_attribute
@edge_attribute.setter
def edge_attribute(self, edge_attribute):
"""Sets the edge_attribute of this OperationFilterKgraphTopNParameters.
The name of the edge attribute to filter on. # noqa: E501
:param edge_attribute: The edge_attribute of this OperationFilterKgraphTopNParameters.
:type edge_attribute: str
"""
if edge_attribute is None:
raise ValueError("Invalid value for `edge_attribute`, must not be `None`") # noqa: E501
self._edge_attribute = edge_attribute
@property
def max_edges(self):
"""Gets the max_edges of this OperationFilterKgraphTopNParameters.
The number of edges to keep. # noqa: E501
:return: The max_edges of this OperationFilterKgraphTopNParameters.
:rtype: int
"""
return self._max_edges
@max_edges.setter
def max_edges(self, max_edges):
"""Sets the max_edges of this OperationFilterKgraphTopNParameters.
The number of edges to keep. # noqa: E501
:param max_edges: The max_edges of this OperationFilterKgraphTopNParameters.
:type max_edges: int
"""
if max_edges is not None and max_edges < 0: # noqa: E501
raise ValueError("Invalid value for `max_edges`, must be a value greater than or equal to `0`") # noqa: E501
self._max_edges = max_edges
@property
def keep_top_or_bottom(self):
"""Gets the keep_top_or_bottom of this OperationFilterKgraphTopNParameters.
Indicate whether or not the the top or bottom n values should be kept. # noqa: E501
:return: The keep_top_or_bottom of this OperationFilterKgraphTopNParameters.
:rtype: str
"""
return self._keep_top_or_bottom
@keep_top_or_bottom.setter
def keep_top_or_bottom(self, keep_top_or_bottom):
"""Sets the keep_top_or_bottom of this OperationFilterKgraphTopNParameters.
Indicate whether or not the the top or bottom n values should be kept. # noqa: E501
:param keep_top_or_bottom: The keep_top_or_bottom of this OperationFilterKgraphTopNParameters.
:type keep_top_or_bottom: str
"""
allowed_values = ["top", "bottom"] # noqa: E501
if keep_top_or_bottom not in allowed_values:
raise ValueError(
"Invalid value for `keep_top_or_bottom` ({0}), must be one of {1}"
.format(keep_top_or_bottom, allowed_values)
)
self._keep_top_or_bottom = keep_top_or_bottom
@property
def qedge_keys(self):
"""Gets the qedge_keys of this OperationFilterKgraphTopNParameters.
This indicates if you only want to filter on specific edge_keys. If not provided or empty, all edges will be filtered on. # noqa: E501
:return: The qedge_keys of this OperationFilterKgraphTopNParameters.
:rtype: List[str]
"""
return self._qedge_keys
@qedge_keys.setter
def qedge_keys(self, qedge_keys):
"""Sets the qedge_keys of this OperationFilterKgraphTopNParameters.
This indicates if you only want to filter on specific edge_keys. If not provided or empty, all edges will be filtered on. # noqa: E501
:param qedge_keys: The qedge_keys of this OperationFilterKgraphTopNParameters.
:type qedge_keys: List[str]
"""
self._qedge_keys = qedge_keys
@property
def qnode_keys(self):
"""Gets the qnode_keys of this OperationFilterKgraphTopNParameters.
This indicates if you only want nodes corresponding to a specific list of qnode_keys to be removed. If not provided or empty, no nodes will be removed when filtering. Allows us to know what to do with the nodes connected to edges that are removed. # noqa: E501
:return: The qnode_keys of this OperationFilterKgraphTopNParameters.
:rtype: List[str]
"""
return self._qnode_keys
@qnode_keys.setter
def qnode_keys(self, qnode_keys):
"""Sets the qnode_keys of this OperationFilterKgraphTopNParameters.
This indicates if you only want nodes corresponding to a specific list of qnode_keys to be removed. If not provided or empty, no nodes will be removed when filtering. Allows us to know what to do with the nodes connected to edges that are removed. # noqa: E501
:param qnode_keys: The qnode_keys of this OperationFilterKgraphTopNParameters.
:type qnode_keys: List[str]
"""
self._qnode_keys = qnode_keys
| 38.798942 | 269 | 0.684167 | 909 | 7,333 | 5.290429 | 0.141914 | 0.028072 | 0.061759 | 0.077979 | 0.691204 | 0.585985 | 0.534415 | 0.514244 | 0.495737 | 0.33271 | 0 | 0.013821 | 0.250102 | 7,333 | 188 | 270 | 39.005319 | 0.860702 | 0.539343 | 0 | 0.217391 | 0 | 0 | 0.149154 | 0.012341 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.072464 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca810acaf114d27759397ca02efb21720c0a1ece | 14,285 | py | Python | bauble/model/propagation.py | Bauble/bauble.api | 183c97fda076ea870e21e70ecf89a2a94a7f5722 | [
"BSD-3-Clause"
] | null | null | null | bauble/model/propagation.py | Bauble/bauble.api | 183c97fda076ea870e21e70ecf89a2a94a7f5722 | [
"BSD-3-Clause"
] | 1 | 2015-02-05T13:15:00.000Z | 2015-02-05T13:15:00.000Z | bauble/model/propagation.py | Bauble/bauble.api | 183c97fda076ea870e21e70ecf89a2a94a7f5722 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# propagation module
#
import datetime
import os
from random import random
import re
import sys
import weakref
import traceback
import xml.sax.saxutils as saxutils
import dateutil.parser as date_parser
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.orm.session import object_session
from sqlalchemy.exc import DBAPIError
import bauble
import bauble.db as db
from bauble.model import Model
#from bauble.error import check
import bauble.utils as utils  # utils.utf8 is used in Propagation.get_summary
import bauble.paths as paths
#import bauble.editor as editor
#from bauble.utils.log import debug
#import bauble.prefs as prefs
from bauble.error import CommitException
import bauble.types as types
prop_type_values = {'Seed': _("Seed"),
'UnrootedCutting': _('Unrooted cutting'),
'Other': _('Other')}
class PlantPropagation(Model):
"""
PlantPropagation provides an intermediate relation from
Plant->Propagation
"""
__tablename__ = 'plant_prop'
plant_id = Column(Integer, ForeignKey('plant.id'), nullable=False)
propagation_id = Column(Integer, ForeignKey('propagation.id'),
nullable=False)
propagation = relation('Propagation', uselist=False)
plant = relation('Plant', uselist=False)
def json(self, depth=1):
"""
"""
d = self.propagation.json(depth)
        d['ref'] = '/plant/' + str(self.plant_id) + d['ref']
return d
class Propagation(Model):
"""
Propagation
"""
__tablename__ = 'propagation'
#recvd_as = Column(Unicode(10)) # seed, urcu, other
prop_type = Column(types.Enum(values=prop_type_values.keys(),
translations=prop_type_values),
nullable=False)
notes = Column(UnicodeText)
date = Column(types.Date)
cutting = relation('PropCutting',
primaryjoin='Propagation.id==PropCutting.propagation_id',
cascade='all,delete-orphan', uselist=False,
backref=backref('propagation', uselist=False))
seed = relation('PropSeed',
primaryjoin='Propagation.id==PropSeed.propagation_id',
cascade='all,delete-orphan', uselist=False,
backref=backref('propagation', uselist=False))
def _get_details(self):
if self.prop_type == 'Seed':
return self.seed
elif self.prop_type == 'UnrootedCutting':
return self.cutting
elif self.notes:
return self.notes
else:
raise NotImplementedError
def _set_details(self, details):
"""
The details param is a dictionary of properties for either the PropCutting or PropSeed
depenging on the property type.
"""
if self.prop_type == 'Seed':
self.cutting = None
if self.seed is None:
self.seed = PropSeed()
self.seed.set_attributes(details)
elif self.prop_type == 'UnrootedCutting':
self.seed = None
if self.cutting is None:
self.cutting = PropCutting()
self.cutting.set_attributes(details)
elif self.prop_type != 'Other':
raise ValueError("Unknown propagation type: {}".format(self.prop_type))
details = property(_get_details, _set_details)
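    # Usage sketch (keys are illustrative; set_attributes is assumed to map
    # dict keys onto the corresponding PropSeed/PropCutting columns):
    #   prop = Propagation(prop_type='Seed')
    #   prop.details = {'nseeds': 10, 'date_sown': datetime.date.today()}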
def get_summary(self):
"""
"""
# TODO: need a date format string from the settings
# date_format = prefs.prefs[prefs.date_format_pref]
date_format = '%d-%m-%Y'
def get_date(date):
if isinstance(date, datetime.date):
return date.strftime(date_format)
return date
s = str(self)
if self.prop_type == 'UnrootedCutting':
c = self.cutting
values = []
if c.cutting_type is not None:
values.append(_('Cutting type: %s') %
cutting_type_values[c.cutting_type])
if c.length:
values.append(_('Length: %(length)s%(unit)s') %
dict(length=c.length,
unit=length_unit_values[c.length_unit]))
if c.tip:
values.append(_('Tip: %s') % tip_values[c.tip])
if c.leaves:
s = _('Leaves: %s') % leaves_values[c.leaves]
if c.leaves == 'Removed' and c.leaves_reduced_pct:
                    s += ' (%s%%)' % c.leaves_reduced_pct
values.append(s)
if c.flower_buds:
values.append(_('Flower buds: %s') %
flower_buds_values[c.flower_buds])
if c.wound is not None:
values.append(_('Wounded: %s' % wound_values[c.wound]))
if c.fungicide:
values.append(_('Fungal soak: %s' % c.fungicide))
if c.hormone:
values.append(_('Hormone treatment: %s' % c.hormone))
if c.bottom_heat_temp:
values.append(_('Bottom heat: %(temp)s%(unit)s') %
dict(temp=c.bottom_heat_temp,
unit=bottom_heat_unit_values[c.bottom_heat_unit]))
if c.container:
values.append(_('Container: %s' % c.container))
if c.media:
values.append(_('Media: %s' % c.media))
if c.location:
values.append(_('Location: %s' % c.location))
if c.cover:
values.append(_('Cover: %s' % c.cover))
if c.rooted_pct:
values.append(_('Rooted: %s%%') % c.rooted_pct)
s = ', '.join(values)
elif self.prop_type == 'Seed':
s = str(self)
seed = self.seed
values = []
if seed.pretreatment:
values.append(_('Pretreatment: %s') % seed.pretreatment)
if seed.nseeds:
values.append(_('# of seeds: %s') % seed.nseeds)
date_sown = get_date(seed.date_sown)
if date_sown:
values.append(_('Date sown: %s') % date_sown)
if seed.container:
values.append(_('Container: %s') % seed.container)
if seed.media:
values.append(_('Media: %s') % seed.media)
if seed.covered:
values.append(_('Covered: %s') % seed.covered)
if seed.location:
values.append(_('Location: %s') % seed.location)
germ_date = get_date(seed.germ_date)
if germ_date:
values.append(_('Germination date: %s') % germ_date)
if seed.nseedlings:
values.append(_('# of seedlings: %s') % seed.nseedlings)
if seed.germ_pct:
values.append(_('Germination rate: %s%%') % seed.germ_pct)
date_planted = get_date(seed.date_planted)
if date_planted:
values.append(_('Date planted: %s') % date_planted)
s = ', '.join(values)
elif self.notes:
s = utils.utf8(self.notes)
return s
def json(self, depth=1):
d = dict(ref="/propagation/" + str(self.id))
if depth > 0:
d['prop_type'] = self.prop_type
if self.prop_type == 'UnrootedCutting':
d.update(self._json_cutting(depth))
elif self.prop_type == 'Seed':
d.update(self._json_seed(depth))
        return d
def _json_cutting(self, depth):
d = dict()
d['cutting_type'] = self.cutting.cutting_type
d['tip'] = self.cutting.tip
        d['leaves'] = self.cutting.leaves
d['leaves_reduced_pct'] = self.cutting.leaves_reduced_pct
d['length'] = self.cutting.length
d['length_unit'] = self.cutting.length_unit
# single/double/slice
d['wound'] = self.cutting.wound
# removed/None
d['flower_buds'] = self.cutting.flower_buds
d['fungicide'] = self.cutting.fungicide # fungal soak
d['hormone'] = self.cutting.hormone # powder/liquid/None....solution
d['media'] = self.cutting.media
d['container'] = self.cutting.container
d['location'] = self.cutting.location
d['cover'] = self.cutting.cover # vispore, poly, plastic dome, poly bag
d['bottom_heat_temp'] = self.cutting.bottom_heat_temp # temperature of bottom heat
# F/C
d['bottom_heat_unit'] = self.cutting.bottom_heat_unit
d['rooted_pct'] = self.cutting.rooted_pct
d['rooted'] = []
for rooted in self.cutting.rooted:
d['rooted'].append(dict(date=rooted.date, quantity=rooted.quantity))
return d
def _json_seed(self, depth):
d = dict()
d['pretreatment'] = self.seed.pretreatment
d['nseeds'] = self.seed.nseeds
d['date_sown'] = self.seed.date_sown
d['container'] = self.seed.container
d['media'] = self.seed.media
d['covered'] = self.seed.covered
d['location'] = self.seed.location
d['moved_from'] = self.seed.moved_from
d['moved_to'] = self.seed.moved_to
d['moved_date'] = self.seed.moved_date
d['germ_date'] = self.seed.germ_date
d['nseedlings'] = self.seed.nseedlings
d['germ_pct'] = self.seed.germ_pct
d['date_planted'] = self.seed.date_planted
return d
class PropRooted(Model):
"""
Rooting dates for cutting
"""
__tablename__ = 'prop_cutting_rooted'
__mapper_args__ = {'order_by': 'date'}
date = Column(types.Date)
quantity = Column(Integer, autoincrement=False)
cutting_id = Column(Integer, ForeignKey('prop_cutting.id'), nullable=False)
cutting_type_values = {'Nodal': _('Nodal'),
'InterNodal': _('Internodal'),
'Other': _('Other')}
tip_values = {'Intact': _('Intact'),
'Removed': _('Removed'),
'None': _('None'),
None: ''}
leaves_values = {'Intact': _('Intact'),
'Removed': _('Removed'),
'None': _('None'),
None: ''}
flower_buds_values = {'Removed': _('Removed'),
'None': _('None'),
None: ''}
wound_values = {'No': _('No'),
'Single': _('Singled'),
'Double': _('Double'),
'Slice': _('Slice'),
None: ''}
hormone_values = {'Liquid': _('Liquid'),
'Powder': _('Powder'),
'No': _('No')}
bottom_heat_unit_values = {'F': _('\302\260F'),
'C': _('\302\260C'),
None: ''}
length_unit_values = {'mm': _('mm'),
'cm': _('cm'),
'in': _('in'),
None: ''}
class PropCutting(Model):
"""
A cutting
"""
__tablename__ = 'prop_cutting'
cutting_type = Column(types.Enum(values=cutting_type_values.keys(),
translations=cutting_type_values),
default='Other')
tip = Column(types.Enum(values=tip_values.keys(),
translations=tip_values))
leaves = Column(types.Enum(values=leaves_values.keys(),
translations=leaves_values))
leaves_reduced_pct = Column(Integer, autoincrement=False)
length = Column(Integer, autoincrement=False)
length_unit = Column(types.Enum(values=length_unit_values.keys(),
translations=length_unit_values))
# single/double/slice
wound = Column(types.Enum(values=wound_values.keys(),
translations=wound_values))
# removed/None
flower_buds = Column(types.Enum(values=flower_buds_values.keys(),
translations=flower_buds_values))
fungicide = Column(Unicode) # fungal soak
hormone = Column(Unicode) # powder/liquid/None....solution
media = Column(Unicode)
container = Column(Unicode)
location = Column(Unicode)
cover = Column(Unicode) # vispore, poly, plastic dome, poly bag
bottom_heat_temp = Column(Integer, autoincrement=False) # temperature of bottom heat
# TODO: make the bottom heat unit required if bottom_heat_temp is
# not null
# F/C
bottom_heat_unit = Column(types.Enum(values=bottom_heat_unit_values.keys(),
translations=bottom_heat_unit_values),
nullable=True)
rooted_pct = Column(Integer, autoincrement=False)
#aftercare = Column(UnicodeText) # same as propgation.notes
propagation_id = Column(Integer, ForeignKey('propagation.id'),
nullable=False)
rooted = relation('PropRooted', cascade='all,delete-orphan',
backref=backref('cutting', uselist=False))
class PropSeed(Model):
"""
"""
__tablename__ = 'prop_seed'
pretreatment = Column(UnicodeText)
nseeds = Column(Integer, nullable=False, autoincrement=False)
date_sown = Column(types.Date, nullable=False)
container = Column(Unicode) # 4" pot plug tray, other
media = Column(Unicode) # seedling media, sphagnum, other
# covered with #2 granite grit: no, yes, lightly heavily
covered = Column(Unicode)
# not same as location table, glasshouse(bottom heat, no bottom
# heat), polyhouse, polyshade house, fridge in polybag
location = Column(Unicode)
# TODO: do we need multiple moved to->moved from and date fields
moved_from = Column(Unicode)
moved_to = Column(Unicode)
moved_date = Column(types.Date)
germ_date = Column(types.Date)
nseedlings = Column(Integer, autoincrement=False) # number of seedling
germ_pct = Column(Integer, autoincrement=False) # % of germination
date_planted = Column(types.Date)
propagation_id = Column(Integer, ForeignKey('propagation.id'),
nullable=False)
def __str__(self):
# what would the string be...???
# cuttings of self.accession.taxon_str() and accession number
return repr(self)
| 34.756691 | 94 | 0.566398 | 1,546 | 14,285 | 5.05304 | 0.158473 | 0.038402 | 0.016897 | 0.021505 | 0.171915 | 0.077829 | 0.065796 | 0.05658 | 0.045315 | 0.021505 | 0 | 0.002138 | 0.312566 | 14,285 | 410 | 95 | 34.841463 | 0.793381 | 0.100805 | 0 | 0.191638 | 0 | 0 | 0.114972 | 0.006383 | 0 | 0 | 0 | 0.007317 | 0 | 1 | 0.031359 | false | 0 | 0.066202 | 0.003484 | 0.334495 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca81d00c9b08ea72e49aff7d13fb4cf046744724 | 517 | py | Python | imdbTask5.py | Anjalipatil18/IMDB_SCRAPER | 2ec79b91198e2f458d1099ee8ad27acde71b09b6 | [
"MIT"
] | null | null | null | imdbTask5.py | Anjalipatil18/IMDB_SCRAPER | 2ec79b91198e2f458d1099ee8ad27acde71b09b6 | [
"MIT"
] | null | null | null | imdbTask5.py | Anjalipatil18/IMDB_SCRAPER | 2ec79b91198e2f458d1099ee8ad27acde71b09b6 | [
"MIT"
] | null | null | null | from imdbTask1 import*
from imdbTask4 import*
def get_movie_list_details(api):
movieListDetails=[]
for i in api[:10]:
link=i["movieLink"]
movieUrl=Scrap_Movie_Detail(link)
new=movieUrl.copy() #They copy() method returns a shallow copy of the dictionary.
movieUrl.clear() #The clear() method removes all items from the dictionary.
movieListDetails.append(new)
return movieListDetails
movieOfDetails=get_movie_list_details(movieInfo)
# pprint(movieOfDetails)
| 34.466667 | 91 | 0.717602 | 63 | 517 | 5.761905 | 0.619048 | 0.044077 | 0.066116 | 0.104683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009639 | 0.197292 | 517 | 14 | 92 | 36.928571 | 0.86506 | 0.270793 | 0 | 0 | 0 | 0 | 0.024064 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca890cc0c4b716f777412a40314e358dfa561905 | 1,365 | py | Python | problems/risk/solutions/risk.py | lucidsoftware/lucid-programming-competition-2018 | be22bff0f0d1b008f5729c0b6e5e2adb08925c96 | [
"Apache-2.0"
] | 2 | 2019-03-05T22:35:47.000Z | 2019-03-05T22:35:55.000Z | problems/risk/solutions/risk.py | lucidsoftware/lucid-programming-competition-2018 | be22bff0f0d1b008f5729c0b6e5e2adb08925c96 | [
"Apache-2.0"
] | 2 | 2020-07-16T21:55:43.000Z | 2021-05-08T12:14:05.000Z | problems/risk/solutions/risk.py | lucidsoftware/lucid-programming-competition-2018 | be22bff0f0d1b008f5729c0b6e5e2adb08925c96 | [
"Apache-2.0"
] | 4 | 2019-03-08T01:42:00.000Z | 2021-01-28T03:01:34.000Z | from collections import defaultdict
from itertools import product
from functools import lru_cache
from sys import setrecursionlimit
setrecursionlimit(10000)
@lru_cache(maxsize=6)
def p_outcomes(a_dice, d_dice):
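    # Enumerate all 6**a_dice * 6**d_dice possible rolls; the highest dice are
    # paired off and ties go to the defender.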
total = 0
results = defaultdict(int)
for a_rolls in product(range(1,7), repeat=a_dice):
for d_rolls in product(range(1,7), repeat=d_dice):
a_sorted = reversed(sorted(a_rolls))
d_sorted = reversed(sorted(d_rolls))
a_deaths = 0
d_deaths = 0
for a, d in zip(a_sorted, d_sorted):
if a > d:
d_deaths += 1
else:
a_deaths += 1
results[(a_deaths, d_deaths)] += 1
total += 1
return [(result, count/total) for result, count in results.items()]
@lru_cache(maxsize=None)
def p_victory(attackers, defenders):
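    # Memoized recursion over (attackers, defenders); one attacker must stay
    # behind, hence attackers - 1 dice (max 3), versus up to 2 defender dice.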
a_dice = min(attackers-1, 3)
d_dice = min(defenders, 2)
if a_dice <= 0:
return 0
if d_dice <= 0:
return 1
total = 0
for (a_deaths, d_deaths), probability in p_outcomes(a_dice, d_dice):
total += probability * p_victory(attackers - a_deaths, defenders - d_deaths)
return total
for _ in range(int(input())):
attackers, defenders = map(int, input().split())
print("{0:.7f}".format(p_victory(attackers, defenders)))
| 29.673913 | 84 | 0.612454 | 190 | 1,365 | 4.205263 | 0.289474 | 0.031289 | 0.06383 | 0.035044 | 0.12766 | 0.12766 | 0.12766 | 0 | 0 | 0 | 0 | 0.027551 | 0.282051 | 1,365 | 45 | 85 | 30.333333 | 0.787755 | 0 | 0 | 0.052632 | 0 | 0 | 0.005128 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.263158 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca8a90e5ceb797a2b9672b05c74b49ae1602ffc4 | 1,240 | py | Python | solutions/2021/day_06.py | mokytis/advent-of-code | 7bddbc87411388bb0da8284c3daa5252f9d5007d | [
"MIT"
] | null | null | null | solutions/2021/day_06.py | mokytis/advent-of-code | 7bddbc87411388bb0da8284c3daa5252f9d5007d | [
"MIT"
] | null | null | null | solutions/2021/day_06.py | mokytis/advent-of-code | 7bddbc87411388bb0da8284c3daa5252f9d5007d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Puzzle Title: AoC 2021 Day 6: Lanternfish
Puzzle Link: https://adventofcode.com/2021/day/6
Solution Author: Luke Spademan <info@lukespademan.com>
Solution License: MIT
"""
import fileinput
from collections import Counter
import copy
def parse_input():
for line in fileinput.input():
if line.strip():
return Counter([int(timer) for timer in line.rstrip().split(",")])
def fishes_after_days(fishes, days):
# children get held for 2 days before being added to the general population
children = [0, 0]
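    # fishes[t] counts fish whose timer cycles with period 7; a bucket spawns on
    # the day its offset timer reads 6, and newborns sit in the 2-slot `children`
    # queue before joining the cycle.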
for day in range(days + 1):
for timer in range(7):
offset_timer = (timer - day) % 7
if offset_timer == 6:
children.append(fishes[timer])
fishes[timer] += children.pop(0)
return sum([fishes[t] for t in fishes]) + sum(children)
def solve_part1(data):
return fishes_after_days(data, 80)
def solve_part2(data):
return fishes_after_days(data, 256)
def main():
data = parse_input()
part1_ans = solve_part1(copy.copy(data))
print(f"Part 1: {part1_ans}")
part2_ans = solve_part2(copy.copy(data))
print(f"Part 2: {part2_ans}")
if __name__ == "__main__":
main()
| 22.545455 | 79 | 0.642742 | 174 | 1,240 | 4.431034 | 0.442529 | 0.042802 | 0.058366 | 0.054475 | 0.132296 | 0.132296 | 0 | 0 | 0 | 0 | 0 | 0.034884 | 0.237097 | 1,240 | 54 | 80 | 22.962963 | 0.780127 | 0.219355 | 0 | 0 | 0 | 0 | 0.049009 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.107143 | 0.071429 | 0.428571 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca8bb0a6b5e76ac3dc813001f4574315eedd8360 | 2,082 | py | Python | Day 20: Particle Swarm/Day 20: Particle Swarm.py | djvanhelmond/AdventofCode2017 | a41879e172b8f2db23ac6a5f71433ce6b484b909 | [
"BSD-3-Clause"
] | null | null | null | Day 20: Particle Swarm/Day 20: Particle Swarm.py | djvanhelmond/AdventofCode2017 | a41879e172b8f2db23ac6a5f71433ce6b484b909 | [
"BSD-3-Clause"
] | null | null | null | Day 20: Particle Swarm/Day 20: Particle Swarm.py | djvanhelmond/AdventofCode2017 | a41879e172b8f2db23ac6a5f71433ce6b484b909 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/local/bin/python3
import math
class Particle():
def __init__(self, position, velocity, acceleration):
self.position = position
self.velocity = velocity
self.acceleration = acceleration
self.absA = self.__absoluteAccelleration()
def __absoluteAccelleration(self):
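        # Euclidean norm of the acceleration vector; the particle with the
        # smallest norm stays closest to the origin in the long run (Star 1).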
return math.sqrt(sum([ math.pow(val, 2) for val in self.acceleration ]))
def update(self):
for i in range(3):
self.velocity[i] += self.acceleration[i]
self.position[i] += self.velocity[i]
class Gpu():
def __init__(self, particleList):
self.particles = {}
for key in range(len(particleList)):
self.particles[key] = self.__loadParticles(particleList[key])
def __loadParticles(self, particleData):
p, v, a = particleData.split(", ")
_, px, py, pz, _ = p.replace("<", ",").replace(">", ",").split(",")
_, vx, vy, vz, _ = v.replace("<", ",").replace(">", ",").split(",")
_, ax, ay, az, _ = a.replace("<", ",").replace(">", ",").split(",")
return Particle([int(px),int(py),int(pz)], [int(vx),int(vy),int(vz)], [int(ax),int(ay),int(az)])
def slowestParticle(self):
slowest = 0
for key in self.particles:
if self.particles[key].absA < self.particles[slowest].absA:
slowest = key
return slowest
    def __removeCollisions(self):
        allPositions = [self.particles[key].position for key in self.particles]
        collisions = [x for n, x in enumerate(allPositions) if x in allPositions[:n]]
        delkeys = [key for key in self.particles if self.particles[key].position in collisions]
        for key in delkeys:
            del self.particles[key]

    def tick(self):
        for key in self.particles:
            self.particles[key].update()
        self.__removeCollisions()


with open("./input.txt") as f:
    INPUT = f.readlines()

canvas = Gpu(INPUT)
print("Star 1: %i" % canvas.slowestParticle())
for i in range(100):
    canvas.tick()
print("Star 2: %i" % len(canvas.particles))
| 34.131148 | 104 | 0.597502 | 251 | 2,082 | 4.85259 | 0.298805 | 0.128079 | 0.039409 | 0.039409 | 0.098522 | 0.064039 | 0.064039 | 0.064039 | 0.064039 | 0 | 0 | 0.005711 | 0.243036 | 2,082 | 60 | 105 | 34.7 | 0.767132 | 0.011047 | 0 | 0.043478 | 0 | 0 | 0.023324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.021739 | 0.021739 | 0.304348 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca8e74bdcdf56ae9a145fc5960607020ba877bc3 | 25,083 | py | Python | QualityControl/QC_aggregate_FC.py | tsmonteiro/fmri_proc | ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1 | [
"MIT"
] | 2 | 2021-11-16T10:00:33.000Z | 2021-12-13T02:57:40.000Z | QualityControl/QC_aggregate_FC.py | tsmonteiro/fmri_proc | ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1 | [
"MIT"
] | null | null | null | QualityControl/QC_aggregate_FC.py | tsmonteiro/fmri_proc | ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1 | [
"MIT"
] | 1 | 2021-12-13T02:57:27.000Z | 2021-12-13T02:57:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 09:39:23 2020
@author: u0101486
"""
# Aggregate QC measures
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import configparser
from scipy.stats import pearsonr, spearmanr
from statsmodels.stats.multitest import multipletests
import scipy.stats as sst
import h5py
import pandas as pd
import pingouin as pg
from scipy.stats import ttest_ind
from sklearn.metrics import mutual_info_score
config = configparser.ConfigParser()
config.read('/home/luna.kuleuven.be/u0101486/workspace/fmri_proc/params.ini')
#PATH to qclib
sys.path.append(config['PATHS']['QCLIB_PATH'])
sys.path.append('/home/luna.kuleuven.be/u0101486/workspace/fmri_proc/ext/nitransforms/nitransforms')
import qclib.group_plots as gp
import shutil
def calc_MI(x, y, bins):
    c_xy = np.histogram2d(x, y, bins)[0]
    mi = mutual_info_score(None, None, contingency=c_xy)
    return mi
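# Minimal usage sketch for calc_MI (my addition; values illustrative only):
#   x = np.random.randn(200)
#   calc_MI(x, x, bins=10)                      # large: x fully determines x
#   calc_MI(x, np.random.randn(200), bins=10)   # near 0 for independent data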
def plot_mean_results(mats=None, odir=None, atlas='aal', motion=None, mdl='NONE'):
    mats = np.array(mats)
    fname = str.upper(atlas) + '_' + mdl + '_MeanFC.png'
    gp.plot_fc_mat(np.mean(mats, axis=0), outFile=odir + '/' + fname, atlas=atlas, figDpi=100)
    plt.close('all')

    nSub = mats.shape[0]
    hist = []
    for s in range(nSub):
        hist.append(np.histogram(mats[s, :, :], bins=30))

    fig = plt.figure(figsize=(20, 20), dpi=100, facecolor='w', edgecolor='k')
    for h in hist:
        plt.plot(h[1][1:], h[0])
    fname = str.upper(atlas) + '_' + mdl + '_Histogram.png'
    plt.savefig(odir + '/' + fname)
    plt.close('all')

    if motion is not None:
        motion = np.array(motion)
        nNodes = mats.shape[1]
        motionMat = np.zeros((nNodes, nNodes))
        pvals = []
        for n1 in range(nNodes):
            for n2 in range(n1 + 1, nNodes):
                fc = mats[:, n1, n2]
                afc = pearsonr(motion, fc)
                motionMat[n1, n2] = afc[0]
                pvals.append(afc[1])

        pcorr = multipletests(pvals, 0.05, 'fdr_bh')
        pcorr = pcorr[1]
        idx = 0
        for n1 in range(nNodes):
            for n2 in range(n1 + 1, nNodes):
                if pcorr[idx] < 0.05:
                    # mirror only FDR-significant motion correlations into the
                    # lower triangle
                    motionMat[n2, n1] = motionMat[n1, n2]
                else:
                    motionMat[n2, n1] = 0
                idx += 1

        fname = 'Motion_' + str.upper(atlas) + '_' + mdl + '.png'
        gp.plot_fc_mat(motionMat, outFile=odir + '/' + fname, atlas=atlas, figDpi=100)
        plt.close('all')
def plot_group_results(mats=None, odir=None, opref=None, atlas='aal', groups=None,
                       save_group_mats=False, mdl='NONE'):
    mats = np.array(mats)
    nNodes = mats.shape[1]
    groupEffect = np.zeros((nNodes, nNodes))
    pvals = []
    groups = np.array(groups)
    groupsLbl = np.unique(groups)
    for n1 in range(nNodes):
        for n2 in range(n1 + 1, nNodes):
            fc = mats[:, n1, n2]
            tstat, p = ttest_ind(fc[np.where(groups == groupsLbl[0])],
                                 fc[np.where(groups == groupsLbl[-1])],
                                 equal_var=False)
            groupEffect[n1, n2] = np.mean(fc[np.where(groups == groupsLbl[0])]) \
                - np.mean(fc[np.where(groups == groupsLbl[-1])])
            pvals.append(p)

    pcorr = multipletests(pvals, 0.05, 'holm')
    pcorr = pcorr[1]
    idx = 0
    for n1 in range(nNodes):
        for n2 in range(n1 + 1, nNodes):
            if pcorr[idx] < 0.05:
                groupEffect[n2, n1] = groupEffect[n1, n2]
            else:
                groupEffect[n2, n1] = 0
            idx += 1

    fname = odir + '/' + str.upper(atlas) + '_' + opref + '_' + mdl + '.png'
    gp.plot_fc_mat(groupEffect, outFile=fname, atlas=atlas, vmin=-0.4, vmax=0.4, figDpi=100)
    plt.close('all')

    if save_group_mats:
        matG1 = np.mean(mats[np.where(groups == groupsLbl[0])], axis=0)
        matG2 = np.mean(mats[np.where(groups == groupsLbl[-1])], axis=0)

        ofileG1 = odir + '/' + str.upper(atlas) + '_' + opref + '_' + mdl + '_G1.png'
        ofileG2 = odir + '/' + str.upper(atlas) + '_' + opref + '_' + mdl + '_G2.png'
        gp.plot_fc_mat(matG1, outFile=ofileG1, atlas=atlas, figDpi=100)
        plt.close('all')
        gp.plot_fc_mat(matG2, outFile=ofileG2, atlas=atlas, figDpi=100)
        plt.close('all')
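# The pattern above (fill the upper triangle, correct the flattened p-values
# with multipletests, then mirror only the significant edges into the lower
# triangle) recurs throughout this script. A sketch of how it could be
# factored out (hypothetical helper, my addition; not called anywhere below):
def _mirror_significant(mat, pvals, alpha=0.05, method='fdr_bh'):
    """In place, copy upper-triangle entries whose corrected p-value is below
    alpha into the lower triangle; non-significant lower entries stay 0."""
    pcorr = multipletests(pvals, alpha, method)[1]
    idx = 0
    for n1 in range(mat.shape[0]):
        for n2 in range(n1 + 1, mat.shape[1]):
            if pcorr[idx] < alpha:
                mat[n2, n1] = mat[n1, n2]
            idx += 1
    return mat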
def plot_regression_results(mats=None, ofile=None, atlas='aal', regressor=None):
    print("TODO. Sorry.")


# END OF FUNCTION DEFINITIONS
# ======================================================================
# ======================================================================
#TODO Make it easier to switch between projects
#project = 'CRUNCH'
#project = 'RepImpact'
project = 'CAI_China'
baseDir = '/home/luna.kuleuven.be/u0101486/workspace/data/' + project + '/tmp/'
qcDir = '/home/luna.kuleuven.be/u0101486/workspace/data/' + project + '/Quality_Control/FC/'
ages = np.loadtxt('/home/luna.kuleuven.be/u0101486/workspace/data/CRUNCH/ages.txt', delimiter=',')[:,0]
statModels = ['NONE', 'SFIX_CC', 'SFIX_D', 'SRP24WM1CSF1', 'SRP24CC', 'SRP9', 'SFIX']
#statModels = ['NONE', 'SFIX', 'SFIX_D', 'SRP24WM1CSF1', 'SRP24CC', 'SRP9']
#statModels = ['NONE']
#statModels = ['SFIX_D']
if os.path.isdir(qcDir):
    shutil.rmtree(qcDir)
os.mkdir(qcDir)

if project == 'RepImpact' and not os.path.isdir(qcDir + '/TimePoint'):
    os.mkdir(qcDir + '/TimePoint')
if project == 'RepImpact' and not os.path.isdir(qcDir + '/Center'):
    os.mkdir(qcDir + '/Center')
import bct
for mdl in statModels:
    aalMats = []
    aalScMats = []
    histAal = []
    histLg4 = []
    lg4Mats = []
    strength = []
    incAges = []
    incSubs = []
    incMotion = []
    notProc = []
    incSIDs = []
    incGroups = []
    incCenters = []

    fcQc = qcDir + '/Mats_' + mdl + '/'
    if os.path.isdir(fcQc):
        shutil.rmtree(fcQc)
    os.mkdir(fcQc)

    for sub in sorted(os.listdir(baseDir)):
        print(sub)
        subDir = baseDir + '/' + sub
        fcAal = subDir + '/QA_' + mdl + '/05_FC_' + mdl + '_correlation_matrix_aal.txt'
        fcLg4 = subDir + '/QA_' + mdl + '/05_FC_' + mdl + '_correlation_matrix_lg400.txt'
        motFile = subDir + '/maximum_disp.1d_delt'
        scMatFile = '/media/u0101486/Seagate Backup Plus Drive/Crunch_SC_AAL2_dirty/R' + sub[2:] + '_muw__connectome_tractogram10MSIFT2_FOD_AVE99_WM_norm_zerodia_sym_aal2.mat'

        if os.path.isfile(fcAal) and os.path.isfile(fcLg4):  # and os.path.isfile(scMatFile):
            mot = np.mean(np.loadtxt(motFile))
            m = np.loadtxt(motFile)
            nSpikes = len(m[np.where(m > 0.5)])

            # exclude high-motion subjects: mean displacement above 0.4 or
            # more than 50 samples with displacement above 0.5
            if mot > .4 or nSpikes > 50:
                print(sub + ': Mean = {:.02f}, Max = {:.02f}, (N = {})'.format(mot, np.max(m), nSpikes))
                shutil.copyfile(fcAal, fcQc + '/AAL_' + sub + '_HM.txt')
                shutil.copyfile(fcLg4, fcQc + '/LocalGlobal_' + sub + '_HM.txt')
                continue

            shutil.copyfile(fcAal, fcQc + '/AAL_' + sub + '.txt')
            shutil.copyfile(fcLg4, fcQc + '/LocalGlobal_' + sub + '.txt')

            if project == 'CRUNCH':
                i = int(sub[2:]) - 1
                incAges.append(ages[i])

                f = h5py.File(scMatFile, 'r')
                for k, v in f.items():
                    scMat = np.array(v)
                aalScMats.append(scMat)

                if ages[i] < 30:
                    incGroups.append(0)
                if ages[i] < 62 and ages[i] >= 30:
                    incGroups.append(1)
                if ages[i] >= 62:
                    incGroups.append(2)

            if project == 'RepImpact':
                sid = int(sub[3:]) - 1
                incGroups.append(int(sub[1]))
                if sub[0] == 'B':
                    incCenters.append(0)
                    incSIDs.append(sid + 100)
                if sub[0] == 'N':
                    incCenters.append(1)
                    incSIDs.append(sid + 200)

            if project == 'CAI_China':
                sid = int(sub[3:])
                if sub[3] == '0':
                    incGroups.append(0)
                    incSIDs.append(sid + 100)
                if sub[3] == '1':
                    incGroups.append(1)
                    incSIDs.append(sid + 200)

            aal = np.loadtxt(fcAal)
            lg4 = np.loadtxt(fcLg4)

            incMotion.append(mot)

            #randNet = bct.randmio_und_signed(aal, 1)
            #Cr = bct.clustering_coef_wu(randNet[0])
            #C = bct.clustering_coef_wu(aal)
            #Lr = ( bct.distance_wei(randNet[0])[0] )
            #L = ( bct.distance_wei(aal)[0] )
            #smallWorld = np.nanmean( (C/Cr) / (L/Lr) )
            #aal = bct.threshold_proportional(aal, 0.25)

            nodeStr = bct.strengths_und(aal)
            nodeStr = nodeStr[94:]  # keep only nodes from index 94 onward (a subset of the atlas)
            #nodeStr = np.sort(nodeStr[0])[::-1] + np.sort(abs(nodeStr[1]))[::-1]
            #nodeStr = bct.eigenvector_centrality_und(lg4)
            #nodeStr = bct.clustering_coef_wu(lg4)
            #histLg4.append(np.sum(nodeStr))
            strength.append(np.mean(nodeStr))
            #nodeStr = bct.strengths_und_sign(aal)
            #nodeStr = np.sort(nodeStr[0])[::-1] + np.sort(abs(nodeStr[1]))[::-1]
            #nodeStr = bct.eigenvector_centrality_und(aal)
            #histAal.append(np.histogram(nodeStr, bins=30))
            #histLg4.append(np.histogram(lg4, bins=30))
            #histAal.append(np.histogram(aal, bins=30))
            #histLg4.append(nodeStr)

            aalMats.append(np.arctanh(aal))
            lg4Mats.append(np.arctanh(lg4))
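            # The np.arctanh calls above apply the Fisher r-to-z transform:
            # z-values are closer to normally distributed than raw
            # correlations, which suits the t-tests/ANOVAs downstream. Note
            # that r = 1 on the diagonal maps to inf, so the diagonal should
            # be masked in any statistics or plots.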
        else:
            notProc.append(sub)

    plt.plot(strength)
    # END for sub in sorted(....)

    #%%
    print("Plotting group comparisons AAL")
    plot_group_results(mats=aalMats, odir=qcDir, opref='GroupEffect', mdl=mdl,
                       atlas='aal', groups=incGroups, save_group_mats=True)
    print("Plotting group comparisons LocalGlobal")
    plot_group_results(mats=lg4Mats, odir=qcDir, opref='GroupEffect', mdl=mdl,
                       atlas='localGlobal', groups=incGroups, save_group_mats=True)

    print("Plotting mean FC matrices")
    plot_mean_results(mats=aalMats, odir=qcDir, atlas='aal', motion=incMotion, mdl=mdl)
    plot_mean_results(mats=lg4Mats, odir=qcDir, atlas='localGlobal', motion=incMotion, mdl=mdl)
    # Everything from here to the closing triple-quote is commented out:
    # exploratory analyses kept for reference.
    '''
    #%%
    import nilearn.plotting as nlp
    from pingouin import partial_corr
    import pandas as pd

    caiDuration = [120,36,10,2,4,120,72,24,7,12,7,4,60,8,36,7.5,9,6,192,192,120,60,12]
    kfas = [52,15,29,65,60,35,12,50,62,55,17,35,57,22,40,60,62,17,25,65,57,57,70]
    aofas = [73,58,63,79,66,57,65,89,87,89,29,42,80,78,69,79,85,37,39,72,76,95,79]
    age = [31,40,31,31,27,29,29,24,25,30,13,26,38,30,33,31,31,41,38,37,25,22,43]
    bmi = [21.25,21.97,26.12,18.69,20.98,28.34,29.69,20.68,20.99,22.35,26.95,19.38,27.14,28.74,26.53,20.03,23.03,20.83,20.90,30.02,22.49,25.71,24.22]
    vasr = [0.8,0.9,3.5,0,0.6,0,1.7,0.7,0,0,1.1,0,0,3.4,2.6,1.4,0,0.8,0,1.9,1,0,4.5]

    durMat = np.zeros((120,120))
    pvals = []
    lg4Mats = np.array(lg4Mats)
    aalMats = np.array(aalMats)
    for n1 in range(120):
        print(n1)
        for n2 in range(n1+1,120):
            conn = aalMats[26:,n1,n2]
            #df = pd.DataFrame({'FC':conn, 'Duration':caiDuration, 'Age':age, 'BMI':bmi, 'AOFAS':aofas, 'KFAS':kfas, 'VASR':vasr})
            #res = partial_corr(data=df,x='FC',y='KFAS',covar=['Age'])
            afc = pearsonr(aofas, conn)
            durMat[n1,n2] = afc[0]  #res["r"][0]
            #pvals.append(res["p-val"][0])
            pvals.append(afc[1])

    corrP = multipletests(pvals, 0.05, 'fdr_bh')
    corrP = corrP[1]
    idx = 0
    for n1 in range(120):
        for n2 in range(n1+1,120):
            if corrP[idx] < 0.05:
                print('{} x {} : r = {:.03f} (p = {:.03f})'.format(n1,n2,durMat[n1,n2],corrP[idx]))
                durMat[n2,n1] = durMat[n1,n2]
            idx += 1

    gp.plot_fc_mat(durMat, outFile=None, figDpi=70, atlas='aal', vmin=-0.75, vmax=0.75, cmap='jet',
                   title=None)

    #%%
    import nilearn.plotting as nlp
    pidx = 0
    for n1 in range(6):
        for n2 in range(5):
            ax = plt.subplot(6,5,pidx+1)
            nlp.plot_matrix( lg4Mats[pidx], vmax=0.6, vmin=-0.6, axes=ax )
            plt.title(pidx+1)
            pidx += 1

    #%%
    strength = np.array(strength)
    incGroups = np.array(incGroups)

    mu1 = np.mean(strength[np.where(incGroups==0)])
    sd1 = np.std(strength[np.where(incGroups==0)])
    mu2 = np.mean(strength[np.where(incGroups==1)])
    sd2 = np.std(strength[np.where(incGroups==1)])

    plt.scatter([1,2], [mu1, mu2] )
    plt.plot( [1,1], [mu1-sd1, mu1+sd1], color='k' )
    plt.plot( [2,2], [mu2-sd2, mu2+sd2], color='k' )

    #%%
    meanAal = np.mean( aalMats, axis=0 )
    meanSAal = np.mean( aalScMats, axis=0 )
    meanSAal = meanSAal / np.max(meanSAal)
    aalMats = np.array(aalMats)
    lg4Mats = np.array(lg4Mats)
    if project == 'RepImpact':
        incGroups = np.array(incGroups)
        incCenters = np.array(incCenters)
        incSIDs = np.array(incSIDs)
    #%%
    import pandas as pd
    import pingouin as pg

    centerEffectLg4 = np.zeros( (lg4Mats.shape[1],lg4Mats.shape[1]) )
    timePointEffectLg4 = np.zeros( (lg4Mats.shape[1],lg4Mats.shape[1]) )
    pvalsCenterLg4 = []
    pvalsTimePointLg4 = []
    for n1 in range(lg4Mats.shape[1]):
        for n2 in range(n1+1,lg4Mats.shape[2]):
            fc = lg4Mats[:,n1,n2]
            df = pd.DataFrame({'Center':incCenters,
                               'TimePoint':incGroups,
                               'SIDs':incSIDs,
                               'FC':fc})
            # Compute the two-way mixed-design ANOVA
            aov = pg.mixed_anova(dv='FC', within='TimePoint', between='Center', subject='SIDs', data=df)
            centerEffectLg4[n1,n2] = np.mean(fc[np.where(incCenters==0)]) - np.mean(fc[np.where(incCenters==1)])
            timePointEffectLg4[n1,n2] = aov['F'][1]
            pvalsCenterLg4.append(aov['p-unc'][0])
            pvalsTimePointLg4.append(aov['p-unc'][1])
            # Pretty printing of ANOVA summary
            #pg.print_table(aov)

    corrPvalsCenterLg4 = multipletests(pvalsCenterLg4, 0.05, 'fdr_bh')
    corrPvalsCenterLg4 = corrPvalsCenterLg4[1]
    corrPvalsTimePointLg4 = multipletests(pvalsTimePointLg4, 0.05, 'fdr_bh')
    corrPvalsTimePointLg4 = corrPvalsTimePointLg4[1]

    idx = 0
    for n1 in range(lg4Mats.shape[1]):
        for n2 in range(n1+1,lg4Mats.shape[2]):
            # mirror significant effects into the lower triangle
            if corrPvalsTimePointLg4[idx] < 0.05:
                timePointEffectLg4[n2,n1] = timePointEffectLg4[n1,n2]
            if corrPvalsCenterLg4[idx] < 0.05:
                centerEffectLg4[n2,n1] = centerEffectLg4[n1,n2]
            idx += 1

    #%%
    meanAalT1 = np.mean( aalMats[np.where(incGroups==1)], axis=0 )
    meanAalT2 = np.mean( aalMats[np.where(incGroups==2)], axis=0 )
    meanAalT3 = np.mean( aalMats[np.where(incGroups==3)], axis=0 )
    meanLg4T1 = np.mean( lg4Mats[np.where(incGroups==1)], axis=0 )
    meanLg4T2 = np.mean( lg4Mats[np.where(incGroups==2)], axis=0 )
    meanLg4T3 = np.mean( lg4Mats[np.where(incGroups==3)], axis=0 )

    meanLg4B = np.mean( lg4Mats[np.where(incCenters==0)], axis=0 )
    meanLg4N = np.mean( lg4Mats[np.where(incCenters==1)], axis=0 )
    meanAalB = np.mean( aalMats[np.where(incCenters==0)], axis=0 )
    meanAalN = np.mean( aalMats[np.where(incCenters==1)], axis=0 )

    gp.plot_fc_mat(meanAalB, outFile=outFileAalC1, atlas='aal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanAalN, outFile=outFileAalC2, atlas='aal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanLg4B, outFile=outFileLg4C1, atlas='localGlobal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanLg4N, outFile=outFileLg4C2, atlas='localGlobal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(centerEffectLg4, outFile=outFileLg4CE, atlas='localGlobal', figDpi=120)
    plt.close('all')

    gp.plot_fc_mat(meanAalT1, outFile=outFileAalT1, atlas='aal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanAalT2, outFile=outFileAalT2, atlas='aal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanAalT3, outFile=outFileAalT3, atlas='aal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanLg4T1, outFile=outFileLg4T1, atlas='localGlobal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanLg4T2, outFile=outFileLg4T2, atlas='localGlobal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(meanLg4T3, outFile=outFileLg4T3, atlas='localGlobal', figDpi=120)
    plt.close('all')
    gp.plot_fc_mat(timePointEffectLg4, outFile=outFileLg4TE, atlas='localGlobal', vmin=0.5, vmax=5, cmap='hot', figDpi=120)
    plt.close('all')
    sdAal = np.std( aalMats, axis=0 )
    meanLg4 = np.mean( lg4Mats, axis=0 )
    sdLg4 = np.std( lg4Mats, axis=0 )

    if project == 'CRUNCH':
        # Corr with age
        alg4 = np.array(lg4Mats)
        nNodes4 = len(meanLg4)
        ageMat = np.zeros((nNodes4,nNodes4))
        pvals = []
        for n1 in range(nNodes4):
            for n2 in range(n1+1,nNodes4):
                fc = alg4[:,n1,n2]
                afc = pearsonr(incAges, fc)
                #lr = linregress(incAges, fc)
                #afc = np.corrcoef(incAges, fc)[0][1]
                ageMat[n1,n2] = afc[0]
                pvals.append(afc[1])

        # Corr with age, AAL
        aAal = np.array(aalMats)
        nNodesA = len(meanAal)
        ageMatA = np.zeros((nNodesA,nNodesA))
        pvalsA = []
        for n1 in range(nNodesA):
            for n2 in range(n1+1,nNodesA):
                fc = aAal[:,n1,n2]
                afc = pearsonr(incAges, fc)
                #lr = linregress(incAges, fc)
                #afc = np.corrcoef(incAges, fc)[0][1]
                ageMatA[n1,n2] = afc[0]
                pvalsA.append(afc[1])

        # Corr with age, structural AAL
        sAal = np.array(aalScMats)
        nNodesA = len(meanAal)
        ageMatS = np.zeros((nNodesA,nNodesA))
        pvalsS = []
        for n1 in range(nNodesA):
            for n2 in range(n1+1,nNodesA):
                fc = sAal[:,n1,n2]
                afc = pearsonr(incAges, fc)
                #lr = linregress(incAges, fc)
                #afc = np.corrcoef(incAges, fc)[0][1]
                ageMatS[n1,n2] = afc[0]
                pvalsS.append(afc[1])

        corrPA = multipletests(pvalsA, 0.05, 'fdr_bh')
        corrPA = corrPA[1]
        corrPS = multipletests(pvalsS, 0.05, 'fdr_bh')
        corrPS = corrPS[1]
        corrP = multipletests(pvals, 0.05, 'fdr_bh')
        corrP = corrP[1]

        idx = 0
        for n1 in range(nNodesA):
            for n2 in range(n1+1,nNodesA):
                if corrPA[idx] < 0.05:
                    ageMatA[n2,n1] = ageMatA[n1,n2]
                idx += 1

        idx = 0
        for n1 in range(nNodesA):
            for n2 in range(n1+1,nNodesA):
                if corrPS[idx] < 0.05:
                    ageMatS[n2,n1] = ageMatS[n1,n2]
                idx += 1

        idx = 0
        for n1 in range(nNodes4):
            for n2 in range(n1+1,nNodes4):
                if corrP[idx] < 0.05:
                    ageMat[n2,n1] = ageMat[n1,n2]
                idx += 1

        gp.plot_fc_mat(ageMat, outFile=outFileLg4A, atlas='localGlobal', figDpi=72)
        plt.close('all')
        gp.plot_fc_mat(ageMatA, outFile=outFileAalA, atlas='aal', figDpi=72)
        plt.close('all')
        gp.plot_fc_mat(ageMatS, outFile=outFileAalAS, atlas='aal', figDpi=72)
        plt.close('all')

    # Corr with motion
    alg4 = np.array(lg4Mats)
    nNodes4 = len(meanLg4)
    ageMat = np.zeros((nNodes4,nNodes4))
    pvals = []
    for n1 in range(nNodes4):
        for n2 in range(n1+1,nNodes4):
            fc = alg4[:,n1,n2]
            afc = pearsonr(incMotion, fc)
            #lr = linregress(incAges, fc)
            #afc = np.corrcoef(incAges, fc)[0][1]
            ageMat[n1,n2] = afc[0]
            pvals.append(afc[1])

    # Corr with motion, AAL
    aAal = np.array(aalMats)
    nNodesA = len(meanAal)
    ageMatA = np.zeros((nNodesA,nNodesA))
    pvalsA = []
    for n1 in range(nNodesA):
        for n2 in range(n1+1,nNodesA):
            fc = aAal[:,n1,n2]
            afc = pearsonr(incMotion, fc)
            #lr = linregress(incAges, fc)
            #afc = np.corrcoef(incAges, fc)[0][1]
            ageMatA[n1,n2] = afc[0]
            pvalsA.append(afc[1])

    corrPA = multipletests(pvalsA, 0.05, 'fdr_bh')
    corrPA = corrPA[1]
    corrP = multipletests(pvals, 0.05, 'fdr_bh')
    corrP = corrP[1]

    idx = 0
    for n1 in range(nNodesA):
        for n2 in range(n1+1,nNodesA):
            if corrPA[idx] < 0.05:
                ageMatA[n2,n1] = ageMatA[n1,n2]
            idx += 1

    idx = 0
    for n1 in range(nNodes4):
        for n2 in range(n1+1,nNodes4):
            if corrP[idx] < 0.05:
                ageMat[n2,n1] = ageMat[n1,n2]
            idx += 1

    gp.plot_fc_mat(ageMat, outFile=outFileLg4M, atlas='localGlobal', figDpi=72)
    plt.close('all')
    gp.plot_fc_mat(ageMatA, outFile=outFileAalM, atlas='aal', figDpi=72)
    plt.close('all')
    #gp.plot_fc_mat(meanSAal, outFile=outFileAalMS, atlas='aal', figDpi=72, vmin=0, vmax=0.1, cmap='hot')
    #plt.close('all')

    gp.plot_fc_mat(meanLg4, outFile=outFileLg4, atlas='localGlobal', figDpi=72)
    plt.close('all')

    #ms = np.reshape(meanSAal, (1,nNodesA*nNodesA))
    #ma = np.reshape(meanAal, (1,nNodesA*nNodesA))
    #mDist = np.corrcoef( ma, ms )
    #mDist = mutual_info_score(np.reshape(ms, 8836), np.reshape(ma,8836))
    #gp.plot_fc_mat(meanAal, outFile=outFileAal, atlas='aal', figDpi=72, title='R_structural = {:.03f}'.format(mDist))
    #plt.close('all')

    gp.plot_fc_mat(sdLg4, outFile=outFileLg4S, atlas='localGlobal', vmin=0.1, vmax=0.25, cmap='hot', figDpi=72)
    plt.close('all')
    gp.plot_fc_mat(sdAal, outFile=outFileAalS, atlas='aal', vmin=0.1, vmax=0.25, cmap='hot', figDpi=72)
    plt.close('all')

    #%%
    fig = plt.figure(figsize=(20,20), dpi=72, facecolor='w', edgecolor='k')
    if project == 'CRUNCH':
        normAge = incAges/np.max(incAges)

    idx = 0
    colors = [(1,0,0,0.3),(0,1,0,0.3),(0,0,1,0.3)]
    for h in histLg4:
        #plt.scatter( incGroups[idx], h, color=colors[incGroups[idx]-1] )
        if project == 'RepImpact':
            plt.plot( h[1][1:], h[0], color=colors[incGroups[idx]-1] )
        if project == 'CRUNCH':
            plt.plot(h[1][1:], h[0], color=(normAge[idx],normAge[idx],normAge[idx]))
        idx += 1
    plt.savefig(outFileLg4H)
    plt.close('all')

    #%%
    fig = plt.figure(figsize=(20,20), dpi=72, facecolor='w', edgecolor='k')
    idx = 0
    for h in histAal:
        if project == 'RepImpact':
            plt.plot( h[1][1:], h[0], color=colors[incGroups[idx]-1] )
        if project == 'CRUNCH':
            plt.plot(h[1][1:], h[0], color=(normAge[idx],normAge[idx],normAge[idx]))
        idx += 1
    plt.savefig(outFileAalH)
    plt.close('all')

    print("Done with " + mdl)
''' | 33.134742 | 175 | 0.521429 | 3,101 | 25,083 | 4.158659 | 0.164141 | 0.021169 | 0.025589 | 0.023883 | 0.486275 | 0.437035 | 0.369262 | 0.322581 | 0.277916 | 0.262019 | 0 | 0.070862 | 0.325998 | 25,083 | 757 | 176 | 33.134742 | 0.691944 | 0.0543 | 0 | 0.20603 | 0 | 0 | 0.112283 | 0.051799 | 0 | 0 | 0 | 0.001321 | 0 | 1 | 0.020101 | false | 0 | 0.080402 | 0 | 0.105528 | 0.030151 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca8f100cbcd38d31bcc26bca290e0a38cd1a6502 | 5,987 | py | Python | dalle_pytorch/distributed_backends/deepspeed_backend.py | Gitsamshi/DALLE-pytorch | 6cfc43158a4615865e97c839133290afcf289824 | [
"MIT"
] | 4,025 | 2021-01-05T23:52:33.000Z | 2022-03-31T11:17:44.000Z | dalle_pytorch/distributed_backends/deepspeed_backend.py | Gitsamshi/DALLE-pytorch | 6cfc43158a4615865e97c839133290afcf289824 | [
"MIT"
] | 318 | 2021-01-06T08:14:36.000Z | 2022-03-15T07:23:39.000Z | dalle_pytorch/distributed_backends/deepspeed_backend.py | Gitsamshi/DALLE-pytorch | 6cfc43158a4615865e97c839133290afcf289824 | [
"MIT"
] | 449 | 2021-01-06T17:30:17.000Z | 2022-03-30T03:42:17.000Z | import json
import os
import torch
from .distributed_backend import DistributedBackend
class DeepSpeedBackend(DistributedBackend):
    """Distributed backend using the DeepSpeed engine."""

    BACKEND_MODULE_NAME = 'deepspeed'
    BACKEND_NAME = 'DeepSpeed'

    def wrap_arg_parser(self, parser):
        if not self.has_backend():
            parser.add_argument(
                '--deepspeed',
                type=lambda _: False,
                help=(
                    'whether to use DeepSpeed '
                    "(ignored since it's not available)"
                ),
            )
        else:
            parser = self.backend_module.add_config_arguments(parser)

        parser.add_argument(
            '--local_rank',
            type=int,
            default=-1,
            help='local rank passed from distributed launcher',
        )
        return parser

    def _initialize(self):
        self.backend_module.init_distributed()
        if torch.cuda.is_available():
            torch.cuda.set_device(self._get_local_rank())

    @staticmethod
    def _require_torch_distributed_init():
        """Raise an error when `torch.distributed` has not been
        initialized yet.
        """
        assert torch.distributed.is_initialized(), \
            ('`torch.distributed` is not initialized; please call '
             '`DeepSpeedBackend.initialize` at the start of your script')

    def _get_world_size(self):
        self._require_torch_distributed_init()
        return torch.distributed.get_world_size()

    def _get_rank(self):
        self._require_torch_distributed_init()
        return torch.distributed.get_rank()

    def _get_local_rank(self):
        self._require_torch_distributed_init()
        return int(os.environ['LOCAL_RANK'])

    def _local_barrier(self):
        self._require_torch_distributed_init()
        torch.distributed.barrier()

    def _check_args(self, args, optimizer, lr_scheduler, kwargs):
        """Return an appropriate optimizer and learning rate scheduler
        after checking the values passed to `distribute`.
        """
        self._check_argvs(args, optimizer, lr_scheduler, kwargs)
        (optimizer, lr_scheduler) = self._check_config(
            args, optimizer, lr_scheduler, kwargs)
        return (optimizer, lr_scheduler)

    def _check_argvs(self, args, optimizer, lr_scheduler, kwargs):
        """Apply several sanity checks to the given command
        line arguments.
        """
        has_json_config = (hasattr(args, 'deepspeed_config')
                           and args.deepspeed_config is not None)
        has_dict_config = 'config_params' in kwargs
        if (
                # No config given
                (not has_json_config and not has_dict_config)
                # JSON config file does not exist
                or (not has_dict_config
                    and not os.path.isfile(args.deepspeed_config))
        ):
            # Let DeepSpeed handle these argument errors.
            return

        if not args.deepspeed:
            print(
                'WARNING: DeepSpeed backend was selected; setting '
                '`args.deepspeed = True`'
            )
            args.deepspeed = True

        if has_json_config and has_dict_config:
            print(
                'WARNING: DeepSpeed config was given as both JSON file and '
                'Python dictionary. Python dictionary takes precedence.'
            )

    def _check_config(self, args, optimizer, lr_scheduler, kwargs):
        """Return an appropriate optimizer and learning rate scheduler
        for the DeepSpeed configuration.
        """
        if 'config_params' in kwargs:
            config = kwargs['config_params']
        else:
            with open(args.deepspeed_config, 'r') as json_config_file:
                config = json.load(json_config_file)

        if 'optimizer' in config and optimizer is not None:
            print(
                'WARNING: Optimizer encountered in both DeepSpeed config and '
                'keyword arguments. Optimizer in DeepSpeed config '
                'takes precedence.'
            )
            optimizer = None

        if 'scheduler' in config and lr_scheduler is not None:
            print(
                'WARNING: Learning rate scheduler encountered in both '
                'DeepSpeed config and keyword arguments. Learning rate '
                'scheduler in DeepSpeed config takes precedence.'
            )
            # For the LR scheduler, the JSON config already has
            # precedence. We do this for forward compatibility.
            lr_scheduler = None

        return (optimizer, lr_scheduler)
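    # For reference, a minimal DeepSpeed JSON config (my own illustration, not
    # shipped with this repo) that would trigger the optimizer warning above:
    #   {
    #     "train_batch_size": 8,
    #     "optimizer": {"type": "Adam", "params": {"lr": 3e-4}}
    #   }
    # Passing such a file via --deepspeed_config together with an optimizer
    # built in code makes _check_config drop the in-code optimizer.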
    def _distribute(
            self,
            args=None,
            model=None,
            optimizer=None,
            model_parameters=None,
            training_data=None,
            lr_scheduler=None,
            **kwargs,
    ):
        """Return a distributed model engine, optimizer, dataloader, and
        learning rate scheduler. These are obtained by wrapping the
        given values with the backend.

        For these and other possible arguments,
        see `deepspeed.initialize`.
        """
        (optimizer, lr_scheduler) = self._check_args(
            args, optimizer, lr_scheduler, kwargs)
        return self.backend_module.initialize(
            args=args,
            model=model,
            optimizer=optimizer,
            model_parameters=model_parameters,
            training_data=training_data,
            lr_scheduler=lr_scheduler,
            **kwargs,
        )

    def _average_all(self, tensor):
        self._require_torch_distributed_init()
        # We copy because modification happens in-place
        averaged = tensor.detach().clone()
        # We use `all_reduce` because it is better supported than `reduce`
        torch.distributed.all_reduce(averaged, torch.distributed.ReduceOp.SUM)
        return averaged / self.get_world_size()
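    # Numeric example (my addition): with world size 2 and per-rank tensors
    # [1., 3.] and [3., 5.], all_reduce(SUM) leaves [4., 8.] on every rank,
    # so _average_all returns [2., 4.], the element-wise mean across workers.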
| 34.80814 | 78 | 0.601637 | 629 | 5,987 | 5.524642 | 0.27186 | 0.050647 | 0.057554 | 0.046619 | 0.24777 | 0.168058 | 0.127482 | 0.127482 | 0.084029 | 0.084029 | 0 | 0.000248 | 0.325372 | 5,987 | 171 | 79 | 35.011696 | 0.860114 | 0.152664 | 0 | 0.175 | 0 | 0 | 0.162371 | 0.005886 | 0 | 0 | 0 | 0 | 0.008333 | 1 | 0.1 | false | 0.008333 | 0.033333 | 0 | 0.233333 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |