# Repository: UWaterloo-ASL/LAS_Gym
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 00:37:13 2018
@author: jack.lingheng.meng
"""
#original file: Integration_Demo_for_ROM_Exhibit_new.py
import logging
from datetime import datetime, date
from threading import Timer
import os
import numpy as np
from gym import spaces
from LASAgent.InternalEnvOfAgent import InternalEnvOfAgent
from LASAgent.InternalEnvOfCommunity import InternalEnvOfCommunity
class Learning():
def __init__(self, get_observation, take_action):
"""
        Note: make sure to initialize Learning with these two functions.
Args:
get_observation (function):
take_action (function):
"""
# Logging
experiment_results_dir = os.path.join(os.path.abspath('..'), 'ROM_Experiment_results')
if not os.path.exists(experiment_results_dir):
os.makedirs(experiment_results_dir)
logging.basicConfig(filename = os.path.join(experiment_results_dir,'ROM_experiment_'+datetime.now().strftime("%Y%m%d_%H%M%S")+'.log'),
level = logging.DEBUG,
format='%(asctime)s:%(levelname)s: %(message)s')
self.get_observation = get_observation
self.take_action = take_action
def setup_learning(self):
#######################################################################
        #                    Schedule three experiments                      #
# 1. Daiwei's Experiment: Agent controls parameterized actions
# 2. Lingheng's Experiment 1: Single-Agent controls raw actions
        # 3. Lingheng's Experiment 2: Agent-Community controls raw actions
#
# Note:
# 1. It takes some time to initialize learning agents, so call
        #    setup_learning() before 9:45 am every day.
# 2. Single_Agent.stop() will take about 3 minutes to save learned models.
# 3. Agent_Community.stop() will take about 10 minutes to save learned models.
# Problem:
# We don't want visitors to feel the pause when saving learned models
# Solution:
        # To get rid of the time gap when switching behavior modes, use
        # multiple threads: when one interaction mode finishes, the next mode
        # starts interacting immediately while the previous thread keeps
        # saving its learned models in the background.
#######################################################################
# Initialize observation and action space
self.instantiate_observation_and_action_space()
# Initialize Learning Agents
self.instantiate_LAS_Agent_parameterized_action()
self.instantiate_LAS_Agent_raw_action()
self.instantiate_LAS_Agent_Community_raw_action()
# Schedule Experiments
self.schedule_experiments()
# Start Threads
self.start_threads()
def instantiate_observation_and_action_space(self):
"""
Create observation and action space:
observation space:
24 IRs shared by all agents:
self.observation_space_name (string list): sensor names in observation space
self.observation_space (gym.spaces.Box): observation space shared by all agents
action space:
1. based on 17 parameters:
self.para_action_space_name (string list): parameter names in parameter action space
self.para_action_space (gym.spaces.Box): parameter action space
2. based on raw actuators:
self.raw_action_space_name
self.raw_action_space
"""
# Observation space shared by all agents
self.observation_space_name = ['IR1-1','IR1-2','IR2-1','IR2-2','IR3-1', 'IR3-2',
'IR4-1', 'IR4-2', 'IR5-1', 'IR5-2', 'IR6-1', 'IR6-2',
'IR7-1', 'IR7-2', 'IR8-1', 'IR8-2', 'IR9-1', 'IR9-2',
'IR10-1', 'IR10-2', 'IR11-1', 'IR11-2', 'IR12-1', 'IR12-2']
sensors_dim = 24 # 24 IRs
obs_max = np.array([1.]*sensors_dim)
obs_min = np.array([0.]*sensors_dim)
self.observation_space = spaces.Box(obs_min, obs_max, dtype = np.float32)
# Parameterized action space
# 1.a) ramp up time: the time it takes for the actuator to fade to its maximum value
# 1.b) hold time: the time it that the actuator holds at the maximum value
# 1.c) ramp down time: the time it takes for the actuator to fade to 0
# 1.d) maximum brightness
# 2) the time gap between the moth starting to fade and the protocell starting to fade
# 3) time between activation of each SMA arm on breathing pore
# 4) time between activation of each breathing pore
# 5a) minimum time to wait before activating background behaviour
# 5b) maximum time to wait before activating background behaviour
# 6a) time to wait before trying to pick an actuator
# 6b) probability of successfully choosing an actuator
# 7) time between choosing SMA to actuate
# 8a) minimum time to wait before performing sweep
# 8b) maximum time to wait before performing sweep
self.para_action_space_name = ['1_a', '1_b', '1_c', '1_d', '2', '3', '4',
'5_a', '5_b', '6_a', '6_b', '7', '8_a', '8_b']
        para_actuators_dim = 17 # 17 parameters (note: only 14 parameter names are listed above)
        para_act_max = np.array([1]*para_actuators_dim)
        para_act_min = np.array([-1]*para_actuators_dim)
        self.para_action_space = spaces.Box(para_act_min, para_act_max, dtype = np.float32) # Box expects (low, high)
# Raw actuator action space
# 24 nodes, each of which has 6 SMAs, 1 Moth and 1 LED.
self.raw_action_space_name = ['sma1_node#0', 'sma2_node#0', 'sma3_node#0', 'sma4_node#0',
'sma5_node#0', 'sma6_node#0', 'sma1_node#1', 'sma2_node#1',
'sma3_node#1', 'sma4_node#1', 'sma5_node#1', 'sma6_node#1',
'sma1_node#2', 'sma2_node#2', 'sma3_node#2', 'sma4_node#2',
'sma5_node#2', 'sma6_node#2', 'sma1_node#3', 'sma2_node#3',
'sma3_node#3', 'sma4_node#3', 'sma5_node#3', 'sma6_node#3',
'sma1_node#4', 'sma2_node#4', 'sma3_node#4', 'sma4_node#4',
'sma5_node#4', 'sma6_node#4', 'sma1_node#5', 'sma2_node#5',
'sma3_node#5', 'sma4_node#5', 'sma5_node#5', 'sma6_node#5',
'sma1_node#6', 'sma2_node#6', 'sma3_node#6', 'sma4_node#6',
'sma5_node#6', 'sma6_node#6', 'sma1_node#7', 'sma2_node#7',
'sma3_node#7', 'sma4_node#7', 'sma5_node#7', 'sma6_node#7',
'sma1_node#8', 'sma2_node#8', 'sma3_node#8', 'sma4_node#8',
'sma5_node#8', 'sma6_node#8', 'sma1_node#9', 'sma2_node#9',
'sma3_node#9', 'sma4_node#9', 'sma5_node#9', 'sma6_node#9',
'sma1_node#10', 'sma2_node#10', 'sma3_node#10', 'sma4_node#10',
'sma5_node#10', 'sma6_node#10', 'sma1_node#11', 'sma2_node#11',
'sma3_node#11', 'sma4_node#11', 'sma5_node#11', 'sma6_node#11',
'sma1_node#12', 'sma2_node#12', 'sma3_node#12', 'sma4_node#12',
'sma5_node#12', 'sma6_node#12', 'sma1_node#13', 'sma2_node#13',
'sma3_node#13', 'sma4_node#13', 'sma5_node#13', 'sma6_node#13',
'sma1_node#14', 'sma2_node#14', 'sma3_node#14', 'sma4_node#14',
'sma5_node#14', 'sma6_node#14', 'sma1_node#15', 'sma2_node#15',
'sma3_node#15', 'sma4_node#15', 'sma5_node#15', 'sma6_node#15',
'sma1_node#16', 'sma2_node#16', 'sma3_node#16', 'sma4_node#16',
'sma5_node#16', 'sma6_node#16', 'sma1_node#17', 'sma2_node#17',
'sma3_node#17', 'sma4_node#17', 'sma5_node#17', 'sma6_node#17',
'sma1_node#18', 'sma2_node#18', 'sma3_node#18', 'sma4_node#18',
'sma5_node#18', 'sma6_node#18', 'sma1_node#19', 'sma2_node#19',
'sma3_node#19', 'sma4_node#19', 'sma5_node#19', 'sma6_node#19',
'sma1_node#20', 'sma2_node#20', 'sma3_node#20', 'sma4_node#20',
'sma5_node#20', 'sma6_node#20', 'sma1_node#21', 'sma2_node#21',
'sma3_node#21', 'sma4_node#21', 'sma5_node#21', 'sma6_node#21',
'sma1_node#22', 'sma2_node#22', 'sma3_node#22', 'sma4_node#22',
'sma5_node#22', 'sma6_node#22', 'sma1_node', 'sma2_node',
'sma3_node', 'sma4_node', 'sma5_node', 'sma6_node',
'light_node#0', 'light_node#1', 'light_node#2', 'light_node#3',
'light_node#4', 'light_node#5', 'light_node#6', 'light_node#7',
'light_node#8', 'light_node#9', 'light_node#10', 'light_node#11',
'light_node#12', 'light_node#13', 'light_node#14', 'light_node#15',
'light_node#16', 'light_node#17', 'light_node#18', 'light_node#19',
'light_node#20', 'light_node#21', 'light_node#22', 'light_node',
'moth_node#0', 'moth_node#1', 'moth_node#2', 'moth_node#3',
'moth_node#4', 'moth_node#5', 'moth_node#6', 'moth_node#7',
'moth_node#8', 'moth_node#9', 'moth_node#10', 'moth_node#11',
'moth_node#12', 'moth_node#13', 'moth_node#14', 'moth_node#15',
'moth_node#16', 'moth_node#17', 'moth_node#18', 'moth_node#19',
'moth_node#20', 'moth_node#21', 'moth_node#22', 'moth_node']
raw_actuators_dim = (6+1+1)*24 # (1 moth + 1 LED + 6 SMAs) * 24 nodes
raw_act_max = np.array([1]*raw_actuators_dim)
raw_act_min = np.array([-1]*raw_actuators_dim)
        self.raw_action_space = spaces.Box(raw_act_min, raw_act_max, dtype = np.float32) # Box expects (low, high)
def instantiate_LAS_Agent_parameterized_action(self):
#######################################################################
        #                       Instantiate LAS-Agent                        #
#######################################################################
# Note: 1. Set load_pretrained_agent_flag to "True" only when you have
# and want to load pretrained agent.
# 2. Keep initializing parameters unchanged if using pretrained agent.
agent_name = 'LAS_Single_Agent_Parameterized_Action'
x_order_sensor_reading = 20
x_order_sensor_reading_sliding_window = 5
x_order_sensor_reading_preprocess_type = 'max_pool_sensory_readings'#'average_pool_sensory_readings'#'concatenate_sensory_readings'
occupancy_reward_type = 'IR_distance'
interaction_mode = 'real_interaction'
load_pretrained_agent_flag = False
self.single_agent_parameterized_action = InternalEnvOfAgent(agent_name,
self.observation_space,
self.para_action_space,
self.observation_space_name,
self.para_action_space_name,
x_order_sensor_reading,
x_order_sensor_reading_sliding_window,
x_order_sensor_reading_preprocess_type,
occupancy_reward_type,
interaction_mode,
load_pretrained_agent_flag)
logging.info('Instantiate {} done!'.format(agent_name))
def instantiate_LAS_Agent_raw_action(self):
#######################################################################
        #                       Instantiate LAS-Agent                        #
#######################################################################
# Note: 1. Set load_pretrained_agent_flag to "True" only when you have
# and want to load pretrained agent.
# 2. Keep initializing parameters unchanged if using pretrained agent.
agent_name = 'LAS_Single_Agent_Raw_Action'
x_order_sensor_reading = 20
x_order_sensor_reading_sliding_window = 5
x_order_sensor_reading_preprocess_type = 'max_pool_sensory_readings'#'average_pool_sensory_readings'#'concatenate_sensory_readings'
occupancy_reward_type = 'IR_distance'
interaction_mode = 'real_interaction'
load_pretrained_agent_flag = False
self.single_agent_raw_action = InternalEnvOfAgent(agent_name,
self.observation_space,
self.raw_action_space,
self.observation_space_name,
self.raw_action_space_name,
x_order_sensor_reading,
x_order_sensor_reading_sliding_window,
x_order_sensor_reading_preprocess_type,
occupancy_reward_type,
interaction_mode,
load_pretrained_agent_flag)
logging.info('Instantiate {} done!'.format(agent_name))
def instantiate_LAS_Agent_Community_raw_action(self):
#######################################################################
        #                   Instantiate LAS-Agent-Community                  #
#######################################################################
# Note: 1. Set load_pretrained_agent_flag to "True" only when you have and want
# to load pretrained agent.
# 2. Keep initializing parameters unchanged if using pretrained agent.
community_name = 'LAS_Agent_Community_raw_action'
community_size = 3
x_order_sensor_reading = 20
x_order_sensor_reading_sliding_window = 5
x_order_sensor_reading_preprocess_type = 'max_pool_sensory_readings'#'average_pool_sensory_readings'#'concatenate_sensory_readings'
occupancy_reward_type = 'IR_distance'
interaction_mode = 'real_interaction'
load_pretrained_agent_flag = False
self.LAS_agent_community_raw_action = InternalEnvOfCommunity(community_name,
community_size,
self.observation_space,
self.raw_action_space,
self.observation_space_name,
self.raw_action_space_name,
x_order_sensor_reading,
x_order_sensor_reading_sliding_window,
x_order_sensor_reading_preprocess_type,
occupancy_reward_type,
interaction_mode,
load_pretrained_agent_flag)
logging.info('Instantiate {} done!'.format(community_name))
def interact_with_learning_agent(self, agent, end_time):
"""
Note:
self.get_observation() and self.take_action(action) are functions
passed into when initializing the object.
Args:
agent (learning agent object)
end_time (str): (in format %HH%MM%SS) the end time of interaction
"""
logging.info('{}: Start interaction. Default End_time: {}'.format(agent.name, end_time))
        # Interact until end_time
while not datetime.now().strftime("%H%M%S") > end_time:
observation = self.get_observation()
take_action_flag, action = agent.feed_observation(observation)
            if take_action_flag:
self.take_action(action)
# Save learned model
logging.info('{}: Interaction is done. Saving learned models...'.format(agent.name))
agent.stop()
logging.info('{}: Saving learned models done.'.format(agent.name))
def interact_with_prescribed_behavior(self, agent = 'prescribed_behavior', end_time = '130000'):
"""
TODO: Please put prescribed behavior in this function.
Args:
            agent (str): unused placeholder, kept only to match the signature of interact_with_learning_agent
end_time (str):(in format %HH%MM%SS) the end time of interaction
"""
logging.info('{}: Start interaction. Default End_time: {}'.format(agent, end_time))
        # Interact until end_time
while not datetime.now().strftime("%H%M%S") > end_time:
observation = self.get_observation()
# TODO: replace with prescribed-behavior i.e. 17 predefined parameters
action = []
self.take_action(action)
logging.info('{}: Interaction is done.'.format(agent))
def interaction_mode_scheduler(self, interaction_mode, agent,
start_time, end_time, schedule_start_time):
"""
Args:
interaction_mode (func): function name
agent: depends on interaction mode
1. agent object: for learning agent
                2. 'prescribed_behavior': for prescribed behavior
start_time (str):(with format'hhmmss')
end_time (str): (with format'hhmmss')
schedule_start_time (datetime object):
Returns:
interaction_thread: a delayed thread for an interaction mode which will start at a given time.
"""
start_delay = (datetime.strptime(date.today().strftime("%Y%m%d")+'-'+start_time, '%Y%m%d-%H%M%S') - schedule_start_time).total_seconds()
if start_delay < 0:
            logging.error('{} starts earlier than the scheduled time!'.format(interaction_mode.__name__))
interaction_thread = Timer(interval = start_delay,
function = interaction_mode,
kwargs={'agent': agent,
'end_time': end_time})
return interaction_thread
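    # Worked example (illustrative times): if schedule_start_time was captured
    # at 09:30:00 today and start_time is '130000', start_delay works out to
    # 12600 seconds (3.5 hours), so the returned Timer will invoke
    # interaction_mode at 13:00:00. A negative delay (start time already past)
    # is logged as an error above, but threading.Timer still fires immediately.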
def schedule_experiments(self):
# Get current time to calculate interaction start-time-delay
schedule_start_time = datetime.now()
# Schedule first experiment:
# Daiwei's Experiment: Agent controls parameterized actions
# TODO: set start and end times to '130002' and '140000'
self.first_experiment_start_time = '210801' # format: %H%M%S e.g. 1:00pm is 130000
self.first_experiment_end_time = '212800' # format: %H%M%S e.g. 2:30pm is 143000
self.first_experiment_thread = self.interaction_mode_scheduler(self.interact_with_learning_agent,
self.single_agent_parameterized_action,
self.first_experiment_start_time,
self.first_experiment_end_time,
schedule_start_time)
        # TODO: For our first test, comment out the second and third experiments
# Schedule second experiment:
# Lingheng's Experiment 1: Single-Agent controls raw actions
# TODO: set start and end times to '140002' and '150000'
self.second_experiment_start_time = '212801' # format: %H%M%S e.g. 2:30pm is 143000
self.second_experiment_end_time = '214800' # format: %H%M%S e.g. 4:00pm is 160000
self.second_experiment_thread = self.interaction_mode_scheduler(self.interact_with_learning_agent,
self.single_agent_raw_action,
self.second_experiment_start_time,
self.second_experiment_end_time,
schedule_start_time)
# Schedule third experiment:
# Lingheng's Experiment 2: Agent-Community contrls raw actions
# TODO: set start and end times to '150002' and '160000'
self.third_experiment_start_time = '212801' # format: %H%M%S e.g. 2:30pm is 143000
self.third_experiment_end_time = '214800' # format: %H%M%S e.g. 4:00pm is 160000
self.third_experiment_thread = self.interaction_mode_scheduler(self.interact_with_learning_agent,
self.LAS_agent_community_raw_action,
self.third_experiment_start_time,
self.third_experiment_end_time,
schedule_start_time)
# Schedule prescribed-behavior 1
# TODO: set start and end times to '093000' and '130000'
self.prescribed_behavior_start_time_1 = '210501' # format: %H%M%S e.g. 10:00am is 100000
self.prescribed_behavior_end_time_1 = '210800' # format: %H%M%S e.g. 1:00pm is 130000
self.prescribed_behavior_thread_1 = self.interaction_mode_scheduler(self.interact_with_prescribed_behavior,
'prescribed_behavior',
self.prescribed_behavior_start_time_1,
self.prescribed_behavior_end_time_1,
schedule_start_time)
# Schedule prescribed-behavior 2
# TODO: set start and end times to '160002' and '173000'
self.prescribed_behavior_start_time_2 = '214801' # format: %H%M%S e.g. 4:00pm is 160000
self.prescribed_behavior_end_time_2 = '215300' # format: %H%M%S e.g. 5:30pm is 173000
self.prescribed_behavior_thread_2 = self.interaction_mode_scheduler(self.interact_with_prescribed_behavior,
'prescribed_behavior',
self.prescribed_behavior_start_time_2,
self.prescribed_behavior_end_time_2,
schedule_start_time)
def start_threads(self):
# Schedule interaction with learning agent
self.first_experiment_thread.start()
logging.info('first_experiment_thread scheduled: {}-{}'.format(self.first_experiment_start_time, self.first_experiment_end_time))
self.second_experiment_thread.start()
logging.info('second_experiment_thread scheduled: {}-{}'.format(self.second_experiment_start_time, self.second_experiment_end_time))
self.third_experiment_thread.start()
        logging.info('third_experiment_thread scheduled: {}-{}'.format(self.third_experiment_start_time, self.third_experiment_end_time))
        # Schedule interaction with prescribed-behavior
self.prescribed_behavior_thread_1.start()
logging.info('prescribed_behavior_thread_1 scheduled: {}-{}'.format(self.prescribed_behavior_start_time_1, self.prescribed_behavior_end_time_1))
self.prescribed_behavior_thread_2.start()
logging.info('prescribed_behavior_thread_2 scheduled: {}-{}'.format(self.prescribed_behavior_start_time_2, self.prescribed_behavior_end_time_2))
def check_if_interactions_done(self):
if not self.first_experiment_thread.is_alive()\
and not self.second_experiment_thread.is_alive()\
and not self.third_experiment_thread.is_alive()\
and not self.prescribed_behavior_thread_1.is_alive()\
and not self.prescribed_behavior_thread_2.is_alive():
logging.info('All interactions are done.')
return True
else:
return False
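# A minimal, self-contained sketch of the delayed-start pattern that
# interaction_mode_scheduler uses above. The agent name and times here are
# hypothetical placeholders, not part of the real installation code.
def _demo_timer_scheduling():
    from datetime import timedelta

    def greet(agent, end_time):
        print('{} started; would run until {}'.format(agent, end_time))

    schedule_start_time = datetime.now()
    start_time = (schedule_start_time + timedelta(seconds=2)).strftime('%H%M%S')
    start_dt = datetime.strptime(
        date.today().strftime('%Y%m%d') + '-' + start_time, '%Y%m%d-%H%M%S')
    start_delay = (start_dt - schedule_start_time).total_seconds()
    demo_thread = Timer(interval=start_delay, function=greet,
                        kwargs={'agent': 'demo_agent', 'end_time': '235959'})
    demo_thread.start()  # greet() fires roughly 2 seconds from now
    demo_thread.join()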
if __name__ == '__main__':
"""
    TODO: make sure to initialize "Learning" with these two functions.
Args:
get_observation (function):
take_action (function):
"""
    # NOTE: Learning_System is assumed to come from the master script and to
    # expose get_observation() and take_action(action); Learning must be
    # constructed with those two callables.
    learning_system = Learning_System()
    learning = Learning(learning_system.get_observation, learning_system.take_action)
learning.setup_learning()
# TODO: put initialization work for master script in here
# Check if all interactions are done.
while True:
if learning.check_if_interactions_done():
break
import logging
from server.singleton_meta import SingletonMeta
log = logging.getLogger(__name__)
class PriceCache(metaclass=SingletonMeta):
def __init__(self):
log.debug("[PriceCache] Init new price cache")
self.price_cache = {}
def init_cache_for_ticker(self, watched_ticker_id):
log.info(f"[PriceCache] Init cache for watched ticker {watched_ticker_id}")
if self.price_cache.get(watched_ticker_id):
return
self.price_cache[watched_ticker_id] = {
"high": None,
"low": None,
"price": None
}
def cached_prices_for_ticker(self, watched_ticker_id):
return self.price_cache.get(watched_ticker_id)
def cached_price(self, watched_ticker_id, key):
cache = self.price_cache.get(watched_ticker_id)
if not cache:
return None
log.info(f"[PriceCache] Getting {key} for {watched_ticker_id}: {cache[key]}")
return cache[key]
def update_cached_price(self, watched_ticker_id, key, val):
log.info(f"[PriceCache] Updating {key} price for {watched_ticker_id}: {val}")
self.price_cache[watched_ticker_id][key] = val
def delete_watched_ticker(self, watched_ticker_id):
log.info(f"[PriceCache] Deleting cache for {watched_ticker_id}")
del self.price_cache[watched_ticker_id]
def reset_cached_values(self):
for prices in self.price_cache.values():
prices["low"] = None
prices["high"] = None
            prices["price"] = None
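# A brief usage sketch; the ticker id below is a hypothetical placeholder.
# Because PriceCache uses SingletonMeta, every call site shares one cache.
if __name__ == '__main__':
    cache = PriceCache()
    cache.init_cache_for_ticker('ticker-1')
    cache.update_cached_price('ticker-1', 'high', 198.5)
    assert PriceCache() is cache  # singleton: same instance everywhere
    print(cache.cached_price('ticker-1', 'high'))  # -> 198.5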
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flash_kinetis import Flash_Kinetis
flash_algo = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0xb510482c, 0x5120f24c, 0xf64d81c1, 0x81c11128, 0xf0218801, 0x80010101, 0x44484827, 0xf884f000,
0xd0002800, 0xbd102001, 0x47702000, 0xb5104822, 0xf0004448, 0xb920f86f, 0x2100481f, 0xf0004448,
0x491ef907, 0xf442684a, 0x604a0270, 0xb510bd10, 0x46014604, 0x22014818, 0xf0004448, 0xb930f831,
0x23004815, 0x46212201, 0xf0004448, 0x4913f8bb, 0xf442684a, 0x604a0270, 0xb5febd10, 0x460b460c,
0x46014606, 0x4615480c, 0xf0004448, 0xb958f87f, 0x21012000, 0x1000e9cd, 0x48079002, 0x4622462b,
0x44484631, 0xf8e0f000, 0x684a4904, 0x0270f442, 0xbdfe604a, 0x40052000, 0x00000004, 0x4001f000,
0x41f0e92d, 0x460d4614, 0xf846f000, 0xd11b2800, 0x1e64442c, 0xd0050521, 0xeb012101, 0x1e423114,
0x3401eb02, 0x447e4e09, 0x8024f8df, 0x42a52709, 0x6830d80a, 0xf8886005, 0xf0007007, 0x2800f8ed,
0xf505d102, 0xe7f25580, 0x81f0e8bd, 0x0000023e, 0x40020000, 0x4903b120, 0x71c82044, 0xb8dcf000,
0x47702004, 0x40020000, 0x4916b190, 0x4a166cc9, 0x6103f3c1, 0xf832447a, 0x03091011, 0x2200d00a,
0x2100e9c0, 0x60812102, 0x60c102c9, 0x47704610, 0x47702004, 0x47702064, 0xea41b128, 0x075b0302,
0x2065d003, 0x20044770, 0x68034770, 0xd804428b, 0x44116840, 0x42884418, 0x2066d201, 0x20004770,
0x00004770, 0x40048000, 0x000001d0, 0x47f0e92d, 0x0014461d, 0xd01e460e, 0xf7ff461a, 0x2800ffdd,
0x4f0ed11a, 0xf8df447f, 0xf04fa038, 0x2d000807, 0x6838d012, 0x68396006, 0x60486820, 0x68606839,
0x60883408, 0x8007f88a, 0xf886f000, 0xd1032800, 0x3d083608, 0x2004e7eb, 0x87f0e8bd, 0x00000180,
0x40020000, 0x47f0e92d, 0x4614469a, 0x4605460e, 0xffb2f7ff, 0xd1252800, 0x0101e9d5, 0xf8f1fbb0,
0xf1c84271, 0x40010000, 0x42b5424d, 0x4445d100, 0x1bafb1bc, 0xd90042a7, 0x480b4627, 0x44780939,
0x60066800, 0x22014809, 0x0a0a71c2, 0x728172c2, 0xa009f880, 0xf850f000, 0xd1032800, 0x443e1be4,
0x2000e7e5, 0x87f0e8bd, 0x00000106, 0x40020000, 0x4804b128, 0x71c22240, 0xf0007181, 0x2004b83d,
0x00004770, 0x40020000, 0x4df0e92d, 0xe9dd001c, 0x46168709, 0xd025460d, 0xff6ef7ff, 0xd11f2800,
0xb04cf8df, 0xf8df44fb, 0x2e00a04c, 0xf8dbd018, 0x600d1000, 0xf88a2202, 0x9a082007, 0x200bf88a,
0x0000f8db, 0x60816821, 0xf816f000, 0xf1b8b160, 0xd0010f00, 0x5000f8c8, 0xd0012f00, 0x60392100,
0x8df0e8bd, 0xe7fb2004, 0x1d241f36, 0xe7dc1d2d, 0x000000a0, 0x40020000, 0x2170480a, 0x21807001,
0x78017001, 0xd5fc0609, 0x06817800, 0x2067d501, 0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc,
0x00004770, 0x40020000, 0x00000000, 0x00080000, 0x00100000, 0x00200000, 0x00400000, 0x00800000,
0x01000000, 0x01000000, 0x40020004, 0x00000000,
],
'pc_init' : 0x20000021,
'pc_eraseAll' : 0x2000004D,
'pc_erase_sector' : 0x2000006F,
'pc_program_page' : 0x2000009B,
'begin_stack' : 0x20001000,
'begin_data' : 0x20003000,
'static_base' : 0x20000000 + 0x20 + 0x32c,
'page_size' : 4096
}
class Flash_k64f(Flash_Kinetis):
def __init__(self, target):
super(Flash_k64f, self).__init__(target, flash_algo)
from aiohttp import web
from discord.ext.commands import Bot
from db.models.account import Account
from db.models.user import User
from db.redis import RedisDB
from util.discord.messages import Messages
from util.env import Env
from util.regex import RegexUtil, AddressMissingException, AddressAmbiguousException
import config
import datetime
import logging
import rapidjson as json
class GrahamServer(object):
"""An AIOHTTP server that listens for callbacks and provides various APIs"""
def __init__(self, bot: Bot, host: str, port: int):
self.bot = bot
self.app = web.Application()
self.app.add_routes([
web.post('/callback', self.callback),
web.get('/ufw/{wallet}', self.ufw),
web.get('/wfu/{user}', self.wfu),
web.get('/users', self.users)
])
self.logger = logging.getLogger()
self.host = host
self.port = port
self.min_amount = 10 if Env.banano() else 0.1
    def format_js_iso(self, date: datetime.datetime) -> str:
        """Format a datetime object as a JavaScript-style ISO-8601 string"""
return datetime.datetime.strftime(date, '%Y-%m-%dT%H:%M:%S.{0}Z').format(int(round(date.microsecond / 1000.0)))
async def ufw(self, request: web.Request):
"""Return user info for specified wallet addresses
        e.g. http://server/ufw/ban_16n5c7qozokx661rneikh6e3mf978mc46qqjen7a51pwzood155bwrha6sfj+ban_37z6omyukgpgttq7bdagweaxdrdm5wjy7tdm97ggtkobdetme3bmhfayjowj"""
if 'wallet' not in request.match_info:
return web.HTTPBadRequest(reason="wallet is required")
try:
addresses = RegexUtil.find_address_matches(request.match_info['wallet'])
except AddressMissingException:
return web.HTTPBadRequest(reason="bad address specified")
accounts = await Account.filter(address__in=addresses).prefetch_related('user').all()
if accounts is None:
return web.json_response(
data={'error': 'user(s) not found'},
dumps=json.dumps
)
resp = []
for account in accounts:
resp.append(
{
'user_id': account.user.id,
'user_last_known_name': account.user.name,
'address': account.address,
'created_ts_utc': self.format_js_iso(account.user.created_at)
}
)
return web.json_response(
data=resp,
dumps=json.dumps
)
async def wfu(self, request: web.Request):
"""Return user info for specified discord IDs
e.g. http://server/wfu/303599885800964097+412286270694359052"""
if 'user' not in request.match_info:
return web.HTTPBadRequest(reason="user(s) is required")
user_ids = []
for u in request.match_info['user'].split('+'):
try:
user_ids.append(int(u.strip()))
except ValueError:
return web.HTTPBadRequest(reason="user IDs should be integers")
users = await User.filter(id__in=user_ids).prefetch_related('account').all()
if users is None:
return web.json_response(
data={'error': 'user(s) not found'},
dumps=json.dumps
)
resp = []
for user in users:
resp.append(
{
'user_id': user.id,
'user_last_known_name': user.name,
'address': user.account.address,
'created_ts_utc': self.format_js_iso(user.created_at)
}
)
return web.json_response(
data=resp,
dumps=json.dumps
)
async def users(self, request: web.Request):
cached = await RedisDB.instance().get("apiuserscache")
if cached is not None:
return web.json_response(
data=json.loads(cached),
dumps=json.dumps
)
        # Cache miss: fetch all users from the database
users = await User.all().prefetch_related('account')
resp = []
for user in users:
resp.append(
{
'user_id': user.id,
'user_last_known_name': user.name,
'address': user.account.address,
'created_ts_utc': self.format_js_iso(user.created_at)
}
)
await RedisDB.instance().set("apiuserscache", json.dumps(resp), expires=1800)
return web.json_response(
data=resp,
dumps=json.dumps
)
async def callback(self, request: web.Request):
"""Route for handling HTTP callback"""
request_json = await request.json()
        block_hash = request_json['hash']  # renamed to avoid shadowing the built-in hash()
        self.logger.debug(f"callback received {block_hash}")
# cache
        if not await RedisDB.instance().exists(f"callback:{block_hash}"):
            await RedisDB.instance().set(f"callback:{block_hash}", "val", expires=300)
else:
return web.HTTPOk()
# De-serialize block
request_json['block'] = json.loads(request_json['block'])
# only consider sends
if 'is_send' in request_json and (request_json['is_send'] or request_json['is_send'] == 'true'):
if 'amount' in request_json:
# only consider self.min_amount or larger
converted_amount = Env.raw_to_amount(int(request_json['amount']))
if converted_amount >= self.min_amount:
# Figure out of this is one of our users
link = request_json['block']['link_as_account']
account = await Account.filter(address=link).prefetch_related('user').first()
if account is None:
return web.HTTPOk()
# See if this is an internal TX
                    internal = await RedisDB.instance().exists(f"hash:{block_hash}")
if internal:
return web.HTTPOk()
self.logger.debug(f'Deposit received: {request_json["amount"]} for {account.user.id}')
amount_string = f"{Env.raw_to_amount(int(request_json['amount']))} {Env.currency_symbol()}"
discord_user = await self.bot.fetch_user(account.user.id)
if discord_user is not None:
await Messages.send_success_dm(discord_user, f"Your deposit of **{amount_string}** has been received. It will be in your available balance shortly!", header="Deposit Success", footer=f"I only notify you of deposits that are {self.min_amount} {Env.currency_symbol()} or greater.")
return web.HTTPOk()
async def start(self):
"""Start the server"""
runner = web.AppRunner(self.app, access_log = None if not config.Config.instance().debug else self.logger)
await runner.setup()
site = web.TCPSite(runner, self.host, self.port)
        await site.start()
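# A minimal sketch of driving GrahamServer from an asyncio entry point. The
# bot construction, host, and port below are illustrative assumptions, not
# the project's real startup path (which also needs Redis and the database).
if __name__ == '__main__':
    import asyncio

    async def _demo():
        bot = Bot(command_prefix='!')  # hypothetical minimal bot
        server = GrahamServer(bot, host='127.0.0.1', port=8080)
        await server.start()
        await asyncio.Event().wait()  # keep serving until interrupted

    asyncio.run(_demo())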
from base import REF_MARKER, CITATION_NEEDED_MARKER
from base import get_localized_snippet_parser
# Repository: jqueguiner/training_results_v1.0
"""NumPy implementation of losses in 3DUnet.
https://github.com/mmarcinkiewicz/training/blob/Add_unet3d/image_segmentation/unet3d/model/losses.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import REDACTED
from __future__ import print_function
import numpy as np
def softmax(x):
  # subtract the row max before exponentiating, for numerical stability
  x = x - x.max(axis=-1, keepdims=True)
  return np.exp(x) / np.exp(x).sum(-1, keepdims=True)
def cross_entropy_loss(logits: np.ndarray,
one_hot_labels: np.ndarray) -> np.ndarray:
"""Returns the cross entropy loss between some logits and some labels.
Args:
logits: Output of the model.
one_hot_labels: One-hot encoded labels. Dimensions should match the logits.
Returns:
The cross entropy, averaged over the first dimension (samples).
"""
log_softmax_logits = np.log(softmax(logits))
loss = -np.sum(one_hot_labels * log_softmax_logits, axis=-1)
return np.mean(loss)
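# Worked example (illustrative numbers): for logits [[2.0, 1.0, 0.1]] and the
# one-hot label [[1, 0, 0]], softmax gives roughly [0.659, 0.242, 0.099], so
# the cross entropy is -log(0.659) ~= 0.417.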
def compute_dice(prediction,
target,
to_onehot_y=True,
to_onehot_x=False,
use_softmax=True,
use_argmax=False,
include_background=False,
layout="NDHWC"):
"""Returns the dice coefficient between prediction and target.
Args:
prediction: Prediction.
target: Target.
    to_onehot_y: Whether to one-hot encode the target.
    to_onehot_x: Whether to one-hot encode the prediction.
    use_softmax: Whether to use softmax.
    use_argmax: Whether to use argmax.
    include_background: Whether to include background.
    layout: Tensor layout, "NDHWC" or "NCDHW".
Returns:
The dice coefficient which is essentially a measure of overlap between two
samples.
"""
smooth_nr = 1e-6
smooth_dr = 1e-6
if layout == "NCDHW":
channel_axis = 1
reduce_axis = tuple(list(range(2, len(prediction.shape))))
else:
channel_axis = -1
reduce_axis = tuple(list(range(1, len(prediction.shape) - 1)))
num_pred_ch = prediction.shape[channel_axis]
if use_softmax:
prediction = softmax(prediction)
elif use_argmax:
prediction = np.argmax(prediction, axis=channel_axis)
if to_onehot_y:
target = to_one_hot(target, layout, channel_axis)
if to_onehot_x:
prediction = to_one_hot(prediction, layout, channel_axis)
if not include_background:
assert num_pred_ch > 1, \
(f"To exclude background the prediction needs more than one channel. "
f"Got {num_pred_ch}.")
if layout == "NCDHW":
target = target[:, 1:]
prediction = prediction[:, 1:]
else:
target = target[..., 1:]
prediction = prediction[..., 1:]
assert (target.shape == prediction.shape), \
(f"Target and prediction shape do not match. Target: ({target.shape}), "
f"prediction: ({prediction.shape}).")
intersection = np.sum(target * prediction, axis=reduce_axis)
target_sum = np.sum(target, axis=reduce_axis)
prediction_sum = np.sum(prediction, axis=reduce_axis)
dice = (2.0 * intersection + smooth_nr) / (
target_sum + prediction_sum + smooth_dr)
return dice
def to_one_hot(array, layout, channel_axis):
if len(array.shape) >= 5:
array = np.squeeze(array, axis=channel_axis)
  # NOTE: the class count (3) is hardcoded for this task's label set
  array = np.array(array[..., np.newaxis] == np.arange(3), dtype=np.float32)
if layout == "NCDHW":
array = np.transpose(array, (0, 4, 1, 2, 3))
return array
def compute_dice_ce_loss(y_pred,
y_true,
to_onehot_y,
use_softmax,
layout,
                         include_background=False):
  """Returns the average of the dice coefficient and cross entropy.
  Args:
    y_pred: Prediction.
    y_true: Target.
    to_onehot_y: Whether to one-hot encode the target.
    use_softmax: Whether to use softmax.
    layout: Tensor layout, "NDHWC" or "NCDHW".
    include_background: Whether to include background.
  Returns:
    The average of the dice coefficient and cross entropy.
"""
dice = 1.0 - np.mean(
compute_dice(
y_pred,
y_true,
to_onehot_y=to_onehot_y,
use_softmax=use_softmax,
include_background=include_background))
if layout == "NCDHW":
channel_axis = 1
else:
channel_axis = -1
cross_entropy = cross_entropy_loss(y_pred,
to_one_hot(y_true, layout, channel_axis))
return (dice + cross_entropy) / 2
def compute_dice_score(y_pred,
y_true,
to_onehot_y=True,
use_argmax=True,
layout="NDHWC",
include_background=False,
compute_mean_score=True):
"""CPU compute dice score."""
dice_scores = compute_dice(
y_pred,
y_true,
to_onehot_y=to_onehot_y,
to_onehot_x=True,
use_softmax=False,
use_argmax=use_argmax,
layout=layout,
include_background=include_background)
if compute_mean_score:
return np.mean(dice_scores, axis=0)
else:
return dice_scores
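# A small self-check sketch with synthetic data. Shapes and the class count
# are illustrative; the real pipeline feeds 3D volumes from the dataset.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  logits = rng.randn(2, 4, 4, 4, 3).astype(np.float32)  # NDHWC, 3 classes
  labels = rng.randint(0, 3, size=(2, 4, 4, 4, 1))
  loss = compute_dice_ce_loss(logits, labels, to_onehot_y=True,
                              use_softmax=True, layout='NDHWC')
  score = compute_dice_score(logits, labels)
  print('dice+ce loss:', loss)
  print('per-class dice score:', score)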
#!/usr/bin/env python3
from abc import ABC, abstractmethod
class AbstractDynamicConfig(ABC):
@abstractmethod
def to_dynamic_reconfigure(self):
"""
convert config to dynamic reconfigure dict
:return: dynamic reconfigure dict
"""
pass
@abstractmethod
def from_dynamic_reconfigure(self, dynamic_reconfigure):
"""
convert dynamic reconfigure dict to config
:param dynamic_reconfigure: dynamic reconfigure dict
:return: config instance
"""
pass
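# A minimal concrete implementation sketch. The field name used here is a
# hypothetical example; a real config would mirror its ROS parameter set.
class ExampleSpeedConfig(AbstractDynamicConfig):
    def __init__(self, max_speed=1.0):
        self.max_speed = max_speed

    def to_dynamic_reconfigure(self):
        # expose the config as a plain dict for dynamic reconfigure
        return {'max_speed': self.max_speed}

    def from_dynamic_reconfigure(self, dynamic_reconfigure):
        # rebuild the config from a dynamic reconfigure dict
        self.max_speed = dynamic_reconfigure['max_speed']
        return self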
"""
extract the last layer embedding of inception3
refer: https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
import argparse
from tqdm import tqdm
import numpy as np
import json
import datetime
import sys
import os
from scipy.io import loadmat
import pickle
import torch.nn.functional as F
import torch.nn as nn
import torch
import yaml
import torchvision.transforms as transforms
import pretrainedmodels
from PIL import Image
from tensorboardX import SummaryWriter
from models.S2ILinearModel import S2ILayer2Model_IAS
import utils
class ToSpaceBGR(object):
def __init__(self,is_bgr):
self.is_bgr = is_bgr
def __call__(self,tensor):
if self.is_bgr:
new_tensor = tensor.clone()
new_tensor[0] = tensor[2]
new_tensor[2] = tensor[0]
tensor = new_tensor
return tensor
class ToRange255(object):
def __init__(self, is_255):
self.is_255 = is_255
def __call__(self, tensor):
if self.is_255:
tensor.mul_(255)
return tensor
class train(object):
def __init__(self,device,cfg_file,summaryFolder):
self.device = device
self.summaryFolder = summaryFolder
with open(cfg_file,'r') as f:
self.cfg = yaml.safe_load(f)
utils.init_seeds(self.cfg['model_hyp']['random_seed'])
attSplit = loadmat('./dataset/xlsa/'+self.cfg['dataset']['name']+'/att_splits.mat')
res101 = loadmat('./dataset/xlsa/'+self.cfg['dataset']['name']+'/res101.mat')
labels = res101['labels'].astype(int).squeeze() - 1
seen_dataLoc = attSplit['test_seen_loc'].squeeze() - 1
unseen_dataLoc = attSplit['test_unseen_loc'].squeeze() -1
seen_labels = labels[seen_dataLoc]
unseen_labels = labels[unseen_dataLoc]
self.seen_labels = np.unique(seen_labels)
self.unseen_labels = np.unique(unseen_labels)
self.clsname = [ attSplit['allclasses_names'][i][0][0] for i in range(len(attSplit['allclasses_names']))]
att_matrix = np.transpose(attSplit['att'])
self.cfg['model_hyp']['att_feats'] = att_matrix.shape[1]
self.attMatrix = att_matrix.copy()
self.attMatrix[:len(self.seen_labels)] = att_matrix[self.seen_labels]
self.attMatrix[len(self.seen_labels):] = att_matrix[self.unseen_labels]
self.attMatrix = torch.FloatTensor(self.attMatrix).to(self.device)
pass
def mkSummary(self,summaryFolder):
self.writer = SummaryWriter(summaryFolder)
def createLinearModel(self,):
"""
build Linear Model use attribute build Adjacency matrix
"""
self.model = S2ILayer2Model_IAS(att_dims=self.cfg['model_hyp']['att_feats'],img_dims=self.cfg['model_hyp']['img_feats'],tmp=10).to(self.device)
def loadDataFromCustom(self,dataType):
"""
        Load pre-extracted features and labels for the given data split
        from the customized (fine-tuned) pickle file.
"""
print('load from customed')
with open('./dataset/finetune/'+self.cfg['dataset']['name']+'/dvbeExtracted.pkl','rb') as f:
ExtractFeature = pickle.load(f)
imgfs = np.array(ExtractFeature[dataType]['features'])
labels = np.array(ExtractFeature[dataType]['labels'])
select_id, idx = np.unique(labels,return_inverse=True)
if dataType.lower() == "test_unseen":
labels = idx + len(self.seen_labels)
else:
labels = idx
imgfs = torch.FloatTensor(imgfs.reshape(-1,2048))
labels = torch.LongTensor(labels)
dataset = torch.utils.data.TensorDataset(imgfs,labels)
dataloader = torch.utils.data.DataLoader(dataset,batch_size=self.cfg['dataset_hyp']['batch_size'],shuffle=True)
return dataloader
def loss(self,):
"""
        Loss function for the linear model
        (the model output is a cosine similarity score)
"""
self.lossf = nn.CrossEntropyLoss(weight=None).to(self.device)
def optim(self,):
"""
        optimizer for the attribute-adjacency GCN
"""
self.optimizer = torch.optim.Adam(self.model.parameters(),self.cfg['model_hyp']['lr'],weight_decay=self.cfg['model_hyp']['weight_decay'])
def scheduler(self,):
"""
scheduler for optimizer
"""
#self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer,T_max=self.cfg['model_hyp']['epochs'])
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,step_size=2,gamma=0.87)
def loadChk(self,chkFile):
"""
        Load a checkpoint to resume training.
        args:
            chkFile: path to the checkpoint file
"""
print("=> load checkpoint '{}'".format(chkFile))
chk = torch.load(chkFile)
self.cfg['model_hyp']['start_epoch'] = chk['epoch']
#best_acc = chk['best_acc']
self.model.load_state_dict(chk['model'])
self.optimizer.load_state_dict(chk['optimizer'])
self.scheduler.load_state_dict(chk['scheduler'])
#return best_acc
def saveModel(self,epoch,Info,model,optimizer,scheduler,trainSeenEval,testSeenEval,testUnseenEval,H,chkfile):
"""
save Model
"""
chk = {}
chk['model'] = model.state_dict()
chk['optimizer'] = optimizer.state_dict()
chk['scheduler'] = scheduler.state_dict()
chk['Info'] = Info #'test/UnseenAcc:%.2f test/SeenAcc:%.2f H:%.2f \nfs:%.2f fu:%.2f'
chk['eval'] = {'trainSeen':trainSeenEval,\
'testSeen': testSeenEval,\
'testUnseen':testUnseenEval,\
'H':H}
chk['epoch'] = epoch
torch.save(chk, chkfile)
def trainModel_1Epoch(self,dataloader,imgfType='customed',recordFile=None):
self.model.train()
self.scheduler.step()
for images,targets in tqdm(dataloader):
images = images.to(self.device)
targets = targets.to(self.device)
if self.cfg['dataset_hyp']['imgfType'] == 'customed':
images = self.caffeRes101(images).squeeze()
preds = self.model(self.attMatrix[:len(self.seen_labels)],images,targets,TrainOrTest="Train")
# use which loss function
loss = self.lossf(preds,targets)
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(),1)
self.optimizer.step()
loss = loss.detach().cpu().tolist()
# record
if recordFile:
with open(recordFile,'a')as f:
f.write('{:>17.3e}'.format(loss))
return {
'loss':np.mean(loss),
}
def evaluation(self,dataloader,dataType='seen',accType='allCls',imgfType='customed',recordFile=None,seenOrunseenCls=None):
"""
        Evaluate the model on the input dataloader.
args:
dataloader
dataType: 'seen' : count acc, CEloss, MSEloss
'unseen' : count acc, CEloss(1), MSEloss
accType: 'allCls' : return acc mean of all cls
'allImg' : return acc mean of all images
imgfType: 'customed': use resnet101 features
'xlsa' : xlsa resnet101 features
recordFile: None : do not record
'print': print eval
filepath: record in this file
return:
accs: (int) mean of all cls or all images
decide by accType
accs_cls: (dict) mean acc according to cls
generalAccs_cls: (dict) mean generalAccs 2 cls,
splitacc: datasplit bw seen and unseen
cos_cls:(dict){cls:[sample_cos]} sample cosimilarity
"""
# imgCls generated from semantic adjacency matrix GCN
total_spp_loss = []
generalAccs = []
accs = []
generalAccs_cls = {}
accs_cls = {}
cos_cls = {}
spp_loss_cls = {}
splitaccs = []
splitacc_cls = {}
with torch.no_grad():
for images,targets in tqdm(dataloader):
images = images.to(self.device)
targets = targets.to(self.device)
if self.cfg['dataset_hyp']['imgfType'] == 'customed':
images = self.caffeRes101(images).squeeze()
generalpreds,generalpreds1 = self.model(self.attMatrix,images,TrainOrTest="Test")
if dataType.lower()=='seen':
preds,_ = self.model(self.attMatrix[:len(self.seen_labels)],images,TrainOrTest="Test")
preds = torch.argmax(preds,dim=1)
elif dataType.lower()=='unseen':
preds,_ = self.model(self.attMatrix[len(self.seen_labels):],images,TrainOrTest="Test")
preds = torch.argmax(preds,dim=1)+len(self.seen_labels)
generalpreds = torch.argmax(generalpreds,dim=1)
# seen or unseen split accuracy
splitacc = [ i in seenOrunseenCls for i in generalpreds.cpu().detach().tolist()]
splitaccs.extend(splitacc)
splitacc = np.array(splitacc)
# calculate accs
if accType == 'allImages':
acc = preds == targets
accs.extend(acc.cpu().detach().tolist())
# calculate accs_cls
for c in torch.unique(targets):
loc = targets == c
acc_c = preds[loc] == targets[loc]
generalacc_c = generalpreds[loc] == targets[loc]
dist_c = generalpreds1[loc,c].cpu().detach().tolist()
acc_c = acc_c.cpu().detach().tolist()
generalacc_c = generalacc_c.cpu().detach().tolist()
try:
accs_cls[c.cpu().detach().tolist()].extend(acc_c)
generalAccs_cls[c.cpu().detach().tolist()].extend(generalacc_c)
cos_cls[c.cpu().detach().tolist()].extend(dist_c)
splitacc_cls[c.cpu().detach().tolist()].extend(list(splitacc[loc.cpu().detach().numpy()]))
except(KeyError):
accs_cls[c.cpu().detach().tolist()] = acc_c
generalAccs_cls[c.cpu().detach().tolist()] = generalacc_c
cos_cls[c.cpu().detach().tolist()] = dist_c
splitacc_cls[c.cpu().detach().tolist()] = list(splitacc[loc.cpu().detach().numpy()])
# average class accuracy
if accType == 'allCls':
for c in accs_cls:
accs.append(np.mean(accs_cls[c]))
generalAccs.append(np.mean(generalAccs_cls[c]))
for c in accs_cls:
accs_cls[c] = np.mean(accs_cls[c])*100
generalAccs_cls[c] = np.mean(generalAccs_cls[c])*100
splitacc_cls[c] = np.mean(splitacc_cls[c])*100
return {'accs':np.mean(accs)*100,
'generalAccs':np.mean(generalAccs)*100,
'generalAcc_cls':generalAccs_cls,
'acc_cls':accs_cls,
'splitacc':np.mean(splitaccs)*100,
'splitacc_cls':splitacc_cls,
'cos_cls':cos_cls
}
def prepareModel(self,):
self.createLinearModel()
self.mkSummary(self.summaryFolder)
self.loss()
self.optim()
self.scheduler()
def trainModel(self,chkFile,recordFile,resume=False):
"""
train Model #epochs defined in cfg
args:
chkFile: save model chkpoint
recordFile: save acc and loss
"""
trainData = self.loadDataFromCustom(dataType='train_seen')
testSeenData = self.loadDataFromCustom(dataType='test_seen')
testUnseenData = self.loadDataFromCustom(dataType='test_unseen')
bestHs = 0
for epoch in range(self.cfg['model_hyp']['start_epoch'],self.cfg['model_hyp']['epochs']):
print('modelhyp:',chkFile.strip('/chk'),'epoch:',epoch)
with open(recordFile,'a') as f:
f.write('{:^5d}'.format(epoch))
# train S2I
trainSeenEval=self.trainModel_1Epoch(trainData,imgfType=self.cfg['dataset_hyp']['imgfType'])
testSeenEval = self.evaluation(testSeenData,dataType='seen',accType=self.cfg['model_hyp']['accType'],recordFile=recordFile,imgfType=self.cfg['dataset_hyp']['imgfType'],seenOrunseenCls=np.arange(0,len(self.seen_labels)))
testUnseenEval = self.evaluation(testUnseenData,dataType='unseen',accType=self.cfg['model_hyp']['accType'],recordFile=recordFile,imgfType=self.cfg['dataset_hyp']['imgfType'],seenOrunseenCls=np.arange(len(self.seen_labels),len(self.seen_labels)+len(self.unseen_labels)))
#testSeenEval = self.evaluation(testSeenData,dataType='seen',accType=self.cfg['model_hyp']['accType'],recordFile=recordFile)
#testUnseenEval = self.evaluation(testUnseenData,dataType='unseen',accType=self.cfg['model_hyp']['accType'],recordFile=recordFile)
seenCos = np.mean([ np.mean(testSeenEval['cos_cls'][i]) for i in testSeenEval['cos_cls']])
unseenCos = np.mean([ np.mean(testUnseenEval['cos_cls'][i]) for i in testUnseenEval['cos_cls']])
difseenCosBWunseenCos = seenCos - unseenCos
generalAccs = testSeenEval['generalAccs']
generalAccu = testUnseenEval['generalAccs']
H = 2*generalAccs*generalAccu/(generalAccs+generalAccu)
accs = testSeenEval['accs']
accu = testUnseenEval['accs']
fs = testSeenEval['splitacc']
fu = testUnseenEval['splitacc']
Hs = 2*fs*fu/(fs+fu)
self.writer.add_scalar('general/generalSeenAcc',generalAccs,epoch)
self.writer.add_scalar('general/genealUnseenAcc',generalAccu,epoch)
self.writer.add_scalar('general/H',H,epoch)
self.writer.add_scalar('split/SeenAcc',accs,epoch)
self.writer.add_scalar('split/UnseenAcc',accu,epoch)
self.writer.add_scalar('loss/loss',trainSeenEval['loss'],epoch)
self.writer.add_scalar('split/fs',fs,epoch)
self.writer.add_scalar('split/fu',fu,epoch)
self.writer.add_scalar('split/Hs',Hs,epoch)
self.writer.add_scalar('split/seenCos',seenCos,epoch)
self.writer.add_scalar('split/unseenCos',unseenCos,epoch)
self.writer.add_scalar('split/seenCos-unseenCos',difseenCosBWunseenCos,epoch)
for i in range(len(self.seen_labels),len(self.seen_labels)+len(self.unseen_labels)):
self.writer.add_scalar('SplitUnseen/'+str(i)+self.clsname[self.unseen_labels[i-len(self.seen_labels)]],testUnseenEval['splitacc_cls'][i],epoch)
self.writer.add_scalar('UnseenAccs/'+str(i)+self.clsname[self.unseen_labels[i-len(self.seen_labels)]],testUnseenEval['acc_cls'][i],epoch)
self.writer.add_scalar('generalUnseen/'+str(i)+self.clsname[self.unseen_labels[i-len(self.seen_labels)]],testUnseenEval['generalAcc_cls'][i],epoch)
for i in range(len(self.seen_labels)):
self.writer.add_scalar('SplitSeen/'+str(i)+self.clsname[self.seen_labels[i]],testSeenEval['splitacc_cls'][i],epoch)
self.writer.add_scalar('SeenAccs/'+str(i)+self.clsname[self.seen_labels[i]],testSeenEval['acc_cls'][i],epoch)
self.writer.add_scalar('generalseen/'+str(i)+self.clsname[self.seen_labels[i]],testSeenEval['generalAcc_cls'][i],epoch)
Info ='generalAccs:%.2f generalAccu:%.2f H:%.2f \n Accs:%.2f Accu:%.2f fs:%.2f fu:%.2f Hs:%.2f'%(generalAccs,generalAccu,H,accs,accu,testSeenEval['splitacc'],testUnseenEval['splitacc'],Hs)
print(Info)
with open(recordFile,'a') as f:
f.write('\n')
if Hs > bestHs:
bestHs = Hs
self.saveModel(epoch,Info,self.model,self.optimizer,self.scheduler,trainSeenEval,testSeenEval,testUnseenEval,Hs,chkFile+'_best_Hs.pt')
best_HsInfo = Info
if epoch%500 == 0 and epoch > 0:
self.saveModel(self.cfg['model_hyp']['start_epoch'],Info,self.model,self.optimizer,self.scheduler,trainSeenEval,testSeenEval,testUnseenEval,H,chkFile+'_'+str(epoch)+'.pt')
print('bestHs')
print(best_HsInfo)
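# For orientation, a toy sketch of the attribute-to-image matching idea the
# model above learns: classes are recognised by comparing image features
# against class attribute vectors via cosine similarity. The tensors below
# are random stand-ins, not real dataset features.
def _zsl_cosine_demo():
    att = torch.randn(10, 85)   # 10 classes x 85-dim attribute vectors
    img = torch.randn(4, 85)    # 4 image embeddings already in attribute space
    sims = F.normalize(img, dim=1) @ F.normalize(att, dim=1).t()
    return torch.argmax(sims, dim=1)  # predicted class index per image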
if __name__ =="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--modelChk',type=str,help='model chkpoint file')
parser.add_argument('--recordFile',type=str,help='record loss and acc file')
parser.add_argument('--summaryFolder',type=str,help='tensorboardX file folder')
parser.add_argument('--yaml',type=str,help='checkpoint file saved in yaml')
parser.add_argument('--device',default='cuda:0',type=str,help='cuda:0,cuda:1,cuda:2,cpu'
)
parser.add_argument('--train',action='store_true',help='train or just eval')
parser.add_argument('--resume',action='store_true',help='resume to train model')
opt = parser.parse_args()
chkFile = opt.modelChk
recordFile = opt.recordFile
device = torch.device(opt.device)
t = train(device,opt.yaml,opt.summaryFolder)
t.prepareModel()
if opt.train:
# train model
t.trainModel(chkFile=chkFile,recordFile=recordFile,resume=opt.resume)
    else:
        # evaluate model
        # NOTE: no evalModel method is defined on this class; only the
        # training path above is currently implemented.
        t.evalModel(chkFile)
#!/usr/bin/python
# Author: sc0tfree
# Twitter: @sc0tfree
# Email: <EMAIL>
import os
import socket
def generate_random_hex(length):
'''
    Generates a random byte string of the requested length: length - 1 random
    bytes followed by a newline. (Despite the name, the bytes are raw, not
    hex-encoded.)
'''
hex_string = os.urandom(length - 1)
hex_string += '\x0a'
return hex_string
host = '127.0.0.1'
port = 12345
s = socket.socket()
s.bind((host, port))
s.listen(5)
try:
while True:
c, addr = s.accept()
print 'Connection established from', addr[0], ':', addr[1]
c.send('Hello from Test Server\n')
# Echo Test
c.send('Echo Test - enter string:')
data = c.recv(1024)
print 'Echo Test - received: ', data
c.send('Echo Test - received: ' + data + '\n')
# Hex Test
c.send('Hex Test - enter length:')
data = c.recv(1024)
try:
hex_length = int(data)
except ValueError:
c.send('You must enter a number. Defaulting to 10.\n')
hex_length = 10
hex_string = generate_random_hex(hex_length)
c.send('Sending hex string...\n\n')
print 'Hex Test - sending: ', hex_string
c.send(hex_string)
c.close()
print 'Closed connection to ', addr[0], ':', addr[1]
except KeyboardInterrupt:
c.close()
print '\nExiting...'
exit(0)
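# A matching throwaway client sketch (Python 2, like the server above). It
# mirrors the accept loop: read the banner, answer the echo test, then ask
# the hex test for a byte count. Host and port assume the defaults above.
def demo_client(host='127.0.0.1', port=12345):
    c = socket.socket()
    c.connect((host, port))
    print c.recv(1024)         # banner and echo prompt
    c.send('hello')
    print c.recv(1024)         # the echoed string
    c.send('16')               # ask the hex test for 16 bytes
    print repr(c.recv(1024))
    c.close()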
"""
We have a factory that produces frying pans. It can make two kinds of
products: a plain frying pan, and a frying pan with a lid.
from abc import ABC, abstractmethod
class Pan:
parts: list
def __init__(self):
self.parts = []
def add_part(self, part):
self.parts.append(part)
def list_parts(self):
        print(f"Product: {', '.join(self.parts)}")
class Builder(ABC):
@abstractmethod
def produce_cap(self) -> None:
pass
@abstractmethod
def produce_pan(self) -> None:
pass
class PanBuilder(Builder):
_pan: Pan
def __init__(self):
self.reset()
@property
def pan(self):
pan = self._pan
self.reset()
return pan
def reset(self):
self._pan = Pan()
def produce_cap(self) -> None:
        self._pan.add_part('lid')
def produce_pan(self) -> None:
        self._pan.add_part('pan')
class Director:
builder: PanBuilder
def __init__(self, builder):
self.builder = builder
def build_pan_with_cap(self):
self.builder.produce_pan()
self.builder.produce_cap()
def build_pan(self):
self.builder.produce_pan()
if __name__ == '__main__':
pan_builder = PanBuilder()
director = Director(pan_builder)
director.build_pan()
pan_builder.pan.list_parts()
director.build_pan_with_cap()
pan_builder.pan.list_parts()
# File: setup.py
from setuptools import setup, find_packages
with open("./README.md", "r") as fh:
long_description = fh.read()
setup(
name="glasses",
version="0.0.6",
author="<NAME> & <NAME>",
author_email="<EMAIL>",
description="Compact, concise and customizable deep learning computer vision",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/FrancescoSaverioZuppichini/glasses",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
# File: src/Honeybee_OpenStudio DX Heating Coil.py
# By <NAME>
# <EMAIL>
# Honeybee started by <NAME> is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
# this component can be used to create a custom DX coil, either 1 or 2 speed
# if you specify a one speed coil, just use the high speed inputs. The low speed
# inputs will always be ignored for 1 speed coil definitions
"""
EPlus DX Heating Coil
-
Provided by Honeybee 0.0.55
Args:
_dxCoilSpeed:...0 = 1 speed, 1 = 2 speed
_name:...provide a unique coil for each one that you use
_availabilitySchedule_:... an OpenStudio or Honeybee can be plugged in here to limit the availability of the cooling coil.
_ratedHighSpeedTotalCooling_: ...This value is typically blank, it can be autosized (the Units are in Watts)/
_ratedHighSpeedCOP_: ... the efficiency at design conditions for the DX coil
_ratedLowSpeedTotalCooling_: ... This value is typically blank, it can be autosized (the Units are in Watts)/
_ratedLowSpeedCOP_: ... the efficiency at design conditions for the DX coil
_minimumOutdoorDryBulb_: ... If left blank, the default is -8C (17.6F) temperature when the compressor is shut off
        _outdoorDryBulbDefrostDisabled_ :... If left blank, the default is 5C (41F). It is the temperature below which defrost is enabled to de-ice the heat source.
_maxOutdoorDryBulbForCrankcase_: ... If left blank, the default is 10C (50F). It is the temperature above which the compressor crankcase heater is disabled.
_crankCaseHeaterCapacity_ :... If left blank, the default is zero. It is the capacity of the compressor crankcase heater (Watts), which will turn on if below the stated temperature and the compressor is not running.
_defrostStrategy_: ... If left blank, the default is 'ReverseCycle'. Two options for this 'ReverseCycle', 'Resistive'. Spelling must be correct. It is the type of heating cycle used to melt frost accumulated on the outdoor coil.
_defrostControl_: ... If left blank, the default is 'timed'. Two options are 'timed' and 'on-demand'.
_resistiveDefrostHeatCap_: If left blank, the default in honeybee is zero. It is the capacity in Watts of the resistive element used for defrost.
_Curves_ ... Not yet implemented. Allows you to specify custom part load curves for DX coils.
_unitInternalStaticPressure_ ... (units are Pascals). This item is rarely used, but helps to calculate EER and IEER for variable speed DX systems. Refers to the total internal pressure of the air handler.
Returns:
DXCoil:...return DX coil definition
"""
#high/low speed airflow between .00004027 to .00006041 m3/s per Watt
#high/low speed airflow between .00001667 to .00003355 m3/s per Watt for DOAS
#add unit internal static air pressure? will be used to calculate EER for variable volume fans (if not used, 773.3 W/m3/s for specific fan power
#COP hi lo default is 3
from clr import AddReference
AddReference('Grasshopper')
import scriptcontext as sc
import pprint
import Grasshopper.Kernel as gh
ghenv.Component.Name = "Honeybee_OpenStudio DX Heating Coil"
ghenv.Component.NickName = 'EPlusDXHeatingCoil'
ghenv.Component.Message = 'VER 0.0.55\nOCT_31_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "10 | Energy | AirsideSystems"
#compatibleHBVersion = VER 0.0.55\nAUG_25_2014
#compatibleLBVersion = VER 0.0.58\nAUG_20_2014
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
#this is to class-i-fy the dictionary
class dictToClass(object):
def __init__(self,pyDict):
self.d = pyDict
#this dictionary used for reporting messages to the only
condType = {
0:'Air Cooled',
1:'Evaporatively Cooled'
}
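# For illustration, dictToClass simply wraps a parameter dict so downstream
# Honeybee components can read coil settings back out, e.g. (made-up values):
#   coil = dictToClass({'name': 'myDXCoil', 'ratedCOP': 3.5})
#   coil.d['ratedCOP']   # -> 3.5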
def main():
print 'Use this component to override a default DX heating coil'
print 'please note: '
print 'capacity units are in Watts at the rated condition (not including fan heat.)'
print 'COP is a dimensionless engineering units at the rated condition.'
print 'The rated condition is: '
    print 'air entering the heating coil at 21.11C drybulb/15.55C wetbulb, air entering the outdoor coil at 8.33C drybulb/6.11C wetbulb,'
#setup
DXCoil = []
pp = pprint.PrettyPrinter(indent=4)
#main warnings and error checking of user input
#if the Defrost strategy is resistive, the user should have to provide a resistive capacity
if _defrostStrategy_ != None:
if _defrostStrategy_ == 'Resistive':
if _resistiveDefrostHeatCap_ == None:
print 'You have specified a defrost strategy of Resistive, but you have not set a Defrost Heat Capacity (in Watts)'
print 'The default resistive heater capacity in Honeybee is 0 Watts. Please provide a resistive capacity.'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, 'Please provide a resistiveDefrostHeaterCap. See readMe output for more details.')
elif _defrostStrategy_ != "ReverseCycle":
print 'The only allowable defrostStragegy inputs are "Resistive" and "ReverseCycle". Any other inputs will be ignored.'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, 'Your defrost strategy is invalid and this component will not run.')
return DXCoil
if _Curves_ != None:
print 'Custom efficiency and capacity curves will be provided in a future release of Honeybee.'
w = gh.GH_RuntimeMessageLevel.Remark
ghenv.Component.AddRuntimeMessage(w, 'Your curves definition will be ignored.')
if _dxCoilSpeed == 0:
if _ratedLowSpeedAirflowRate_ != None:
message = 'All low speed settings are ignored when DX Coil is Single Speed (dxCoilSpeed=0).'
print message
w = gh.GH_RuntimeMessageLevel.Remark
ghenv.Component.AddRuntimeMessage(w, message)
if _ratedLowSpeedTotalHeating_ != None:
message = 'All low speed settings are ignored when DX Coil is Single Speed (dxCoilSpeed=0).'
print message
w = gh.GH_RuntimeMessageLevel.Remark
ghenv.Component.AddRuntimeMessage(w, message)
if _ratedLowSpeedCOP_ != None:
message = 'All low speed settings are ignored when DX Coil is Single Speed (dxCoilSpeed=0).'
print message
w = gh.GH_RuntimeMessageLevel.Remark
ghenv.Component.AddRuntimeMessage(w, message)
if sc.sticky.has_key('honeybee_release'):
#check Honeybee version
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component):
return
except:
            warning = "You need a newer version of Honeybee to use this component." + \
" Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return DXCoil
if _dxCoilSpeed == None:
print 'Before you can begin....'
print 'you must provide a coil speed to use this component'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Please provide a dxCoil Speed for the coil.")
elif _dxCoilSpeed == 0:
print 'We are now hunting for Honeybee defaults for 1 Speed DX Heating Coils...'
try:
hb_1xDXCoil = sc.sticky["honeybee_1xDXHeatingCoilParams"]().oneSpeedDXDict
print 'We have found the Honeybee default for 1 Speed DX Heating Coils: '
except:
print 'We could not find the Honeybee default for 1 Speed DX Heating Coils'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Cannot locate default Honeybee 1 Speed DX Heating Coil.")
return DXCoil
if _name!=None:
pp.pprint(hb_1xDXCoil)
print ''
coil = {
'name':_name,
'availSch':_availabilitySchedule_,
'ratedAirflowRate':_ratedHighSpeedAirflowRate_,
'ratedTotalHeating':_ratedHighSpeedTotalHeating_,
'ratedCOP':_ratedHighSpeedCOP_,
'minOutdoorDryBulb': _minimumOutdoorDryBulb_,
'outdoorDBDefrostEnabled': _outdoorDryBulbDefrostDisabled_,
'outdoorDBCrankcase': _maxOutdoorDryBulbForCrankcase_,
'crankcaseCapacity': _crankCaseHeaterCapacity_,
'defrostStrategy': _defrostStrategy_,
'defrostControl': _defrostControl_,
'resistiveDefrostCap': _resistiveDefrostHeatCap_,
'Curves':_Curves_
}
#update the hive
actions = []
updatedCoilParams = {}
updatedCoilParams['type'] = _dxCoilSpeed
for key in hb_1xDXCoil.keys():
if coil.has_key(key) and coil[key] != None:
s = key + ' has been updated to ' + str(coil[key])
actions.append(s)
updatedCoilParams[key] = coil[key]
else:
s = key + ' is still set to Honeybee Default: ' + str(hb_1xDXCoil[key])
actions.append(s)
updatedCoilParams[key] = hb_1xDXCoil[key]
                #one speed coil output to class
                DXCoil = dictToClass(updatedCoilParams)
                print 'your coil definition has been uploaded and is ready for use. Your coil:'
pp.pprint(updatedCoilParams)
print ''
print 'actions completed for your coil definition: '
for action in actions:
print action
else:
print 'Before you can begin....'
print 'you must provide a unique name for the coil to use this component'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Please provide a name for this coil.")
return DXCoil
elif (_dxCoilSpeed == 1):
print 'We currently only support one speed DX Heating Coils.'
print 'Please set dxCoilSpeed to Single Speed (=0)'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Honeybee currently only supports one speed coils.")
return DXCoil
print 'We are now hunting for Honeybee defaults for 2 Speed DX Heating Coils...'
try:
hb_2xDXCoil = sc.sticky["honeybee_2xDXHeatingCoilParams"]().twoSpeedDXDict
print 'We have found the Honeybee default for 2 Speed DX Heating Coils: '
except:
print 'We could not find the Honeybee default for 2 Speed DX Heating Coils'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Cannot locate default Honeybee 2 Speed DX Heating Coil.")
return DXCoil
if _name!=None:
pp.pprint(hb_2xDXCoil)
print ''
coil = {
'name':_name,
'availSch':_availabilitySchedule_,
'ratedHighSpeedAirflowRate':_ratedHighSpeedAirflowRate_,
'ratedHighSpeedTotalHeating':_ratedHighSpeedTotalHeating_,
'ratedHighSpeedCOP':_ratedHighSpeedCOP_,
'ratedLowSpeedAirflowRate':_ratedLowSpeedAirflowRate_,
'ratedLowSpeedTotalHeating':_ratedLowSpeedTotalHeating_,
'ratedLowSpeedCOP':_ratedLowSpeedCOP_,
'minOutdoorDryBulb': _minimumOutdoorDryBulb_,
'outdoorDBDefrostEnabled': _outdoorDryBulbDefrostDisabled_,
'outdoorDBCrankcase': _maxOutdoorDryBulbForCrankcase_,
'crankcaseCapacity': _crankCaseHeaterCapacity_,
'defrostStrategy': _defrostStrategy_,
'defrostControl': _defrostControl_,
'resistiveDefrostCap': _resistiveDefrostHeatCap_,
'Curves':_Curves_
}
#update the hive
actions = []
updatedCoilParams = {}
updatedCoilParams['type'] = _dxCoilSpeed
for key in hb_2xDXCoil.keys():
if coil.has_key(key) and coil[key] != None:
s = key + ' has been updated to ' + str(coil[key])
actions.append(s)
updatedCoilParams[key] = coil[key]
else:
s = key + ' is still set to Honeybee Default: ' + str(hb_2xDXCoil[key])
actions.append(s)
updatedCoilParams[key] = hb_2xDXCoil[key]
#two speed coil output to class
DXCoil = dictToClass(updatedCoilParams)
                print 'your coil definition has been uploaded and is ready for use. Your coil:'
pp.pprint(updatedCoilParams)
print ''
print 'actions completed for your coil definition: '
for action in actions:
print action
else:
print 'Before you can begin....'
print 'you must provide a unique name for the coil to use this component'
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Please provide a name for this coil.")
else:
print "You should first let Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should let Honeybee to fly...")
return DXCoil
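#Example of typical inputs (values are illustrative; in Grasshopper these
#arrive through the component's input ports, and names ending in "_" are
#optional):
# _name = "DXHeatingCoil_1"
# _dxCoilSpeed = 0                   # 0 = single speed
# _ratedHighSpeedCOP_ = 3.3
# _defrostStrategy_ = "Resistive"
# _resistiveDefrostHeatCap_ = 2000   # Watts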
DXCoil = main() | StarcoderdataPython |
1863058 |
import os
import time
import ftplib
from urllib.parse import urlparse as urllib_urlparse
import urllib3
import bs4
http = urllib3.PoolManager()
def urlparse(url):
"""
:param str url:
:return: (scheme, netloc, path, params, query, fragment)
:rtype: urllib.parse.ParseResult
"""
return urllib_urlparse(url, allow_fragments=False)
kMaxFtpDepth = 10
class OtherException(Exception):
pass
class NotFoundException(OtherException):
pass
class TemporaryException(Exception):
pass
def list_dir(url):
"""
:param str url:
:returns: tuple of lists: (dirs, files). both are absolute urls
:rtype: (list[str],list[str])
"""
o = urlparse(url)
if o.scheme == "ftp":
try:
return ftp_list_dir(url)
except ftplib.error_temp as exc:
time.sleep(1) # sleep to not hammer too much
raise TemporaryException(exc)
except ftplib.Error as exc:
# All others are probably errors where we cannot recover from.
# Most common are some sort of 5xx errors (no such file etc).
# However, some FTP servers wrongly classify certain errors,
# and we check for them first.
if "the maximum number of allowed clients" in str(exc):
time.sleep(1) # sleep to not hammer too much
raise TemporaryException(exc)
if "the maximum number of connections" in str(exc):
time.sleep(1)
raise TemporaryException(exc)
raise OtherException(exc)
except ftplib.all_errors as exc:
# These might be network errors, etc.
# This is very much temporary.
raise TemporaryException("undefined other expected: %s" % (str(exc) or repr(exc)))
elif o.scheme in ('http', 'https'):
return http_list_dir(url)
raise NotImplementedError
def ftp_list_dir(url):
"""
:param str url:
:returns: tuple of lists: (dirs, files). both are absolute urls
:rtype: (list[str],list[str])
"""
o = urlparse(url)
ftp = ftplib.FTP()
kwargs = {"host": o.hostname or o.netloc}
if o.port:
kwargs["port"] = o.port
ftp.connect(**kwargs)
if len(os.path.normpath(o.path).split("/")) > kMaxFtpDepth:
raise OtherException("max ftp depth reached in %r" % url)
with ftp:
kwargs = {}
if o.username:
kwargs["user"] = o.username
if o.password:
kwargs["passwd"] = <PASSWORD>
ftp.login(**kwargs)
path = o.path
if path[:1] != "/":
path = "/" + path # add leading slash
while path[1:2] == "/": # remove leading double slashes
path = path[1:]
path = os.path.normpath(path)
if len(path) > 1 and path[-1:] == "/":
path = path[:-1] # remove trailing slash
ftp.cwd(path)
cur_pwd = ftp.pwd()
if cur_pwd != path:
raise OtherException("path doesnt match: %r vs pwd %r" % (path, cur_pwd))
lines = []
ftp.dir(o.path, lines.append)
if not lines:
return [], []
if "<DIR>" in lines[0] or lines[0][:1] not in "d-l":
return _ftp_list_dir_windows(url, lines)
else:
return _ftp_list_dir_unix(url, lines)
# thanks https://github.com/laserson/ftptree/blob/master/crawltree.py
def _ftp_list_dir_unix(url, lines):
"""
:param str url:
:param list[str] lines:
:returns: tuple of lists: (dirs, files). both are absolute urls
:rtype: (list[str],list[str])
"""
dirs, files = [], []
for line in lines:
if not line:
continue
fields = line.split()
if len(fields) < 9:
raise ValueError("Unix listing, unexpected line, too few fields: %r" % line)
name = ' '.join(fields[8:])
if line[0] == 'd':
container = dirs
elif line[0] == '-':
container = files
elif line[0] == 'l':
continue
else:
raise ValueError("Unix listing, unexpected line, type: %r" % line)
container.append(url.rstrip("/") + "/" + name)
return dirs, files
def _ftp_list_dir_windows(url, lines):
"""
:param str url:
:param list[str] lines:
:returns: tuple of lists: (dirs, files). both are absolute urls
:rtype: (list[str],list[str])
"""
dirs, files = [], []
for line in lines:
if not line:
continue
fields = line.split()
if len(fields) < 4:
raise ValueError("Windows listing, unexpected line, too few fields: %r" % line)
name = ' '.join(fields[3:])
if fields[2].strip() == '<DIR>':
container = dirs
else:
container = files
container.append(url.rstrip("/") + "/" + name)
return dirs, files
def _get_base_url(url):
"""
:param str url: e.g. "http://localhost/some_dir/index.html"
:return: e.g. "http://localhost/some_dir/"
:rtype: str
"""
if url.endswith('/'):
return url
start_idx = url.index('://') + len('://')
if '/' not in url[start_idx:]: # Just 'http://domain.com'.
return url + '/'
return url[:url.rindex('/') + 1]
def http_list_dir(url):
"""
:param str url:
:returns: tuple of lists: (dirs, files). both are absolute urls
:rtype: (list[str],list[str])
"""
r = http.request('GET', url)
if r.status != 200:
if r.status == 404:
raise NotFoundException("HTTP Return code %i, reason: %s" % (r.status, r.reason))
raise OtherException("HTTP Return code %i, reason: %s" % (r.status, r.reason))
for req in r.retries.history:
if req.redirect_location:
url = req.redirect_location
base_url = _get_base_url(url)
bs = bs4.BeautifulSoup(r.data, "html5lib") # Parse.
# This is just a good heuristic.
dirs = []
files = []
for sub_url in [anchor['href'] for anchor in bs.findAll('a', href=True)]:
# Take all relative paths only.
if ':' in sub_url:
continue
if sub_url.startswith('/'):
continue
if sub_url.startswith('.'):
continue
# Ignore any starting with '?' such as '?C=N;O=D'.
if sub_url.startswith('?'):
continue
# Ending with '/' is probably a dir.
if sub_url.endswith('/'):
dirs += [base_url.rstrip("/") + "/" + sub_url]
else:
files += [base_url.rstrip("/") + "/" + sub_url]
return dirs, files
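# A minimal sketch of driving this module (the URL is a placeholder); since
# list_dir raises TemporaryException on transient errors, a small retry loop
# is the natural calling pattern:
if __name__ == "__main__":
    example_url = "https://example.com/pub/"  # hypothetical
    for _attempt in range(3):
        try:
            dirs, files = list_dir(example_url)
            print("dirs:", dirs)
            print("files:", files)
            break
        except TemporaryException:
            time.sleep(1)  # transient error; back off and retry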
| StarcoderdataPython |
3347877 | #!/usr/bin/env python
import os
import json
class Config():
CALIBRATION = -11600.00
_config = None
_config_file_Path = None
def __init__(self, config_file_path ):
if ( True == os.path.isfile ( config_file_path ) ):
with open ( config_file_path, "r" ) as f:
self._config = json.load ( f )
else:
self._config = {}
#
# https://en.wikipedia.org/wiki/Keg#Sixth_barrel
#
self._config["keg_weight"] = 14
self._config["full_weight" ] = 0
self._config["weight" ] = 0
self._config["beer_name" ] = "Guinness"
self._config["beer_type" ] = "Stout"
self._config["calibration" ] = self.CALIBRATION
self._config_file_Path = config_file_path
def _sync (self):
with open ( self._config_file_Path, "w+" ) as f:
json.dump ( self._config, f, indent=4 )
"""
""
"" Beer Info
""
"""
def set_beer_type(self, beer_type ):
self._config["beer_type"] = beer_type
self._sync()
def get_beer_type(self):
if ( "beer_type" not in self._config ):
self.set_beer_type("")
return self._config["beer_type"]
def set_beer_name(self, beer_name ):
self._config["beer_name"] = beer_name
self._sync()
def get_beer_name(self):
if ( "beer_name" not in self._config ):
self.set_beer_name("")
return self._config["beer_name"]
"""
""
"" WEIGHT
""
"""
def set_keg_weight(self, weight):
#
# Already set in pounds, no need to calibrate
#
self._config["keg_weight"] = weight
self._sync()
def get_keg_weight(self):
if ( "keg_weight" not in self._config ):
self.set_keg_weight(0)
return self._config["keg_weight"]
def set_full_weight(self, weight):
weight -= self.get_base_weight()
weight /= self.get_calibration()
self._config["full_weight" ] = weight
self._sync()
def get_full_weight(self):
if ( "full_weight" not in self._config ):
return 0
return self._config["full_weight" ]
def set_calibration(self,calibration):
self._config["calibration"] = calibration
self._sync()
def get_calibration(self):
if ( "calibration" not in self._config ):
self.set_calibration(0)
return self._config["calibration"]
def set_base_weight (self, weight):
self._config["base_weight"] = weight
self._sync()
def get_base_weight (self):
if ( "base_weight" not in self._config ):
self.set_base_weight(0)
return self._config["base_weight"]
def set_weight(self, weight):
weight -= self.get_base_weight()
weight /= self.get_calibration()
self._config["weight"] = weight
self._sync()
def get_weight(self):
if ( "weight" not in self._config ):
self.set_weight(0)
return self._config["weight"]
"""
""
"" TEMPERATURE
""
"""
def get_temperature(self):
if ( "temperature" not in self._config ):
self.set_temperature(0)
return self._config["temperature"]
def set_temperature(self,temperature):
self._config["temperature"] = temperature
self._sync()
"""
""
"" HUMIDITY
""
"""
def get_humidity(self):
if ( "humidity" not in self._config ):
self.set_humidity(0)
return self._config["humidity"]
def set_humidity(self,humidity):
self._config["humidity"] = humidity
self._sync()
def dup (self):
#
# Return a copy
#
return dict ( self._config )
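# A minimal sketch of typical use (the path is a placeholder; raw readings
# are assumed to be load-cell counts). set_weight() subtracts the base
# reading and divides by the calibration factor to get pounds:
if __name__ == "__main__":
    cfg = Config("/tmp/keg_config.json")  # hypothetical path
    cfg.set_base_weight(8423000)          # raw tare reading (assumed)
    cfg.set_calibration(Config.CALIBRATION)
    cfg.set_weight(8300000)               # raw loaded reading (assumed)
    print(cfg.get_weight())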
| StarcoderdataPython |
283574 | import re
def read_file(path: str = "input") -> str:
with open(path) as file:
return file.read()
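# Input records are blank-line-separated blocks of whitespace-separated
# key:value pairs, e.g. (illustrative sample, not the real puzzle input):
#
#   ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
#   byr:1937 iyr:2017 cid:147 hgt:183cm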
def part_1():
fields = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
content = read_file().split("\n\n")
print("Solution for part 1 of day 4:", sum((set(y.split(":")[0] for y in x.split()) - {"cid"}) == fields for x in content))
def part_2():
rules = {
"byr": re.compile(r"(19[2-9][0-9])|(200[0-2])"),
"iyr": re.compile(r"20(1[0-9]|20)"),
"eyr": re.compile(r"20(2[0-9]|30)"),
"hgt": re.compile(r"((1[5-8][0-9])|(19[0-3]))cm|(([56][0-9])|(7[0-6]))in"),
"hcl": re.compile(r"#[0-9a-f]{6}"),
"ecl": re.compile(r"amb|blu|brn|gry|grn|hzl|oth"),
"pid": re.compile(r"[0-9]{9}"),
}
content = read_file().split("\n\n")
valid = 0
for passport in content:
passport = dict(x.split(":") for x in passport.split())
if set(passport.keys()) - {"cid"} == set(rules.keys()):
for field, value in passport.items():
if field == "cid":
continue
if not rules[field].fullmatch(value):
break
else:
valid += 1
print("Solution for part 2 of day 4:", valid)
part_1()
part_2()
| StarcoderdataPython |
1790706 | <gh_stars>0
# -*- encoding: utf-8 -*-
import pprint
import requests
import datetime
from influxdb import InfluxDBClient
TODOS_API = "https://api.pomotodo.com/1/todos"
HEADER = {
    "Authorization": "token"  # a personal access token follows "token " (redacted here)
}
def get_todos():
    res = requests.get(TODOS_API, headers=HEADER)
data = res.json()
list_data = list()
for entry in data:
single_data = {
"measurement": "pomotodo",
"tags": {
"name": ""
},
"time": None,
"fields": {
"count": None
}
}
if entry["description"] in [u'\u5b9e\u8df5', u'\u5b66\u4e60']:
single_data["tags"]["name"] = entry["description"]
single_data["time"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
single_data["fields"]["count"] = float(entry["costed_pomo_count"])
list_data.append(single_data)
client = InfluxDBClient(host='localhost', port=8086, database='pomotodo')
client.write_points(list_data)
if __name__ == '__main__':
get_todos()
| StarcoderdataPython |
4870099 | from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext, Row
import os
""" This function obtains the name of
summary statistics
"""
def get_txt_file_name_summary_statistics():
return 'vs_energies_summary_statistics.txt'
""" This function obtains the name of
summary statistics
"""
def get_json_file_name_summary_statistics():
return 'vs_energies_summary_statistics.json'
def get_summary_statistics(sc, rdd_vs_energies_sorted):
sqlCtx = SQLContext(sc)
vs_energies_sorted_table = sqlCtx.createDataFrame(rdd_vs_energies_sorted)
vs_energies_sorted_table.registerTempTable("vs_energies_sorted")
summary_statistics = sqlCtx.sql("SELECT count(energy) as total, min(energy) as min_e, max(energy) as max_e, avg(energy) as avg_e FROM vs_energies_sorted")
return summary_statistics
def save_txt_summary_statistics(path_analysis, summary_statistics):
text_file = os.path.join(path_analysis, get_txt_file_name_summary_statistics())
f_file = open(text_file, "w")
energies_out = summary_statistics.map(lambda p: "Total_Model: {},Min_Energy: {},Max_Energy: {},AVG_Energy: {}".format(p.total,p.min_e,p.max_e,p.avg_e)).collect()
for l in energies_out[0].split(","):
line = l+"\n"
f_file.write(line)
f_file.close()
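# A minimal sketch of wiring these helpers together (the app name, paths and
# energies are placeholders; the DataFrame.map(...) call used above assumes
# the old Spark 1.x API):
#
#   conf = SparkConf().setAppName("vs-energies")
#   sc = SparkContext(conf=conf)
#   rdd = sc.parallelize([Row(energy=-7.2), Row(energy=-6.5), Row(energy=-5.9)])
#   stats = get_summary_statistics(sc, rdd)
#   save_txt_summary_statistics("/tmp/analysis", stats)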
| StarcoderdataPython |
5077100 | <filename>tests/test_accountdata_filter.py
import os
import datetime
import pytest
from homeplotter.accountdata import AccountData
from homeplotter.timeseries import TimeSeries
resource_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'example_data'))
cat_path = os.path.join(resource_path,"categories.json")
tag_path = os.path.join(resource_path,"tags.json")
data_path1 = os.path.join(resource_path,"data1.csv")
data_path2 = os.path.join(resource_path,"data2.csv")
def test_filter__date():
acc_data1 = AccountData(data_path1,cat_path)
first_date = acc_data1.get_column("date")[0]
next_date = acc_data1.get_column("date")[1]
acc_data1.filter_data("date",">",first_date)
assert(acc_data1.get_column("date")[0] == next_date)
@pytest.mark.parametrize("fun",[">",">=","==","!=","<=","<"] )
def test_filter__amount(fun):
acc_data1 = AccountData(data_path1,cat_path)
acc_data1.filter_data("amount",fun,200)
for amount in acc_data1.get_column("amount"):
if fun == ">":
assert(amount>200)
elif fun == ">=":
assert(amount>=200)
elif fun == "==":
assert(amount==200)
elif fun == "!=":
assert(amount!=200)
elif fun == "<=":
assert(amount<=200)
elif fun == "<":
assert(amount<200)
def test_filter__category():
acc_data1 = AccountData(data_path1,cat_path)
acc_data1.filter_data("category","==","cat2")
assert(acc_data1.get_column("category")[0]=="cat2")
def test_filter__type_ok():
acc_data1 = AccountData(data_path1,cat_path)
acc_data1.filter_data("amount",">",100)
acc_data1.filter_data("amount",">",200.5)
acc_data1.filter_data("text","!=","test")
acc_data1.filter_data("category","==","cat1")
acc_data1.filter_data("date",">",datetime.date(2021,1,4))
def test_filter__type_wrong():
acc_data1 = AccountData(data_path1,cat_path)
with pytest.raises(ValueError):
acc_data1.filter_data("amount","==","text")
with pytest.raises(ValueError):
acc_data1.filter_data("date",">",2)
with pytest.raises(ValueError):
acc_data1.filter_data("text",">","text")
with pytest.raises(ValueError):
acc_data1.filter_data("category","==",datetime.date(2021,1,1))
def test_reset_filter():
acc_data1 = AccountData(data_path1,cat_path)
original_data = acc_data1.get_data()
acc_data1.filter_data("amount","==",1000)
filtered_data = acc_data1.get_data()
acc_data1.reset_filter()
reset_data = acc_data1.get_data()
assert(original_data==reset_data)
assert(reset_data!=filtered_data)
def test_filter__categories():
acc_data = AccountData(data_path1,cat_path)
acc_data.filter_data("category","!=","cat1")
assert("cat1" not in acc_data.get_categories())
acc_data.filter_data("category","==","cat3")
assert(acc_data.get_categories()==["cat3"])
def test_filter__tags():
acc_data = AccountData(data_path1,tag_file=tag_path)
acc_data.filter_data("tags","!=","tag1")
assert("tag1" not in acc_data.get_tags())
acc_data.filter_data("tags","==","tag3")
assert(acc_data.get_tags()==["tag3"])
def test_filter__multi_tags():
#If a data point has multiple tags, those tags should be preserved
acc_data = AccountData(data_path1,tag_file=tag_path)
acc_data.filter_data("tags","==","överföring")
assert(len(acc_data.get_tags())>1)
def test_filter__tag_list():
acc_data = AccountData(data_path1,tag_file=tag_path)
acc_data.filter_data("tags","==",["tag1","överföring"])
#Should only contain the once that contains all the tags.
assert(sorted(acc_data.get_tags())==sorted(["tag1","överföring"]))
def test_filter__multi_tags_list():
#If a data point has multiple tags, those tags should be preserved even if list is used
acc_data = AccountData(data_path1,tag_file=tag_path)
acc_data.filter_data("tags","==",["överföring"])
assert(len(acc_data.get_tags())>1)
def test_filter__tag_empty():
acc_data = AccountData(data_path1,tag_file=tag_path)
#Should only return those that lack any tags.
acc_data.filter_data("tags","==",[])
assert(acc_data.get_tags()==[])
assert(len(acc_data.get_data())>0)
def test_filter__not_tag_list():
acc_data = AccountData(data_path1,tag_file=tag_path)
acc_data.filter_data("tags","!=",["tag1","överföring"])
#Should still contain överföring, just not any that also tag1
assert("överföring" in acc_data.get_tags())
def test_filter__not_tag_empty():
acc_data = AccountData(data_path1,tag_file=tag_path)
#Should only return those that has tags
acc_data.filter_data("tags","!=",[])
assert(all(len(tags)>0 for tags in acc_data.get_column("tags")))
def test_filter__empty():
#If filtering twice with an inverted filter the result should be empty
acc_data = AccountData(data_path1,cat_path)
acc_data.filter_data("amount",">=",300)
acc_data.filter_data("amount","<",300)
assert(len(acc_data.get_data())==0)
#Categories should also be empty
assert(len(acc_data.get_categories())==0)
#It should be possible to still call get_timeseries()
assert(type(acc_data.get_timeseries())==TimeSeries)
| StarcoderdataPython |
s = float(input('The starting salary is: R$'))
a = 15
ns = (s * (1 + a / 100))
print('The salary with a raise of {}% is: R${:.2f}'.format(a, ns))
| StarcoderdataPython |
1607587 | <filename>contrib/devtools/github-merge.py
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Cruro Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
import os
from sys import stdin,stdout,stderr
import argparse
import hashlib
import subprocess
import sys
import json
import codecs
from urllib.request import Request, urlopen
from urllib.error import HTTPError
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')
# OS specific configuration for terminal attributes
ATTR_RESET = ''
ATTR_PR = ''
ATTR_NAME = ''
ATTR_WARN = ''
COMMIT_FORMAT = '%H %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
ATTR_RESET = '\033[0m'
ATTR_PR = '\033[1;36m'
ATTR_NAME = '\033[0;36m'
ATTR_WARN = '\033[1;31m'
COMMIT_FORMAT = '%C(bold blue)%H%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
'''
Get named configuration option from git repository.
'''
try:
return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8')
except subprocess.CalledProcessError:
return default
def get_response(req_url, ghtoken):
req = Request(req_url)
if ghtoken is not None:
req.add_header('Authorization', 'token ' + ghtoken)
return urlopen(req)
def retrieve_json(req_url, ghtoken, use_pagination=False):
'''
Retrieve json from github.
Return None if an error happens.
'''
try:
reader = codecs.getreader('utf-8')
if not use_pagination:
return json.load(reader(get_response(req_url, ghtoken)))
obj = []
page_num = 1
while True:
req_url_page = '{}?page={}'.format(req_url, page_num)
result = get_response(req_url_page, ghtoken)
obj.extend(json.load(reader(result)))
link = result.headers.get('link', None)
if link is not None:
link_next = [l for l in link.split(',') if 'rel="next"' in l]
if len(link_next) > 0:
page_num = int(link_next[0][link_next[0].find("page=")+5:link_next[0].find(">")])
continue
break
return obj
except HTTPError as e:
error_message = e.read()
print('Warning: unable to retrieve pull information from github: %s' % e)
print('Detailed error: %s' % error_message)
return None
except Exception as e:
print('Warning: unable to retrieve pull information from github: %s' % e)
return None
def retrieve_pr_info(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/pulls/"+pull
return retrieve_json(req_url,ghtoken)
def retrieve_pr_comments(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/issues/"+pull+"/comments"
return retrieve_json(req_url,ghtoken,use_pagination=True)
def retrieve_pr_reviews(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/pulls/"+pull+"/reviews"
return retrieve_json(req_url,ghtoken,use_pagination=True)
def ask_prompt(text):
print(text,end=" ",file=stderr)
stderr.flush()
reply = stdin.readline().rstrip()
print("",file=stderr)
return reply
def get_symlink_files():
files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines())
ret = []
for f in files:
if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000:
ret.append(f.decode('utf-8').split("\t")[1])
return ret
def tree_sha512sum(commit='HEAD'):
# request metadata for entire tree, recursively
files = []
blob_by_name = {}
for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
name_sep = line.index(b'\t')
metadata = line[:name_sep].split() # perms, 'blob', blobid
assert(metadata[1] == b'blob')
name = line[name_sep+1:]
files.append(name)
blob_by_name[name] = metadata[2]
files.sort()
# open connection to git-cat-file in batch mode to request data for all blobs
# this is much faster than launching it per file
p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
overall = hashlib.sha512()
for f in files:
blob = blob_by_name[f]
# request blob
p.stdin.write(blob + b'\n')
p.stdin.flush()
# read header: blob, "blob", size
reply = p.stdout.readline().split()
assert(reply[0] == blob and reply[1] == b'blob')
size = int(reply[2])
# hash the blob data
intern = hashlib.sha512()
ptr = 0
while ptr < size:
bs = min(65536, size - ptr)
piece = p.stdout.read(bs)
if len(piece) == bs:
intern.update(piece)
else:
raise IOError('Premature EOF reading git cat-file output')
ptr += bs
dig = intern.hexdigest()
assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
# update overall hash with file hash
overall.update(dig.encode("utf-8"))
overall.update(" ".encode("utf-8"))
overall.update(f)
overall.update("\n".encode("utf-8"))
p.stdin.close()
if p.wait():
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()
def get_acks_from_comments(head_commit, comments):
# Look for abbreviated commit id, because not everyone wants to type/paste
# the whole thing and the chance of collisions within a PR is small enough
head_abbrev = head_commit[0:6]
acks = []
for c in comments:
review = [l for l in c['body'].split('\r\n') if 'ACK' in l and head_abbrev in l]
if review:
acks.append((c['user']['login'], review[0]))
return acks
def make_acks_message(head_commit, acks):
if acks:
        ack_str ='\n\nACKs for top commit:\n'
for name, msg in acks:
ack_str += ' {}:\n'.format(name)
ack_str += ' {}\n'.format(msg)
else:
ack_str ='\n\nTop commit has no ACKs.\n'
return ack_str
def print_merge_details(pull, title, branch, base_branch, head_branch, acks):
print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
if acks is not None:
if acks:
print('{}ACKs:{}'.format(ATTR_PR, ATTR_RESET))
for (name, message) in acks:
print('* {} {}({}){}'.format(message, ATTR_NAME, name, ATTR_RESET))
else:
print('{}Top commit has no ACKs!{}'.format(ATTR_WARN, ATTR_RESET))
def parse_arguments():
epilog = '''
In addition, you can set the following git configuration variables:
githubmerge.repository (mandatory),
user.signingkey (mandatory),
user.ghtoken (default: none).
    githubmerge.host (default: git@github.com),
githubmerge.branch (no default),
githubmerge.testcmd (default: none).
'''
parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
epilog=epilog)
parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
help='Pull request ID to merge')
parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
return parser.parse_args()
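# Example invocation (repository and pull number are illustrative):
#
#   git config githubmerge.repository owner/repo
#   git config user.signingkey <KEYID>
#   ./github-merge.py 12345 master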
def main():
# Extract settings from git repo
repo = git_config_get('githubmerge.repository')
    host = git_config_get('githubmerge.host','git@github.com')
opt_branch = git_config_get('githubmerge.branch',None)
testcmd = git_config_get('githubmerge.testcmd')
ghtoken = git_config_get('user.ghtoken')
signingkey = git_config_get('user.signingkey')
if repo is None:
print("ERROR: No repository configured. Use this command to set:", file=stderr)
print("git config githubmerge.repository <owner>/<repo>", file=stderr)
sys.exit(1)
if signingkey is None:
print("ERROR: No GPG signing key set. Set one using:",file=stderr)
print("git config --global user.signingkey <key>",file=stderr)
sys.exit(1)
if host.startswith(('https:','http:')):
host_repo = host+"/"+repo+".git"
else:
host_repo = host+":"+repo
# Extract settings from command line
args = parse_arguments()
pull = str(args.pull[0])
# Receive pull information from github
info = retrieve_pr_info(repo,pull,ghtoken)
if info is None:
sys.exit(1)
title = info['title'].strip()
body = info['body'].strip()
# precedence order for destination branch argument:
# - command line argument
# - githubmerge.branch setting
# - base branch for pull (as retrieved from github)
# - 'master'
branch = args.branch or opt_branch or info['base']['ref'] or 'master'
# Initialize source branches
head_branch = 'pull/'+pull+'/head'
base_branch = 'pull/'+pull+'/base'
merge_branch = 'pull/'+pull+'/merge'
local_merge_branch = 'pull/'+pull+'/local-merge'
devnull = open(os.devnull, 'w', encoding="utf8")
try:
subprocess.check_call([GIT,'checkout','-q',branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*',
'+refs/heads/'+branch+':refs/heads/'+base_branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot find pull request #%s or branch %s on %s." % (pull,branch,host_repo), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
head_commit = subprocess.check_output([GIT,'log','-1','--pretty=format:%H',head_branch]).decode('utf-8')
assert len(head_commit) == 40
except subprocess.CalledProcessError:
print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError:
print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
sys.exit(3)
subprocess.check_call([GIT,'checkout','-q',base_branch])
subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])
try:
# Go up to the repository's root.
toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
os.chdir(toplevel)
# Create unsigned merge commit.
if title:
firstline = 'Merge #%s: %s' % (pull,title)
else:
firstline = 'Merge #%s' % (pull,)
message = firstline + '\n\n'
message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%H %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n'
try:
subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','--no-gpg-sign','-m',message.encode('utf-8'),head_branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot be merged cleanly.",file=stderr)
subprocess.check_call([GIT,'merge','--abort'])
sys.exit(4)
logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
if logmsg.rstrip() != firstline.rstrip():
print("ERROR: Creating merge failed (already merged?).",file=stderr)
sys.exit(4)
symlink_files = get_symlink_files()
for f in symlink_files:
print("ERROR: File %s was a symlink" % f)
if len(symlink_files) > 0:
sys.exit(4)
# Compute SHA512 of git tree (to be able to detect changes before sign-off)
try:
first_sha512 = tree_sha512sum()
except subprocess.CalledProcessError:
print("ERROR: Unable to compute tree hash")
sys.exit(4)
print_merge_details(pull, title, branch, base_branch, head_branch, None)
print()
# Run test command if configured.
if testcmd:
if subprocess.call(testcmd,shell=True):
print("ERROR: Running %s failed." % testcmd,file=stderr)
sys.exit(5)
# Show the created merge.
diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
if diff:
print("WARNING: merge differs from github!",file=stderr)
reply = ask_prompt("Type 'ignore' to continue.")
if reply.lower() == 'ignore':
print("Difference with github ignored.",file=stderr)
else:
sys.exit(6)
else:
# Verify the result manually.
print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
print("Type 'exit' when done.",file=stderr)
if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
os.putenv('debian_chroot',pull)
subprocess.call([BASH,'-i'])
second_sha512 = tree_sha512sum()
if first_sha512 != second_sha512:
print("ERROR: Tree hash changed unexpectedly",file=stderr)
sys.exit(8)
# Retrieve PR comments and ACKs and add to commit message, store ACKs to print them with commit
# description
comments = retrieve_pr_comments(repo,pull,ghtoken) + retrieve_pr_reviews(repo,pull,ghtoken)
if comments is None:
print("ERROR: Could not fetch PR comments and reviews",file=stderr)
sys.exit(1)
acks = get_acks_from_comments(head_commit=head_commit, comments=comments)
message += make_acks_message(head_commit=head_commit, acks=acks)
# end message with SHA512 tree hash, then update message
message += '\n\nTree-SHA512: ' + first_sha512
try:
subprocess.check_call([GIT,'commit','--amend','--no-gpg-sign','-m',message.encode('utf-8')])
except subprocess.CalledProcessError:
print("ERROR: Cannot update message.", file=stderr)
sys.exit(4)
# Sign the merge commit.
print_merge_details(pull, title, branch, base_branch, head_branch, acks)
while True:
reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
if reply == 's':
try:
subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
break
except subprocess.CalledProcessError:
print("Error while signing, asking again.",file=stderr)
elif reply == 'x':
print("Not signing off on merge, exiting.",file=stderr)
sys.exit(1)
# Put the result in branch.
subprocess.check_call([GIT,'checkout','-q',branch])
subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
finally:
# Clean up temporary branches.
subprocess.call([GIT,'checkout','-q',branch])
subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)
# Push the result.
while True:
reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
if reply == 'push':
subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
break
elif reply == 'x':
sys.exit(1)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1943068 | <reponame>SunsetWolf/qlib
from qlib.data.dataset.handler import DataHandler, DataHandlerLP
EPSILON = 1e-4
class HighFreqHandler(DataHandlerLP):
def __init__(
self,
instruments="csi300",
start_time=None,
end_time=None,
infer_processors=[],
learn_processors=[],
fit_start_time=None,
fit_end_time=None,
drop_raw=True,
):
def check_transform_proc(proc_l):
new_l = []
for p in proc_l:
p["kwargs"].update(
{
"fit_start_time": fit_start_time,
"fit_end_time": fit_end_time,
}
)
new_l.append(p)
return new_l
infer_processors = check_transform_proc(infer_processors)
learn_processors = check_transform_proc(learn_processors)
data_loader = {
"class": "QlibDataLoader",
"kwargs": {
"config": self.get_feature_config(),
"swap_level": False,
"freq": "1min",
},
}
super().__init__(
instruments=instruments,
start_time=start_time,
end_time=end_time,
data_loader=data_loader,
infer_processors=infer_processors,
learn_processors=learn_processors,
drop_raw=drop_raw,
)
def get_feature_config(self):
fields = []
names = []
template_if = "If(IsNull({1}), {0}, {1})"
template_paused = "Select(Gt($hx_paused_num, 1.001), {0})"
def get_normalized_price_feature(price_field, shift=0):
# norm with the close price of 237th minute of yesterday.
if shift == 0:
template_norm = "{0}/DayLast(Ref({1}, 243))"
else:
template_norm = "Ref({0}, " + str(shift) + ")/DayLast(Ref({1}, 243))"
template_fillnan = "FFillNan({0})"
# calculate -> ffill -> remove paused
feature_ops = template_paused.format(
template_fillnan.format(
template_norm.format(template_if.format("$close", price_field), template_fillnan.format("$close"))
)
)
return feature_ops
fields += [get_normalized_price_feature("$open", 0)]
fields += [get_normalized_price_feature("$high", 0)]
fields += [get_normalized_price_feature("$low", 0)]
fields += [get_normalized_price_feature("$close", 0)]
fields += [get_normalized_price_feature("$vwap", 0)]
names += ["$open", "$high", "$low", "$close", "$vwap"]
fields += [get_normalized_price_feature("$open", 240)]
fields += [get_normalized_price_feature("$high", 240)]
fields += [get_normalized_price_feature("$low", 240)]
fields += [get_normalized_price_feature("$close", 240)]
fields += [get_normalized_price_feature("$vwap", 240)]
names += ["$open_1", "$high_1", "$low_1", "$close_1", "$vwap_1"]
# calculate and fill nan with 0
template_gzero = "If(Ge({0}, 0), {0}, 0)"
fields += [
template_gzero.format(
template_paused.format(
"If(IsNull({0}), 0, {0})".format("{0}/Ref(DayLast(Mean({0}, 7200)), 240)".format("$volume"))
)
)
]
names += ["$volume"]
fields += [
template_gzero.format(
template_paused.format(
"If(IsNull({0}), 0, {0})".format(
"Ref({0}, 240)/Ref(DayLast(Mean({0}, 7200)), 240)".format("$volume")
)
)
)
]
names += ["$volume_1"]
return fields, names
class HighFreqBacktestHandler(DataHandler):
def __init__(
self,
instruments="csi300",
start_time=None,
end_time=None,
):
data_loader = {
"class": "QlibDataLoader",
"kwargs": {
"config": self.get_feature_config(),
"swap_level": False,
"freq": "1min",
},
}
super().__init__(
instruments=instruments,
start_time=start_time,
end_time=end_time,
data_loader=data_loader,
)
def get_feature_config(self):
fields = []
names = []
template_if = "If(IsNull({1}), {0}, {1})"
template_paused = "Select(Gt($hx_paused_num, 1.001), {0})"
# template_paused = "{0}"
template_fillnan = "FFillNan({0})"
fields += [
template_fillnan.format(template_paused.format("$close")),
]
names += ["$close0"]
fields += [
template_paused.format(
template_if.format(
template_fillnan.format("$close"),
"$vwap",
)
)
]
names += ["$vwap0"]
fields += [template_paused.format("If(IsNull({0}), 0, {0})".format("$volume"))]
names += ["$volume0"]
fields += [template_paused.format("If(IsNull({0}), 0, {0})".format("$factor"))]
names += ["$factor0"]
return fields, names
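# A minimal sketch of constructing the handlers (instrument set and dates are
# placeholders; qlib.init(...) with 1min data is assumed to have been called
# beforehand):
#
#   handler = HighFreqHandler(
#       instruments="csi300",
#       start_time="2020-01-01", end_time="2020-02-01",
#       fit_start_time="2020-01-01", fit_end_time="2020-01-15",
#   )
#   df = handler.fetch()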
| StarcoderdataPython |
8026576 | '''
Implements extension to d1_common.resource_map to assist with
populating an index of ORE relationships.
'''
import logging
from d1_common import resource_map
class OreParser(resource_map.ResourceMap):
def getRelations(self):
'''
Retrieve the dataset relationships from package.
Returns: {
metadata_pids: [],
resource_map_pids: [],
data_pids: []
}
'''
res = {"metadata_pids":[],
"resource_map_pids": [],
"data_pids": []
}
res["resource_map_pids"].append(self.getResourceMapPid())
res["metadata_pids"] = self.getAggregatedScienceMetadataPids()
res["data_pids"] = self.getAggregatedScienceDataPids()
return res
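# A minimal sketch of use (loading the OAI-ORE document is delegated to the
# base ResourceMap class, which is rdflib-based; the parse() call shown is an
# assumption about that inherited API):
#
#   parser = OreParser()
#   parser.parse(data=ore_document_bytes, format="xml")
#   print(parser.getRelations())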
| StarcoderdataPython |
159906 | <reponame>Samayel/sdr-gnuradio-projects<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: FMradio (PlutoSDR)
# GNU Radio version: v3.8.2.0-60-g25b63e7e
from distutils.version import StrictVersion
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print("Warning: failed to XInitThreads()")
from PyQt5 import Qt
from gnuradio import qtgui
from gnuradio.filter import firdes
import sip
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import filter
from gnuradio import gr
import sys
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import eng_notation
from gnuradio.qtgui import Range, RangeWidget
import iio
from gnuradio import qtgui
class FMradio_PlutoSDR(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "FMradio (PlutoSDR)")
Qt.QWidget.__init__(self)
self.setWindowTitle("FMradio (PlutoSDR)")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "FMradio_PlutoSDR")
try:
if StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
self.restoreGeometry(self.settings.value("geometry").toByteArray())
else:
self.restoreGeometry(self.settings.value("geometry"))
except:
pass
##################################################
# Variables
##################################################
self.volume = volume = 1
self.transition = transition = 1e6
self.samp_rate = samp_rate = 1000000
self.quadrature = quadrature = 500e3
self.freq = freq = 101800000
self.cutoff = cutoff = 100e3
self.audio_dec = audio_dec = 10
##################################################
# Blocks
##################################################
self._volume_range = Range(0, 5, 0.2, 1, 200)
self._volume_win = RangeWidget(self._volume_range, self.set_volume, 'volume', "counter_slider", float)
self.top_grid_layout.addWidget(self._volume_win)
self._freq_range = Range(96000000, 104000000, 200000, 101800000, 200)
self._freq_win = RangeWidget(self._freq_range, self.set_freq, 'Frequency', "counter_slider", int)
self.top_grid_layout.addWidget(self._freq_win)
self.rational_resampler_xxx_1 = filter.rational_resampler_ccc(
interpolation=1,
decimation=int(samp_rate/quadrature),
taps=None,
fractional_bw=None)
self.rational_resampler_xxx_0 = filter.rational_resampler_fff(
interpolation=48,
decimation=50,
taps=None,
fractional_bw=None)
self.qtgui_sink_x_0 = qtgui.sink_c(
1024, #fftsize
firdes.WIN_BLACKMAN_hARRIS, #wintype
freq, #fc
samp_rate, #bw
"", #name
True, #plotfreq
True, #plotwaterfall
True, #plottime
True #plotconst
)
self.qtgui_sink_x_0.set_update_time(1.0/10)
self._qtgui_sink_x_0_win = sip.wrapinstance(self.qtgui_sink_x_0.pyqwidget(), Qt.QWidget)
self.qtgui_sink_x_0.enable_rf_freq(True)
self.top_grid_layout.addWidget(self._qtgui_sink_x_0_win)
self.low_pass_filter_0 = filter.fir_filter_ccf(
1,
firdes.low_pass(
1,
samp_rate,
cutoff,
transition,
firdes.WIN_HAMMING,
6.76))
        self.iio_pluto_source_0 = iio.pluto_source('ip:pluto.local', freq, samp_rate, 2000000, 32768, True, True, True, 'manual', 60, '', True)
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_ff(volume)
self.audio_sink_0 = audio.sink(48000, '', True)
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=quadrature,
audio_decimation=audio_dec,
)
##################################################
# Connections
##################################################
self.connect((self.analog_wfm_rcv_0, 0), (self.rational_resampler_xxx_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.audio_sink_0, 0))
self.connect((self.iio_pluto_source_0, 0), (self.rational_resampler_xxx_1, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.qtgui_sink_x_0, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.low_pass_filter_0, 0))
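        # Signal chain summary (as wired above): Pluto IQ at 1 Msps ->
        # rational resampler (decimate by samp_rate/quadrature = 2) to 500 ksps ->
        # 100 kHz low-pass -> WFM receiver (audio_decimation = 10) to 50 ksps ->
        # 48/50 resampler to 48 kHz -> audio sink, scaled by the volume slider.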
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "FMradio_PlutoSDR")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_volume(self):
return self.volume
def set_volume(self, volume):
self.volume = volume
self.blocks_multiply_const_vxx_0.set_k(self.volume)
def get_transition(self):
return self.transition
def set_transition(self, transition):
self.transition = transition
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.cutoff, self.transition, firdes.WIN_HAMMING, 6.76))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
        self.iio_pluto_source_0.set_params(self.freq, self.samp_rate, 2000000, True, True, True, 'manual', 60, '', True)
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.cutoff, self.transition, firdes.WIN_HAMMING, 6.76))
self.qtgui_sink_x_0.set_frequency_range(self.freq, self.samp_rate)
def get_quadrature(self):
return self.quadrature
def set_quadrature(self, quadrature):
self.quadrature = quadrature
def get_freq(self):
return self.freq
    def set_freq(self, freq):
        self.freq = freq
        # Retune the PlutoSDR as well as the display when the slider moves.
        self.iio_pluto_source_0.set_params(self.freq, self.samp_rate, 2000000, True, True, True, 'manual', 60, '', True)
        self.qtgui_sink_x_0.set_frequency_range(self.freq, self.samp_rate)
def get_cutoff(self):
return self.cutoff
def set_cutoff(self, cutoff):
self.cutoff = cutoff
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.cutoff, self.transition, firdes.WIN_HAMMING, 6.76))
def get_audio_dec(self):
return self.audio_dec
def set_audio_dec(self, audio_dec):
self.audio_dec = audio_dec
def main(top_block_cls=FMradio_PlutoSDR, options=None):
if StrictVersion("4.5.0") <= StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def sig_handler(sig=None, frame=None):
Qt.QApplication.quit()
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
timer = Qt.QTimer()
timer.start(500)
timer.timeout.connect(lambda: None)
def quitting():
tb.stop()
tb.wait()
qapp.aboutToQuit.connect(quitting)
qapp.exec_()
if __name__ == '__main__':
main()
| StarcoderdataPython |
6494930 | from .util import normalize, rotx, roty, rotz, get_rgba, translate, scale
from .shapes import GlQuad, GlTri, GlVertices, GlCube, GlSphericalRect, GlSphericalCirc, GlCylinder, GlSphericalPoints, GlSphericalTexturedRect
| StarcoderdataPython |
3475647 | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from pylero.base_polarion import BasePolarion
class FieldDiff(BasePolarion):
"""Object to handle the Polarion WSDL tns3:FieldDiff class
Attributes:
added (ArrayOf_xsd_anyType)
after (anyType)
before (anyType)
collection (boolean)
field_name (string)
removed (ArrayOf_xsd_anyType)
"""
_cls_suds_map = {"added": "added",
"after": "after",
"before": "before",
"collection": "collection",
"field_name": "fieldName",
"removed": "removed",
"uri": "_uri",
"_unresolved": "_unresolved"}
_obj_client = "tracker_client"
_obj_struct = "tns3:FieldDiff"
| StarcoderdataPython |
5004391 | # Generated by Django 3.1.4 on 2020-12-19 12:39
import blog.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20201211_2246'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('file', models.FileField(upload_to=blog.models.generate_random_name)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
| StarcoderdataPython |
9662573 | # -*- coding: utf-8 -*-
"""A client for interacting with APICURON."""
from .api import (
Achievement,
DESCRIPTION_URL,
Description,
RESUBMISSION_URL,
Report,
Submission,
Term,
resubmit_curations,
submit_description,
)
__all__ = [
# URLs
"DESCRIPTION_URL",
"RESUBMISSION_URL",
# Data Models
"Term",
"Achievement",
"Description",
"Report",
"Submission",
# Utilities
"submit_description",
"resubmit_curations",
]
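# A minimal sketch of intended use (field values are illustrative; see the
# api module for the full models and required fields):
#
#   from apicuron_client import Description, submit_description
#
#   desc = Description(resource_id="my-db", resource_name="My DB", ...)
#   submit_description(desc)  # assumed to read the APICURON token from config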
| StarcoderdataPython |
11255956 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-12-10 16:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '0003_signup_is_ready'),
]
operations = [
migrations.CreateModel(
name='Mail',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('email', models.EmailField(blank=True, max_length=200, null=True)),
('subject', models.CharField(max_length=300)),
('body', models.TextField(help_text="Use '|' for seperating paras", max_length=5000)),
('button', models.BooleanField(default=False)),
('button_name', models.CharField(blank=True, max_length=200, null=True)),
('button_path', models.CharField(blank=True, max_length=200, null=True)),
('user_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='account.UserAccount')),
],
options={
'abstract': False,
},
),
]
| StarcoderdataPython |
6477745 | import abc
import warnings
from enum import Enum
from math import ceil
from .imaging import ImagingInformation, Modes
class LabViewVersions(Enum):
pre2018 = "pre-2018 (original)"
v231 = "2.3.1"
class LabViewHeader(metaclass=abc.ABCMeta):
"""A class to represent all information stored in a LabView header file.
This abstract class only offers some basic functionality and structure.
Different subclasses should be defined to provide special behaviour for
particular versions of the LabView setup used in the lab.
"""
property_names = {}
@classmethod
def from_file(cls, filename):
"""Create an object to hold the information in a LabView header file."""
# Parse the file
with open(filename, 'r') as ini:
fields = []
section = ''
parsed_fields = {}
for line in ini:
line = line.strip()
if len(line) > 0:
if line.startswith('['):
section = line[1:-1]
parsed_fields[section] = {}
line_number = 0 # used to count tab-separated lines
elif '=' in line:
words = line.split('=')
key, value = words[0].strip(), words[1].strip()
fields.append([section, key, value])
try:
value = float(value)
except ValueError:
pass
if isinstance(value, str) and value[0] == value[-1] == '"':
value = value[1:-1]
parsed_fields[section][key] = value
elif '\t' in line:
# Delay parsing these lines until later
line_number += 1
key = 'line_{}'.format(line_number)
fields.append([section, key, line])
else:
warnings.warn("Unrecognised non-blank line in {}: {}".format(filename, line))
# Decide which version to instantiate
try:
version = parsed_fields['LOGIN']['Software Version']
except KeyError:
# older versions do not store the LabView version
return LabViewHeaderPre2018(fields, parsed_fields)
else:
if version == '2.3.1':
return LabViewHeader231(fields, parsed_fields)
else:
raise ValueError('Unsupported LabView version {}.'.format(version))
def __init__(self, fields, processed_fields):
"""Create a header object from the given raw and processed fields.
Subclasses can add specialised behaviour by overriding this.
"""
# Keep track of the raw fields in case we need to return them later
# (such as to add them to an NWB file).
self._raw_fields = fields
# Accessing the header's information should normally be through the
# processed fields, stored in self.sections.
self._sections = processed_fields
self._imaging_mode = self._determine_imaging_mode()
def __getitem__(self, item):
"""Retrieve an entry from the header's fields."""
return self._sections[item]
@property
@abc.abstractmethod
def version(self):
"""The version of LabView as found in the header file.
Can be used to "branch" behaviour based on the version, rather than
inspecting the class.
"""
pass
@property
def imaging_mode(self):
return self._imaging_mode
@abc.abstractmethod
def _determine_imaging_mode(self):
pass
@abc.abstractmethod
def _imaging_section(self):
"""Get the section of the header that holds the imaging parameters."""
pass
def get_imaging_information(self):
# Imaging parameters can be stored in different sections of the header
# depending on the LabView version.
section = self._imaging_section()
# Parameter names also vary between versions, so use the appropriate ones.
property_names = self.property_names
# Also cast the integer parameters since they are read as floats.
cycles_per_trial = int(section[property_names["number_of_cycles"]])
gains = {"Red": section[property_names["gain_red"]],
"Green": section[property_names["gain_green"]]}
frame_size = int(section[property_names["frame_size"]])
field_of_view = section[property_names["field_of_view"]]
number_of_miniscans = int(section[property_names["number_of_miniscans"]])
dwell_time = section[property_names["dwell_time"]]
return ImagingInformation(cycles_per_trial, gains, frame_size, field_of_view,
number_of_miniscans, dwell_time)
def determine_trial_times(self):
"""Try to extract the start and stop time of each trial.
Raise an error if this is impossible because the information is not
included in the header.
"""
raise NotImplementedError
def get_raw_fields(self):
"""Get the fields of the header as directly read from the file.
Returns a list of (section, key, value) triples containing strings,
without any processing applied. Useful for storing the original header
in NWB files for clearer provenance.
"""
return self._raw_fields
class LabViewHeaderPre2018(LabViewHeader):
property_names = {
"frame_size": "frame size",
"field_of_view": "field of view",
"dwell_time": "dwelltime (us)",
"number_of_cycles": "number of cycles",
"number_of_miniscans": "number of miniscans",
"gain_red": "pmt 1",
"gain_green": "pmt 2",
}
@property
def version(self):
return LabViewVersions.pre2018
def _determine_imaging_mode(self):
if self['GLOBAL PARAMETERS']['number of poi'] > 0:
return Modes.pointing
elif self['GLOBAL PARAMETERS']['number of miniscans'] > 0:
return Modes.miniscan
else:
raise ValueError('Unsupported imaging type: numbers of poi and miniscans are zero.')
def _imaging_section(self):
# In the older version, parameters were stored in the global section.
return self["GLOBAL PARAMETERS"]
class LabViewHeader231(LabViewHeader):
property_names = {
"frame_size": "Frame Size",
"field_of_view": "field of view",
"dwell_time": "pixel dwell time (us)",
"number_of_cycles": "Number of cycles",
"number_of_miniscans": "Number of miniscans",
"gain_red": "pmt 1",
"gain_green": "pmt 2",
}
# In this version of LabView, the trial times are stored in their own
# (misleadingly titled) section of the header.
trial_times_section = 'Intertrial FIFO Times'
def __init__(self, fields, processed_fields):
super().__init__(fields, processed_fields)
self._parse_trial_times()
@property
def version(self):
return LabViewVersions.v231
def _determine_imaging_mode(self):
volume_imaging = self['IMAGING MODES']['Volume Imaging']
functional_imaging = self['IMAGING MODES']['Functional Imaging']
if volume_imaging == 'TRUE' and functional_imaging == 'TRUE':
raise ValueError('Unsupported imaging type: only one of "Volume '
'Imaging" and "Functional Imaging" can be true.')
if volume_imaging == 'TRUE':
return Modes.volume
elif functional_imaging == 'TRUE':
mode_name = self['FUNCTIONAL IMAGING']['Functional Mode']
if mode_name == "Point":
return Modes.pointing
elif mode_name == "Patch":
return Modes.miniscan
else:
                raise ValueError('Unrecognised imaging mode: {}. Valid options'
                                 ' are: "Point", "Patch".'.format(mode_name))
else:
raise ValueError('Unsupported imaging type: either "Volume Imaging"'
' or "Functional Imaging" must be true.')
def _imaging_section(self):
# In LabView version 2.3.1, imaging parameters are stored under the
# relevant imaging mode section.
imaging_section_name = ("VOLUME IMAGING"
if self.imaging_mode is Modes.volume
else "FUNCTIONAL IMAGING")
return self[imaging_section_name]
def _parse_trial_times(self):
# The relevant entries in the raw fields are triples whose first
# element is the section header; select them based on that.
time_fields = [field
for field in self._raw_fields
if field[0] == self.trial_times_section]
assert len(time_fields) > 0, 'Trial times not found in header!'
for line in time_fields:
# The actual text of the line is the third element in the triple
words = line[2].split('\t')
            assert len(words) == 2, 'Expected exactly two columns for trial time'
# Lines start with a line number (with decimal points) followed by a time
key, value = int(float(words[0])), float(words[1])
self._sections[self.trial_times_section][key] = value
def determine_trial_times(self):
trial_times = []
parsed_times = self[self.trial_times_section]
number_of_trials = ceil(len(parsed_times) / 2)
        # Occasionally, the end time of the last trial will be missing, in
        # which case it is assigned None and determined later from the speed data.
last_time_present = (len(parsed_times) == 2 * number_of_trials)
for i in range(number_of_trials):
start = parsed_times[2 * i]
if i < (number_of_trials-1) or last_time_present:
end = parsed_times[2 * i + 1]
else:
end = None # determine final 'end' later from speed data
trial_times.append((start, end))
return trial_times
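# A minimal sketch of the trial-time pairing above, assuming a hypothetical
# header whose 'Intertrial FIFO Times' section parsed to three entries with
# the final end time missing (values are illustrative, not from the source):
#
#     parsed_times = {0: 1.5, 1: 4.2, 2: 6.0}
#     number_of_trials = ceil(3 / 2)              # == 2
#     determine_trial_times() -> [(1.5, 4.2), (6.0, None)]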
| StarcoderdataPython |
11237355 | <gh_stars>0
#!/usr/bin/env python
"""
Indexer module
Manages the multiprocess approach to indexing the database. This module spawns a
fixed number of worker processes, where each worker is fed repository URLs fetched
from the queue. These are passed to the indexing object.
The worker is defined by worker.py, which is the root of each process's execution.
"""
import pika
import sys
import worker
from shutil import rmtree
from time import sleep
from algthm.utils.file import dir_empty
from cfg.loader import cfg
from multiprocessing import Process
from logger import logger
from dex.core.db import MongoConnection
from dex.core.exceptions.indexer import IndexerBootFailure
from logging import CRITICAL, getLogger
from datetime import datetime
from elasticsearch import Elasticsearch, ElasticsearchException
logger.setup_logging()
logger = logger.get_logger('dex')
pika_logger = getLogger('pika')
pika_logger.setLevel(CRITICAL)
def initialize_workers(num_workers, target, daemon=True):
"""
Initializes the worker processes.
"""
workers = []
process = None
print '> initializing {} workers ..'.format(num_workers),
for i in range(num_workers):
try:
process = Process(target=target, args=(i + 1,))
process.daemon = daemon
process.start()
workers.append(process)
sys.stdout.write('\r')
sys.stdout.write('> %s workers initialized' % (i + 1))
sys.stdout.flush()
sleep(cfg.settings.general.worker_cooling)
except RuntimeError:
pass
print ' .. ok'
return workers
def test_db_connection(db_conn):
"""
Tests that the db connection is alive and well.
"""
# TODO: implement mongo connection test
return True
def test_mq_connection(mq_conn):
"""
Tests that the mq connection is alive and well.
"""
# TODO: implement mq conn test
return True
def test_es_connection(es_conn):
return es_conn.ping()
def cool_off(duration=3, char='*'):
"""
Throws up a progress bar for the given duration.
"""
interval = duration / 100.0
for i in range(101):
sys.stdout.write('\r')
sys.stdout.write('\033[1;34m%-82s %d\033[0m' %
(char * (int(i * 0.82)), i))
sys.stdout.flush()
sleep(interval)
print
def finish_session(db_conn, session_id):
db_conn.sessions.update(
{'_id': session_id},
{
'$set': {
'finish_time': datetime.today()
}
},
multi=True,
upsert=True
)
def prepare_workspace(workspace):
    ok = True
    try:
        rmtree(workspace)
    except OSError:
        pass  # already prepared
    return ok
def welcome(working_directory):
welcome = """
.' .;. _
.-..' .-. `.,' ' DEX indexing module 0.0.4. Copyright 2014 Algthm.
: ; .;.-' ,'`. Working Directory: [working_directory]
`:::'`.`:::'-' `._. Log: [log_location]
"""
print '\033[1;34m{}\033[0m'\
.format(welcome.replace('[log_location]', '/Users/jon/tmp/dex.log')
.replace('[working_directory]', working_directory))
def main():
working_directory = cfg.settings.general.directory
welcome(working_directory)
while 1:
try:
print '> preparing workspace ..',
if prepare_workspace(cfg.settings.general.directory):
print 'ok'
print '> connecting to Mongo ..',
db_conn = MongoConnection().get_db()
if db_conn:
print 'done'
else:
raise IndexerBootFailure('Could not connect to DB.')
print '> connecting to MQ @ {} ..'\
.format(cfg.settings.mq.connection.host),
try:
mq_conn = pika.BlockingConnection(pika.ConnectionParameters(
host=cfg.settings.mq.connection.host
))
if mq_conn:
print 'done'
except pika.exceptions.AMQPConnectionError:
raise IndexerBootFailure('Could not connect to MQ.')
print '> connecting to ElasticSearch @ localhost ..',
try:
es_conn = Elasticsearch()
if es_conn:
print 'done'
except ElasticsearchException as e:
raise IndexerBootFailure('Could not connect to ES {}'.format(e))
print 'letting connections establish before testing.'
cool_off(cfg.settings.general.cooling)
print '> testing DB connection and schema ..',
if test_db_connection(db_conn):
print 'ok'
else:
raise IndexerBootFailure('Algthm schema not defined in DB.')
print '> testing MQ connection ..',
if test_mq_connection(mq_conn):
print 'ok'
else:
raise IndexerBootFailure('MQ connection failed.')
workers = initialize_workers(cfg.settings.general.workers,
worker.target)
print 'letting workers establish.'
cool_off(cfg.settings.general.cooling)
#-------------------------------------------------------------------
# All Checks Complete - Run
#-------------------------------------------------------------------
print '> running ...'
while True:
print '.',
sleep(5)
# Presence of contents in the working directory denotes there are a
# number of workers still processes jobs. Wait for directory to be
# empty before continuing.
print '> finalising ..',
while not dir_empty(working_directory):
print '.',
sleep(5)
cool_off(1)
                for p in workers:
                    try:
                        p.terminate()
                    except RuntimeError:
                        pass
break
except IndexerBootFailure as e:
print e
print "exiting .."
break
if __name__ == "__main__":
main()
| StarcoderdataPython |
9746694 | import os
from glob import glob
from textwrap import dedent
import numpy as np
import pytest
import pytest_mpl
import astropy
from astropy.coordinates import SkyCoord
from astropy import units as u
import pygedm
import fruitbat
from fruitbat import Frb, utils, cosmologies, methods, table, plot, catalogue
def test_get_bibtex_is_correct():
ads_bibtex = dedent(
r"""
@ARTICLE{2019JOSS....4.1399B,
author = {{<NAME>},
title = "{Fruitbat: A Python Package for Estimating Redshifts of Fast Radio Bursts}",
journal = {The Journal of Open Source Software},
keywords = {Astrophysics - Instrumentation and Methods for Astrophysics, Astrophysics - High Energy Astrophysical Phenomena},
year = "2019",
month = "May",
volume = {4},
number = {37},
pages = {1399},
doi = {10.21105/joss.01399},
archivePrefix = {arXiv},
eprint = {1905.04294},
primaryClass = {astro-ph.IM},
adsurl = {https://ui.adsabs.harvard.edu/abs/2019JOSS....4.1399B},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
"""
)
assert fruitbat.get_bibtex() == ads_bibtex.strip()
def test_cite_is_get_bibtex():
assert fruitbat.__cite__() == fruitbat.get_bibtex()
class TestFrbClass:
# Create FRB objects for testing
frb = Frb(dm=1000, dm_excess=1000, name='simple_frb')
frb_raj_decj = Frb(dm=1000, raj="11:05:50.0", decj="-8:34:12.0")
frb_gl_gb = Frb(dm=1000, gl="30.5", gb="-60.2")
frb_w_s = Frb(dm=1000, width=30.0, peak_flux=20.0)
frb_host_known = Frb(dm=1000, dm_excess=900, z_host=1.0, dm_host_loc=200)
frb_dm_host_0 = Frb(dm=1000, dm_excess=900, z_host=1.0)
frb_dm_host_est = Frb(dm=1100, dm_host_est=100)
frb_energy = Frb(dm=1000, obs_bandwidth=400, width=1, peak_flux=2)
frb_energy_freq = Frb(dm=1000, obs_freq_central=0.4, width=1, peak_flux=2)
frb_utc = Frb(dm=1000, utc="1999-01-01T00:00:00.000")
frb_with_units = Frb(dm=1000, obs_bandwidth=400*u.MHz)
frb_fluence = Frb(dm=1000, fluence=2)
def test_frb_dm_assignment_int(self):
test_frb = Frb(1000)
assert test_frb.dm == 1000.0 * u.pc * u.cm**-3
def test_frb_dm_assignment_float(self):
test_frb = Frb(1000.0)
assert test_frb.dm == 1000.0 * u.pc * u.cm**-3
def test_frb_dm_assignment_string(self):
with pytest.raises(ValueError):
test_frb = Frb("1000")
def test_frb_gl_gb_assignment_float(self):
test_frb = Frb(1000, gl=10, gb=20)
assert (test_frb.gl.value, test_frb.gb.value) == (10, 20)
def test_frb_gl_gb_assignment_string(self):
test_frb = Frb(1000, gl="10", gb="20")
assert (test_frb.gl.value, test_frb.gb.value) == (10, 20)
# Test that methods returns the correct value for DM=1000 and planck2018
def test_methods(self):
methods = {
"Ioka2003": 0.80856155,
"Inoue2004": 0.98344417,
"Zhang2018": 1.10879646
}
for method in methods.keys():
z = self.frb.calc_redshift(method=method, cosmology="Planck18")
assert np.isclose(z.value, methods[method]), "Fail: {}".format(method)
# Test that a ValueError is raised when an invalid method is given.
def test_invalid_method(self):
invalid_method = "jacqui1992"
with pytest.raises(ValueError):
self.frb.calc_redshift(method=invalid_method, cosmology="Planck18")
# Test that a ValueError is raised when an invalid cosmology is given.
def test_invalid_cosmology(self):
invalid_cosmology = "cosmos_1964"
with pytest.raises(ValueError):
self.frb.calc_redshift(method="Ioka2003", cosmology=invalid_cosmology)
# Test raises error on dispersion measure less than zero
def test_frb_negative_dm(self):
with pytest.raises(ValueError):
Frb(dm=-1000)
# Test that the skycoords are calculated correctly when given raj and decj
def test_frb_calc_skycoords_raj_decj(self):
ra_str = "11:05:50.0"
dec_str = "-8:34:12.0"
skycoords = self.frb_raj_decj.calc_skycoords()
test_skycoords = SkyCoord(ra_str, dec_str, frame="icrs",
unit=(u.hourangle, u.deg))
ra, dec = skycoords.ra.value, skycoords.dec.value
test_ra, test_dec = test_skycoords.ra.value, test_skycoords.dec.value
assert np.isclose((ra, dec), (test_ra, test_dec)).all()
# Test that the skycoords are calculated correctly when given gl and gb
def test_frb_calc_skycoords_gl_gb(self):
gl_str = "30.5"
gb_str = "-60.2"
skycoords = self.frb_gl_gb.calc_skycoords()
test_skycoords = SkyCoord(gl_str, gb_str, frame="galactic", unit=u.deg)
gl, gb = skycoords.galactic.l.value, skycoords.galactic.b.value
test_gl, test_gb = test_skycoords.l.value, test_skycoords.b.value
assert np.isclose((gl, gb), (test_gl, test_gb)).all()
# Test that calc_skycoords raises an error if no coords are given
def test_frb_calc_skycoords_no_coords(self):
with pytest.raises(ValueError):
self.frb.calc_skycoords()
# Test fluence is calculated correctly when given width and peak_flux.
def test_frb_calc_fluence(self):
fluence = self.frb_w_s.calc_fluence()
assert np.isclose(fluence.value, 600.0)
# Test calc_fluence raises a ValueError if width and peak_flux are None.
def test_frb_calc_fluence_raise_error(self):
with pytest.raises(ValueError):
self.frb.calc_fluence()
# Test calc_dm_igm calculates the dm_igm correctly for a known host.
def test_frb_calc_dm_igm(self):
dm_igm = self.frb_host_known.calc_dm_igm()
assert np.isclose(dm_igm.value, 800.0)
# Test calc_dm_igm raises ValueError when z is None.
def test_frb_calc_dm_igm_z_none(self):
with pytest.raises(ValueError):
self.frb_w_s.calc_dm_igm()
# Test calc_dm_igm raises ValueError when dm_host is 0.0 and z is not None.
def test_frb_calc_dm_igm_dm_host_zero(self):
with pytest.raises(ValueError):
self.frb_dm_host_0.calc_dm_igm()
    # Test calc_redshift with subtract_host
def test_frb_calc_redshift_subtract_host(self):
dm_1 = self.frb_dm_host_est.calc_redshift(method="Ioka2003", subtract_host=True)
dm_2 = self.frb.calc_redshift(method="Ioka2003")
assert np.isclose(dm_1, dm_2)
# Test that calc_redshift will raise error if subtract_host is not a bool
def test_frb_subtract_host_not_bool(self):
with pytest.raises(ValueError):
self.frb_dm_host_est.calc_redshift(subtract_host="yes")
# Test calc_dm_galaxy calculates dm_galaxy correctly for given coordinates.
def test_frb_calc_dm_galaxy_ymw16(self):
dm_galaxy = self.frb_raj_decj.calc_dm_galaxy("YMW16")
dm_pymw16, t_sc_pymw16 = pygedm.dist_to_dm(
self.frb_raj_decj.skycoords.galactic.l,
self.frb_raj_decj.skycoords.galactic.b, 25000, method="YMW16")
assert np.isclose(dm_galaxy.value, dm_pymw16.value)
# Test calc_dm_galaxy calculates dm_galaxy correctly for given coordinates.
# def test_frb_calc_dm_galaxy_ne2001(self):
# dm_galaxy = self.frb_raj_decj.calc_dm_galaxy("NE2001")
# dm_ne2001, t_sc_ne2001 = pygedm.dist_to_dm(
# self.frb_raj_decj.skycoords.galactic.l,
# self.frb_raj_decj.skycoords.galactic.b, 25000, method="NE2001")
# assert np.isclose(dm_galaxy.value, dm_ne2001.value)
# Test calc_dm_galaxy raises a ValueError when no coordinates are given
def test_frb_cal_dm_galaxy_no_coords(self):
with pytest.raises(ValueError):
self.frb.calc_dm_galaxy(model="ymw16")
def test_frb_calc_lum_dist_without_z(self):
with pytest.raises(ValueError):
self.frb.z = None
self.frb.calc_luminosity_distance()
# Test calc_energy calculates the energy of an FRB
def test_frb_calc_energy_bandwidth(self):
self.frb_energy.calc_redshift(method="Inoue2004")
energy = self.frb_energy.calc_energy(use_bandwidth=True)
assert np.isclose(energy.value, 2.13256754066293e+40)
def test_frb_calc_energy_frequency(self):
self.frb_energy_freq.calc_redshift(method="Inoue2004")
energy = self.frb_energy_freq.calc_energy()
assert np.isclose(energy.value, 2.13256754066293e+37)
def test_frb_calc_energy_no_fluence(self):
with pytest.raises(ValueError):
self.frb.calc_redshift(method="Inoue2004")
self.frb.calc_energy(use_bandwidth=True)
def test_frb_calc_energy_no_bandwidth(self):
with pytest.raises(ValueError):
self.frb_fluence.calc_redshift(method="Inoue2004")
self.frb_fluence.calc_energy(use_bandwidth=True)
def test_frb_calc_energy_no_frequency(self):
with pytest.raises(ValueError):
self.frb_energy.calc_redshift(method="Inoue2004")
self.frb_energy.calc_energy()
def test_frb_calc_luminosity_bandwidth(self):
self.frb_energy.calc_redshift(method="Inoue2004")
lum = self.frb_energy.calc_luminosity(use_bandwidth=True)
assert np.isclose(lum.value, 4.229828665e+43)
def test_frb_calc_luminosity_frequency(self):
self.frb_energy_freq.calc_redshift(method="Inoue2004")
lum = self.frb_energy_freq.calc_luminosity()
assert np.isclose(lum.value, 4.2298286655e+40)
def test_frb_calc_luminosity_no_frequency(self):
with pytest.raises(ValueError):
self.frb_energy.calc_redshift(method="Inoue2004")
self.frb_energy.calc_luminosity()
def test_frb_calc_comoving_distance(self):
self.frb.calc_redshift(method="Inoue2004")
dist = self.frb.calc_comoving_distance()
assert np.isclose(dist.value, 3351.51321266)
def test_frb_pass_wrong_units(self):
with pytest.raises(ValueError):
Frb(dm=1000, obs_bandwidth=400*u.m)
# Test that the FRB __repr__ is printed
def test_frb__repr__(self):
print(self.frb)
# Test all methods and properties get values and print
def test_frb_attrs(self):
for d in dir(self.frb):
attr = getattr(self.frb, d)
print(attr)
def test_create_FlatLambdaCDM_cosmology():
# Test FlatLambdaCDM
FlatLambdaCDM_params = {'H0': 67, 'Om0': 0.3, 'flat': True}
flcdm = cosmologies.create_cosmology(FlatLambdaCDM_params)
assert type(flcdm) == astropy.cosmology.core.FlatLambdaCDM
def test_create_FlatwCDM_cosmology():
# Test FlatwCDM
FlatwCDM_params = {'H0': 67, 'Om0': 0.3, 'flat': True, 'w0': 0.9}
fwcdm = cosmologies.create_cosmology(FlatwCDM_params)
assert type(fwcdm) == astropy.cosmology.core.FlatwCDM
def test_create_LambdaCDM_cosmology():
# Test LambdaCDM
LambdaCDM_params = {'H0': 67, 'Om0': 0.3, 'Ode0': 0.8, 'flat': False}
lcdm = cosmologies.create_cosmology(LambdaCDM_params)
assert type(lcdm) == astropy.cosmology.core.LambdaCDM
def test_create_wCDM_cosmology():
# Test wCDM
wCDM_params = {'H0': 67, 'Om0': 0.3, 'Ode0': 0.8, 'flat': False, 'w0': 0.9}
wcdm = cosmologies.create_cosmology(wCDM_params)
assert type(wcdm) == astropy.cosmology.core.wCDM
class Test_fz_integrand:
# Create default cosmology
cosmo = cosmologies.create_cosmology()
cosmo_w0 = cosmologies.create_cosmology({'w0': 1})
# Test _fz_integrand correctly computes for z = 0
def test_fz_integrand_z0(self):
fz = methods._f_integrand(0, self.cosmo)
assert np.isclose(fz, 1.0)
# Test _fz_integrand correctly computes for z = 2
def test_fz_integrand_z2(self):
fz = methods._f_integrand(2, self.cosmo)
assert np.isclose(fz, 1.011299)
def test_fz_integrand_w1_z1(self):
fz = methods._f_integrand(1, self.cosmo_w0)
assert np.isclose(fz, 0.291111)
# Test _check_keys_in_dict raises a KeyError when dict is missing keys
def test_check_keys_in_dict_missing():
required_keys = ["key1", "key2"]
dictionary = {"key1": 1, "otherkey": 2}
with pytest.raises(KeyError):
utils.check_keys_in_dict(dictionary, required_keys)
def test_check_keys_in_dict_all():
required_keys = ["key1", "key2"]
dictionary = {"key1": 1, "key2": 2}
result = utils.check_keys_in_dict(dictionary, required_keys)
assert result
class TestAddingMethods:
def new_method(self, z, cosmo):
return 1200 * z
def test_add_method(self):
methods.add_method("new_method", self.new_method)
assert "new_method" in methods.available_methods()
def test_reset_methods(self):
methods.reset_methods()
assert "new_method" not in methods.available_methods()
class TestCatalogue:
def test_create_analysis_catalogue(self):
catalogue.create_analysis_catalogue("pytest_output_analysis_catalogue")
assert os.path.exists("pytest_output_analysis_catalogue.csv")
def test_create_method_catalogue(self):
catalogue.create_methods_catalogue("pytest_output_methods_catalogue")
assert os.path.exists("pytest_output_methods_catalogue.csv")
class TestCreateTables:
def test_create_tables_normal(self):
method_list = methods.builtin_method_functions()
cosmology_list = cosmologies.builtin_cosmology_functions()
# Create a lookup table for each analytic method and cosmology
for method_key in method_list:
if method_key in methods.methods_hydrodynamic():
continue
for cosmo_key in cosmology_list:
if cosmo_key == "EAGLE": # Skip EAGLE since it is the same as Planck13
continue
here = os.getcwd()
cosmo = cosmologies.builtin_cosmology_functions()[cosmo_key]
filename = "_".join(["pytest_output", method_key, cosmo_key])
table.create(method=method_key, filename=filename,
cosmo=cosmo, output_dir=here, zmin=0,
zmax=20, num_samples=10000)
# Compare new tables to existing tables for 4 dm values
pre_calc_table_name = "{}.hdf5".format(method_key)
pre_calc_table = utils.get_path_to_file_from_here(pre_calc_table_name, subdirs=["data"])
new_calc_table = "pytest_output_{}_{}.hdf5".format(method_key, cosmo_key)
#pre_calc = table.load(pre_calc_fn)
#new_calc = table.load(new_calc_fn, data_dir=here)
test_dm_list = [0, 100, 1000, 2000]
for dm in test_dm_list:
new_z = table.get_z_from_table(dm, new_calc_table, cosmo_key)
pre_z = table.get_z_from_table(dm, pre_calc_table, cosmo_key)
assert np.isclose(new_z, pre_z, rtol=1e-03)
def test_create_table_zhang_figm_free_elec(self):
cosmo = cosmologies.builtin_cosmology_functions()["Planck18"]
filename = "_".join(["pytest_output", "Zhang2018",
"Planck18", "figm_free_elec"])
here = os.getcwd()
table.create(method="Zhang2018", filename=filename, cosmo=cosmo,
output_dir=here, f_igm=0.5, free_elec=0.4)
def test_create_table_zhang_figm_error(self):
cosmo = cosmologies.builtin_cosmology_functions()["Planck18"]
with pytest.raises(ValueError):
table.create(method="Zhang2018", cosmo=cosmo, f_igm=-1)
def test_create_table_zhang_free_elec_error(self):
cosmo = cosmologies.builtin_cosmology_functions()["Planck18"]
filename = "_".join(["pytest_output", "Zhang2018",
"Planck18", "free_elec_error"])
with pytest.raises(ValueError):
table.create(method="Zhang2018", filename=filename, cosmo=cosmo,
free_elec=-1)
def test_create_table_invalid_method(self):
with pytest.raises(ValueError):
table.create(method="Webb1995")
class TestPlots:
# Test that the method plot creates an output file
def test_method_plot(self):
with pytest_mpl.plugin.switch_backend('Agg'):
plot.method_comparison(filename="pytest_output_method")
cwd = os.getcwd()
if not os.path.exists(os.path.join(cwd, "pytest_output_method.png")):
raise OSError
# Test that the cosmology plot creates and output file
def test_cosmology_plot(self):
with pytest_mpl.plugin.switch_backend('Agg'):
plot.cosmology_comparison(filename="pytest_output_cosmo")
cwd = os.getcwd()
if not os.path.exists(os.path.join(cwd, "pytest_output_cosmo.png")):
raise OSError
def test_redshift_pdf_plot(self):
        frb = Frb(510, gl=34, gb=15)
        with pytest_mpl.plugin.switch_backend('Agg'):
            plot.redshift_pdf(frb, filename="pytest_output_pdf", usetex=False)
        cwd = os.getcwd()
        if not os.path.exists(os.path.join(cwd, "pytest_output_pdf.png")):
raise OSError
def test_cleanup():
# Remove the files at end of test
test_files = glob("*pytest_output*")
test_files += glob("./data/*pytest_output*")
for file in test_files:
os.remove(file)
| StarcoderdataPython |
280043 | __version__ = "0.0.1"
import uuid
import time
import traceback
from importlib.metadata import version # PYTHON >= 3.8
from loguru import logger
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.exceptions import HTTPException
from starlette.middleware.cors import CORSMiddleware
from fastapi.exceptions import RequestValidationError
from quanto_trabalhou_presidente.config import DESCRIPTION
from quanto_trabalhou_presidente.routes import appointment
from quanto_trabalhou_presidente.exceptions import QuantoTrabalhouPresidenteException
app = FastAPI(
title="Quanto Trabalhou o Presidente?",
description=DESCRIPTION,
version=__version__,
docs_url="/swagger",
redoc_url="/docs"
)
logger.level("INCOME REQUEST", no=1, color="<yellow>")
logger.level("PROCESSED REQUEST", no=2, color="<yellow>")
app.include_router(appointment.router, prefix='/appointment', tags=['appointment'])
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
    request_id = uuid.uuid1()
    logger.log("INCOME REQUEST", f"[{request.method}] ID: {request_id} - IP: {request.client.host}"
               + f" - ENDPOINT: {request.url.path}")
    start_time = time.time()
    response = await call_next(request)
    process_time = time.time() - start_time
    logger.log("PROCESSED REQUEST", f"[{request.method}] ID: {request_id} - IP: {request.client.host}"
               + f" - ENDPOINT: {request.url.path} - EXECUTION TIME: {process_time}")
response.headers["X-Process-Time"] = str(process_time)
return response
@app.exception_handler(QuantoTrabalhouPresidenteException)
async def camara_exception_handler(request: Request, exception: QuantoTrabalhouPresidenteException):
return JSONResponse(
status_code=exception.status_code,
content={
"status": exception.status_code,
"message": exception.message,
"stacktrace": traceback.format_exc()
}
)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exception: RequestValidationError):
return JSONResponse(
status_code=422,
content={
"status": 422,
"message": "Invalid Request Field",
"stacktrace": traceback.format_exc()
}
)
@app.exception_handler(HTTPException)
async def http_exception_handler(request: Request, exception: HTTPException):
    message = {401: "Not Authorized", 404: "Not Found", 405: "Method Not Allowed"}
return JSONResponse(
status_code=exception.status_code,
content={
"status": exception.status_code,
"message": message[exception.status_code],
"stacktrace": traceback.format_exc()
}
)
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_methods=['*'],
allow_credentials=True,
allow_headers=['*']
)
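# A hedged usage note: assuming this module is saved as main.py (the file
# name is not given in the source), the app can be served locally with:
#
#     uvicorn main:app --host 0.0.0.0 --port 8000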
| StarcoderdataPython |
1951683 | from . import auth
from flask import render_template, redirect, url_for, flash, request
from ..models import User
from .forms import RegistrationForm, LoginForm
from .. import db
from flask_login import login_user, current_user, logout_user, login_required
from ..email import mail_message
@auth.route("/login", methods=['GET','POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('parking.home'))
form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and user.verify_password(form.password.data):
            login_user(user, remember=form.remember.data)
            flash('You have been logged in!', 'success')
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('parking.home'))
        flash('Login Unsuccessful. Please check email and password', 'danger')
    return render_template('auth/login.html', title='login', form=form)
@auth.route("/register",methods=['GET','POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('parking.home'))
form = RegistrationForm()
if form.validate_on_submit():
new_user = User(username=form.username.data, email= form.email.data, password=form.password.data)
db.session.add(new_user)
db.session.commit()
mail_message("Welcome to Parking System","email/welcome_user",new_user.email,user=new_user)
flash(f'Account created for {form.username.data}!', 'success')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', title='Register', form=form)
@auth.route("/logout")
def logout():
logout_user()
return redirect(url_for('auth.login'))
@auth.route("/account")
@login_required
def account():
image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
return render_template('auth/account.html', title='Account', image_file=image_file)
@auth.route('/make_me_admin')
@login_required
def make_me_admin():
user = User.query.get(current_user.id)
user.role = 'admin'
db.session.add(user)
db.session.commit()
return redirect(url_for('admin.index')) | StarcoderdataPython |
6693922 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2018 <NAME> GmbH
All rights reserved.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@author: <NAME>
"""
import abc
import numpy as np
from scipy import interpolate
def gLin(m, s, A, b=None):
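    """Propagate a Gaussian with mean m and covariance s through the affine
    map x -> A x + b. Returns the transformed mean M = A m + b, covariance
    S = A s A^T, and C = A.T for building input-output cross-covariances.
    """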
if A.ndim == 1:
dim = 1
else:
dim = A.shape[0]
if b is None:
if dim == 1:
b = 0
else:
b = np.zeros(dim)
M = np.dot(A, m) + b
S = np.dot(A, np.dot(s, A.T))
if dim > 1:
S = (S + S.T) / 2
C = A.T
return M, S, C
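# A minimal sketch of gLin with the identity map (values are illustrative):
#
#     m = np.array([1.0, 2.0])
#     s = np.eye(2)
#     M, S, C = gLin(m, s, np.eye(2))   # M == m, S == s, C == identity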
def resample(data, factor):
""" Up or downsample data by a given factor
args:
data: ndarray, (N, D), input data to be resampled along first dimension
factor: double, >1 = upsample, <1 = downsample
returns:
data: ndarray, (floor(N*factor), D) up or downsampled data
"""
N, D = data.shape
x = np.linspace(1, N, N)
x_new = np.linspace(1, N, int(N * factor))
f = interpolate.interp1d(x, data, kind='cubic', axis=0)
return f(x_new)
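# Example: upsampling a (4, 1) series by a factor of 2 returns an (8, 1)
# array, cubically interpolated along the first axis (illustrative only):
#
#     resample(np.arange(4.0)[:, None], 2).shape   # == (8, 1)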
def handle_exception(inst, config, text):
if 'raise_exception' in config and config['raise_exception'] is True:
raise
print()
print('---------------------------------------------------')
print('## %s' % (text))
print(inst)
print('---------------------------------------------------')
class Configurable(object):
__metaclass__ = abc.ABCMeta
def configure(self, config):
assert type(config) == dict, 'configure method of %s expects dict type config parameter' % (self.__class__)
# Copy all attributes from config-dict to the class's local space
for key, value in config.items():
if hasattr(self, key):
setattr(self, key, value)
else:
                raise NotImplementedError('Unknown attribute %s for %s' %
                                          (key, self.__class__.__name__))
def enforce_list(var):
""" Enforces a list of elements
If a single, non-list element is given, a list with one element is returned
args:
var: list or single element
returns:
given list or single element list holding the given var parameter
"""
if type(var) is not list:
return [var]
else:
return var
def enforce_2d(var):
""" Enforce list of 2D numpy arrays.
In case of 1D timeseries (H, ), a singleton dimension is added (H, 1) such
that timeseries data becomes a column vector.
args:
var, list: list of np.ndarrays or Nones
returns:
list of np.ndarrays or Nones where each ndarrays is atleast 2D.
"""
assert type(var) == list, 'enforce_2d expects list type input parameter'
res = []
for x in var:
if x is None:
res.append(x)
else:
assert type(x) == np.ndarray, 'list elements must be ndarray or None'
if x.ndim < 2:
res.append(x[:, None])
else:
res.append(x)
return res
def retrieve_config(config, item, error):
assert item in config, error
return config[item]
def create_dated_directory(path):
import time
import os
    assert os.path.exists(path)
date_str = time.strftime('%y%m%d')
time_str = time.strftime('%H%M')
run = 0
dir_path = os.path.join(path, date_str, time_str, 'run_%d' % run)
path_exists = True
while path_exists is True:
if os.path.exists(dir_path):
path_exists = True
run += 1
dir_path = os.path.join(path, date_str, time_str, 'run_%d' % run)
else:
os.makedirs(dir_path)
path_exists = False
return dir_path
| StarcoderdataPython |
6520633 | print("ANALISADOR DE TRIÂNGULOS . . .\n")
segmento1 = float(input("Primeiro segmento: "))
segmento2 = float(input("Segundo segmento0: "))
segmento3 = float(input("Terceiro segmento0: "))
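# Triangle inequality: three segments form a triangle only if each segment
# is strictly shorter than the sum of the other two, which the check below tests.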
if segmento1 < segmento2 + segmento3 and segmento2 < segmento1 + segmento3 and segmento3 < segmento1 + segmento2:
triangulo = True
else:
triangulo = False
if triangulo:
print("Os segmentos acima PODEM formar um triângulo!")
else:
print("Os segmentos acima NÃO PODEM formar um triângulo.")
| StarcoderdataPython |
6643343 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.main import print_setup_info
def test_print_shell_vars_sh(capsys):
print_setup_info('sh')
out, _ = capsys.readouterr()
assert "_sp_sys_type=" in out
assert "_sp_tcl_root=" in out
assert "_sp_lmod_root=" in out
assert "_sp_module_prefix" not in out
def test_print_shell_vars_csh(capsys):
print_setup_info('csh')
out, _ = capsys.readouterr()
assert "set _sp_sys_type = " in out
assert "set _sp_tcl_root = " in out
assert "set _sp_lmod_root = " in out
assert "set _sp_module_prefix = " not in out
def test_print_shell_vars_sh_modules(capsys):
print_setup_info('sh', 'modules')
out, _ = capsys.readouterr()
assert "_sp_sys_type=" in out
assert "_sp_tcl_root=" in out
assert "_sp_lmod_root=" in out
assert "_sp_module_prefix=" in out
def test_print_shell_vars_csh_modules(capsys):
print_setup_info('csh', 'modules')
out, _ = capsys.readouterr()
assert "set _sp_sys_type = " in out
assert "set _sp_tcl_root = " in out
assert "set _sp_lmod_root = " in out
assert "set _sp_module_prefix = " in out
| StarcoderdataPython |
5042476 | <filename>src/qsubsettings.py<gh_stars>0
import re
from functools import wraps
import os
_QSUBCMD = 'qsub'
_QSUBSYNOPSIS = 'qsub [-a date_time] [-A account_string] [-b secs] [-c checkpoint_options]\
[-C directive_prefix] [-cwd] [-clear] [-d path] [-D path] [-e path] [-f] [-F] [-h]\
[-I ] [-j join ] [-k keep ] [-l resource_list ]\
[-m mail_options] [-M user_list] [-n] [-N name] [-o path]\
[-p priority] [-P user] [-q destination] [-r c] [-sync yesno] [-S path_to_shell]\
[-t array_request] [-u user_list]\
[-v variable_list] [-V] [-W additional_attributes] [-x] [-X] [-z] [script]'
smallclustersetting = {
'-cwd': True,
'-P': 'cpu.p',
'-sync': 'y',
'-S': '/usr/bin/python'
}
bigclustersetting = {
'-cwd': True,
'-sync': 'y',
'-S': '/usr/bin/python'
}
_TEMPLATE = {
'-sync': 'y',
'-S': '/usr/bin/python'
}
def _parseSettings(settings):
def executableExists(program):
        def is_executable(fpath):
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
        fpath, fname = os.path.split(program)
        if fpath:
            if is_executable(program):
                return program
        else:
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_executable(exe_file):
                    return exe_file
qsubset = [_QSUBCMD]
# Do a quick qsub call to identify if qsub is installed on the machine
# TODO: Check if qsub is outside of PATH available
if not executableExists(qsubset[0]):
raise OSError(
"qsub cannot be found on this machine, did you install it?")
for setting in settings:
# Explicitly testing for the boolean, if True we just append the key
# with no value since in cases of e.g. -cwd we don't want to have any
# value
if settings[setting] == True:
qsubset.append(setting)
else:
qsubset.append(setting)
qsubset.append(settings[setting])
return qsubset
def validateSettings(mdict):
'''
Validates the given dict, if the given arguments are supported by qsub
'''
    validargs = re.findall(r'\[(-[a-zA-Z]+)\s?(\w+)?\]', _QSUBSYNOPSIS)
validdict = dict((x, y) for (x, y) in validargs)
for key, value in mdict.iteritems():
if key not in validdict:
raise ValueError(
"The key parameter in the settings (%s) is not valid for qsub!" % (key))
# Check if the valid arguments require some argument after it or not
if validdict[key]:
if not isinstance(value, str):
raise ValueError(
"The value for the key (%s) is wrong!" % (key))
else:
if not isinstance(value, bool):
raise ValueError(
"The value for the key (%s) needs to be a boolean!" % (key))
# Check if the required arguments are given
for key, value in _TEMPLATE.iteritems():
if key not in mdict:
            raise ValueError(
                "Two arguments are required for QPy to work properly: (%s). I could not find these." % " ".join(_TEMPLATE.keys()))
def setting(setting):
'''
Decorator to use any other different setting than the default one using
@runluster()
Usage:
@setting(mysetting)
@runcluster(3)
def add(a,b):
return a+b
mysetting needs to be a dict containing the Qsub settings, e.g. {'-o':'out'}
If the setting has no value e.g. -cwd, please use True as it's dict value
'''
appendedSetting = dict(setting.items() + _TEMPLATE.items())
validateSettings(appendedSetting)
def decorate(func):
@wraps(func)
def wrap(*args):
kw = {'settings': appendedSetting}
return func(*args, **kw)
return wrap
return decorate
def newsetting(mdict):
'''
returns a new settings for qsub.
mdict is a dictionary containing keys which are the switches for qsub e.g. -S -P ...
and the values are the corresponding parameters for the switch
'''
validateSettings(mdict) # validate first
return dict(_TEMPLATE.items() + mdict.items())
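# A minimal usage sketch ('runcluster' is the hypothetical runner mentioned
# in the setting() docstring, not defined in this module; values illustrative):
#
#     mysetting = newsetting({'-cwd': True, '-P': 'cpu.p'})
#
#     @setting({'-o': 'out', '-cwd': True})
#     @runcluster(3)
#     def add(a, b):
#         return a + b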
| StarcoderdataPython |
3514089 | import torch
from torch import nn
from torch import distributions as torch_dist
from itertools import chain
import math
import numpy as np
from torch.nn import functional as F
from operator import itemgetter
from diayn_seq_code_revised.trainer.trainer_seqwise_stepwise_revised import \
DIAYNAlgoStepwiseSeqwiseRevisedTrainer
from diayn_seq_code_revised.networks.my_gaussian import ConstantGaussianMultiDim
from seqwise_cont_skillspace.utils.info_loss import InfoLoss
from seqwise_cont_skillspace.networks.rnn_vae_classifier import RnnVaeClassifierContSkills
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
import self_supervised.utils.my_pytorch_util as my_ptu
class ContSkillTrainerSeqwiseStepwise(DIAYNAlgoStepwiseSeqwiseRevisedTrainer):
def __init__(self,
*args,
skill_prior_dist: ConstantGaussianMultiDim,
loss_fun: InfoLoss.loss,
**kwargs):
super().__init__(*args, **kwargs)
self.skill_prior = skill_prior_dist
self.loss_fun = loss_fun
# Overwrite Criterion
self.df_criterion = nn.MSELoss()
def create_optimizer_step(self, optimizer_class, df_lr):
return optimizer_class(
chain(
self.df.classifier.parameters(),
self.df.feature_decoder.parameters(),
),
lr=df_lr
)
@property
def num_skills(self):
raise NotImplementedError('Continuous-skills-case: infinite skills')
def _df_loss_intrinsic_reward(self,
skills,
next_obs):
"""
Args:
skills : (N, S, skill_dim)
next_obs : (N, S, obs_dim)
Return:
df_loss : scalar tensor
rewards : (N, S, 1)
log_dict : dict
"""
df_ret_dict = self.df(
next_obs,
train=True
)
classified_steps, \
feature_recon_dist, \
classified_seqs, \
hidden_features_seq = itemgetter(
'classified_steps',
'feature_recon_dist',
'classified_seqs',
'hidden_features_seq')(df_ret_dict)
# Sequence Classification Loss
ret_dict_seq = self._df_loss_seq(
pred_skills_seq=classified_seqs,
skills=skills
)
df_loss_seq = itemgetter(
'df_loss',
)(ret_dict_seq)
# Step Loss and rewards
loss_calc_values = dict(
hidden_feature_seq=hidden_features_seq,
recon_feature_seq=feature_recon_dist,
post_skills=classified_steps
)
ret_dict_step = self._df_loss_step_rewards(
loss_calc_values=loss_calc_values,
skills=skills
)
df_loss_step, \
rewards, \
log_dict_df_step = itemgetter(
'df_loss',
'rewards',
'log_dict')(ret_dict_step)
return dict(
df_loss=dict(
seq=df_loss_seq,
step=df_loss_step,
),
rewards=rewards,
log_dict=log_dict_df_step
)
def _df_loss_step_rewards(
self,
loss_calc_values: dict,
skills: torch.Tensor,
):
"""
Args:
loss_calc_values
hidden_feature_seq : (N, S, hidden_size_rnn)
recon_feature_seq : (N, S, hidden_size_rnn) distributions
post_skills : (N, S, skill_dim) distributions
skill_prior : (N, S, skill_dim) distributions
skills : (N, S, skill_dim)
Return:
df_loss : scalar tensor
rewards : (N, S, 1)
log_dict
kld : scalar tensor
mmd : scalar tensor
mse : scalar tensor
kld_info : scalar tensor
mmd_info : scalar tensor
loss_latent : scalar tensor
loss_data : scalar tensor
info_loss : scalar tensor
"""
batch_dim = 0
seq_dim = 1
data_dim = -1
batch_size = skills.size(batch_dim)
seq_len = skills.size(seq_dim)
skill_dim = skills.size(data_dim)
hidden_feature_seq = loss_calc_values['hidden_feature_seq']
recon_feature_seq = loss_calc_values['recon_feature_seq']
post_skills = loss_calc_values['post_skills']
#assert hidden_feature_seq.shape == torch.Size(
# (batch_size,
# seq_len,
# 2 * self.df.rnn.hidden_size))
assert post_skills.batch_shape == skills.shape
assert hidden_feature_seq.shape == recon_feature_seq.batch_shape
# Rewards
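        # The intrinsic reward is the DIAYN-style log-ratio
        # log q(z|s) - log p(z): large when the posterior recovers the
        # skill from the hidden features better than the prior does.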
pri_dist = self.skill_prior(hidden_feature_seq)
rewards = post_skills.log_prob(skills) - pri_dist.log_prob(skills)
rewards = torch.sum(rewards, dim=data_dim, keepdim=True)
assert rewards.shape == torch.Size((batch_size, seq_len, 1))
# Reshape Dist
pri_dist = self.reshape_dist(pri_dist)
assert len(pri_dist.batch_shape) == 2
pri = dict(
dist=pri_dist,
sample=pri_dist.sample()
)
# Reshape Dist
post_dist = self.reshape_dist(post_skills)
post = dict(
dist=post_dist,
sample=post_dist.rsample()
)
# Reshape Dist
recon_feature_seq_dist = self.reshape_dist(recon_feature_seq)
assert len(recon_feature_seq_dist.batch_shape) == 2
recon = dict(
dist=recon_feature_seq_dist,
sample=recon_feature_seq_dist.loc,
)
# Reshape
hidden_feature_seq = hidden_feature_seq.detach()
hidden_feature_seq = hidden_feature_seq.reshape(
-1,
hidden_feature_seq.size(data_dim)
)
# Loss Calculation
info_loss, log_dict = self.loss_fun(
pri=pri,
post=post,
recon=recon,
data=hidden_feature_seq.detach().reshape(
-1, hidden_feature_seq.size(data_dim)
),
guide=skills.detach().reshape(
batch_size * seq_len, skill_dim)
)
return dict(
df_loss=info_loss,
rewards=rewards,
log_dict=log_dict
)
def reshape_dist(self, dist: torch_dist.Distribution) -> torch_dist.Distribution:
"""
Args:
dist : (N, S, data_dim)
Return:
dist : (N * S, data_dim)
"""
batch_dim = 0
seq_dim = 1
data_dim = -1
batch_size = dist.batch_shape[batch_dim]
seq_len = dist.batch_shape[seq_dim]
data_size = dist.batch_shape[data_dim]
if isinstance(dist, torch_dist.Normal):
if not dist.loc.is_contiguous():
loc = dist.loc.contiguous()
else:
loc = dist.loc
if not dist.scale.is_contiguous():
scale = dist.scale.contiguous()
else:
scale = dist.scale
assert my_ptu.tensor_equality(loc.view(batch_size * seq_len,
data_size)[:seq_len],
loc[0, :, :])
assert my_ptu.tensor_equality(scale.view(batch_size * seq_len,
data_size)[:seq_len],
scale[0, :, :])
            # Reshape/View (use the contiguous tensors so .view() succeeds)
            loc_reshaped = loc.view(batch_size * seq_len, data_size)
            scale_reshaped = scale.view(batch_size * seq_len, data_size)
return torch_dist.Normal(
loc=loc_reshaped,
scale=scale_reshaped
)
elif isinstance(dist, torch_dist.Uniform):
low_reshaped = dist.low.reshape(batch_size * seq_len, data_size)
high_reshaped = dist.high.reshape(batch_size * seq_len, data_size)
return torch_dist.Uniform(
low=low_reshaped,
high=high_reshaped,
)
def _df_loss_seq(self,
pred_skills_seq,
skills):
"""
Args:
pred_skills_seq : (N, skill_dim) predicted skills per seq
skills : (N, S, skill_dim) ground truth skills seq
Return:
df_loss_seq : scalar tensor
pred_skill : (N, skill_dim)
skill_gt : (N, skill_dim)
"""
batch_dim = 0
seq_dim = 1
data_dim = -1
batch_size = skills.size(batch_dim)
seq_len = skills.size(seq_dim)
skill_dim = skills.size(data_dim)
assert pred_skills_seq.shape == torch.Size(
(batch_size,
skill_dim)
)
skills_per_seq_gt = skills[:, 0, :]
assert skills_per_seq_gt.shape == torch.Size((batch_size, skill_dim))
assert my_ptu.tensor_equality(
torch.stack([skills_per_seq_gt] * seq_len, dim=seq_dim),
skills
)
# Apply MSE Loss
df_seq_loss = self.df_criterion(
pred_skills_seq,
skills_per_seq_gt
)
return dict(
df_loss=df_seq_loss,
)
def train_from_torch(self, batch):
batch_dim = 0
seq_dim = 1
data_dim = -1
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
skills = batch['skills']
assert terminals.shape[:-1] \
== obs.shape[:-1] \
== actions.shape[:-1] \
== next_obs.shape[:-1] \
== skills.shape[:-1]
assert obs.size(data_dim) == next_obs.size(data_dim)
assert skills.size(data_dim) == self.policy.skill_dim
assert terminals.size(data_dim) == 1
batch_size = next_obs.size(batch_dim)
seq_len = next_obs.size(seq_dim)
obs_dim = obs.size(data_dim)
action_dim = actions.size(data_dim)
"""
DF Loss and Intrinsic Reward
"""
df_ret_dict = self._df_loss_intrinsic_reward(
skills=skills,
next_obs=next_obs
)
df_loss, \
rewards, \
log_dict = itemgetter(
'df_loss',
'rewards',
'log_dict'
)(df_ret_dict)
num_transitions = batch_size * seq_len
assert torch.all(
torch.eq(obs[0, 0, :],
obs.view(num_transitions, obs_dim)[0, :])
)
terminals = terminals.view(num_transitions, 1)
obs = obs.view(num_transitions, obs_dim)
next_obs = next_obs.view(num_transitions, obs_dim)
actions = actions.view(num_transitions, action_dim)
skills = skills.view(num_transitions, self.policy.skill_dim)
assert torch.all(
torch.eq(
rewards[0, 0, :],
rewards.view(num_transitions, 1)[0, :])
)
rewards = rewards.view(num_transitions, 1)
"""
Policy and Alpha Loss
"""
policy_ret_dict = self._policy_alpha_loss(
obs=obs,
skills=skills
)
policy_loss, \
alpha_loss, \
alpha, \
q_new_actions, \
policy_mean, \
policy_log_std, \
log_pi, \
obs_skills = itemgetter(
'policy_loss',
'alpha_loss',
'alpha',
'q_new_actions',
'policy_mean',
'policy_log_std',
'log_pi',
'obs_skills'
)(policy_ret_dict)
"""
QF Loss
"""
qf_ret_dict = self._qf_loss(
actions=actions,
next_obs=next_obs,
alpha=alpha,
rewards=rewards,
terminals=terminals,
skills=skills,
obs_skills=obs_skills
)
qf1_loss, \
qf2_loss, \
q1_pred, \
q2_pred, \
q_target = itemgetter(
'qf1_loss',
'qf2_loss',
'q1_pred',
'q2_pred',
'q_target'
)(qf_ret_dict)
"""
Update networks
"""
self._update_networks(
df_loss=df_loss,
qf1_loss=qf1_loss,
qf2_loss=qf2_loss,
policy_loss=policy_loss
)
"""
Soft Updates
"""
self._soft_updates()
"""
Save some statistics for eval
"""
self._save_stats(
log_dict=log_dict,
pred_z=None,
log_pi=log_pi,
q_new_actions=q_new_actions,
rewards=rewards,
df_loss=df_loss,
qf1_loss=qf1_loss,
qf2_loss=qf2_loss,
q1_pred=q1_pred,
q2_pred=q2_pred,
q_target=q_target,
policy_mean=policy_mean,
policy_log_std=policy_log_std,
alpha=alpha,
alpha_loss=alpha_loss
)
def _save_stats(self,
log_dict,
pred_z,
log_pi,
q_new_actions,
rewards,
df_loss,
qf1_loss,
qf2_loss,
q1_pred,
q2_pred,
q_target,
policy_mean,
policy_log_std,
alpha,
alpha_loss
):
"""
Save some statistics for eval
"""
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
policy_loss = (log_pi - q_new_actions).mean()
for key, el in log_dict.items():
self.eval_statistics[key] = ptu.get_numpy(el)
self.eval_statistics['Intrinsic Rewards'] = \
np.mean(ptu.get_numpy(rewards))
self.eval_statistics['DF Loss Seq'] = \
np.mean(ptu.get_numpy(df_loss['seq']))
self.eval_statistics['DF Loss Step'] = \
np.mean(ptu.get_numpy(df_loss['step']))
self.eval_statistics['QF1 Loss'] = \
np.mean(ptu.get_numpy(qf1_loss))
self.eval_statistics['QF2 Loss'] = \
np.mean(ptu.get_numpy(qf2_loss))
self.eval_statistics['Policy Loss'] = \
np.mean(ptu.get_numpy(policy_loss))
self.eval_statistics.update(create_stats_ordered_dict(
'Q1 Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q2 Predictions',
ptu.get_numpy(q2_pred),
))
#self.eval_statistics.update(create_stats_ordered_dict(
# 'D Predictions Step',
# ptu.get_numpy(pred_z['step']),
#))
#self.eval_statistics.update(create_stats_ordered_dict(
# 'D Predictions Seq',
# ptu.get_numpy(pred_z['seq']),
#))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Targets',
ptu.get_numpy(q_target),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
if self.use_automatic_entropy_tuning:
self.eval_statistics['Alpha'] = alpha.item()
self.eval_statistics['Alpha Loss'] = alpha_loss.item()
self._n_train_steps_total += 1
| StarcoderdataPython |
6605504 | <gh_stars>0
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import subprocess as sp
import re
import os
from datetime import datetime
DEFAULT_CONFIG = 'debug'
DEFAULT_MAKE_OPERATION = ''
DEFAULT_LOG_FILE_PATH = r"C:\Users\yatesau\Documents\_Logs\project_builder"
DEFAULT_WORKSPACE_PATH = r"C:\Users\yatesau\Documents\momentics_workspace"
DEFAULT_VARIANT_DIRECTORY = "__VariantConfig__"
DEFAULT_BUILD_SCRIPT = ".build_order.txt"
def main():
"""Builds projects based on the .build_order.txt file.
Reads in the .build_order.txt file to determine make operations and
configuration settings for building the projects listed. By default,
every vairiant inside the project's variant.mk file is built
"""
build_script_name = DEFAULT_BUILD_SCRIPT
workspace_path = DEFAULT_WORKSPACE_PATH
variant_directory = DEFAULT_VARIANT_DIRECTORY
master_variant_file = "variant.mk"
build_script_path = os.path.join(workspace_path, build_script_name)
build_operations = parseBuildOrders(build_script_path)
variant_path = os.path.join(
workspace_path, variant_directory, master_variant_file)
variant_files = readBuildVariants(variant_path)
for variant_file in variant_files:
variant_path = os.path.join(
workspace_path, variant_directory, variant_file)
makeVariant(workspace_path, variant_path, build_operations)
def parseBuildOrders(build_script_path):
"""Reads .build_order.txt and returns a list of the make operations.
Args:
build_script_path (string): Path to the .build_order.txt file.
Returns:
dict: Returns a dictionary object containing the build instructions for
each project.
"""
with open(build_script_path) as script:
build_script = script.readlines()
build_script = [x.strip() for x in build_script]
build_operations = {}
config = DEFAULT_CONFIG
make_operation = DEFAULT_MAKE_OPERATION
op_offset = 0
for line in build_script:
        if line == '':
continue
elif line.startswith('config'):
config = line[7:]
elif line.startswith('operation'):
make_operation = line[10:]
elif line[0].isdigit():
op = int(line[0]) + op_offset
path = line[2:]
op_entry = build_operations.get(op, {})
dirs = set()
            if op_entry.get('config', config) != config:
print("Warning: Cannot change configuration in the same",
"operation group. Creating a new operation group.")
op += 1
op_offset += 1
op_entry = op_entry.copy()
            elif op_entry.get('operation',
                              make_operation) != make_operation:
print("Warning: Cannot change make operation in the same",
"operation group. Creating a new operation group.")
op += 1
op_offset += 1
op_entry = op_entry.copy()
else:
dirs = op_entry.get('dirs', dirs)
dirs.add(path)
op_entry['config'] = config
op_entry['operation'] = make_operation
op_entry['dirs'] = dirs
build_operations[op] = op_entry
return build_operations
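# A hedged sketch of the .build_order.txt format this parser expects
# (paths and names are illustrative, not from the source):
#
#     config debug
#     operation clean
#     1 ProjectA
#     1 ProjectB
#     2 ProjectC
#
# -> {1: {'config': 'debug', 'operation': 'clean', 'dirs': {'ProjectA', 'ProjectB'}},
#     2: {'config': 'debug', 'operation': 'clean', 'dirs': {'ProjectC'}}}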
def readBuildVariants(file_path):
"""Reads variants file and returns a list of the variants to build.
Args:
file_path (string): Path to the variants file.
Returns:
list: Returns a list containing each variant listed in the variants
file.
"""
with open(file_path, 'r') as variant_file:
variant_data = variant_file.readlines()
variants = []
for line in variant_data:
if line.startswith('VARIANTS :='):
variants.extend(line.split()[2:])
variants = [variant + '.mk' for variant in variants]
return variants
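# Example variants file line this reader understands (illustrative):
#
#     VARIANTS := foo bar
#
# -> ['foo.mk', 'bar.mk']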
def makeVariant(workspace_path, variant_file_path, operations):
"""Configures and performs a make operation for the specified variant.
Args:
workspace_path (string): Path to the directory in which to run make.
variant_file_path (string): Path to this build's variant file in order
to configure build type.
operations (dict): Dictionary containing build information for this
variant.
"""
sequence = list(operations.keys())
sequence.sort()
prev_config = None
for step in sequence:
make_config = operations[step].get('config')
make_op = operations[step].get('operation')
make_directories = operations[step].get('dirs')
        if make_config != prev_config:
prev_config = make_config
updateBuildType(variant_file_path, make_config)
for project_dir in make_directories:
project_path = os.path.join(workspace_path, project_dir)
doMakeOperation(project_path, make_op)
print('\n***********************', project_dir,
'DONE *************************\n')
def updateBuildType(variant_file_path, build_type):
"""Updates the variant BUILD_TYPE if changed.
Args:
variant_file_path (string): path to the variant file
build_type (string): build configuration type to set
"""
config_updated = None
regex_pattern = 'BUILD_TYPES := (.*)'
variant_data = []
readVariantFile(variant_file_path, variant_data)
for i, line in enumerate(variant_data):
match = re.match(regex_pattern, line)
if match is not None:
            if match.group(1) != build_type:
variant_data[i] = 'BUILD_TYPES := ' + build_type + '\n'
config_updated = True
else:
config_updated = False
break
if config_updated is None:
variant_data.append('BUILD_TYPES := ' + build_type + '\n')
config_updated = True
if config_updated is True:
writeVariantFile(variant_file_path, variant_data)
def readVariantFile(variant_file_path, variant_data):
"""Reads variant file into the supplied list.
Args:
variant_file_path (string): Path to the variant file.
variant_data (list): List to store the lines of the variant file.
"""
with open(variant_file_path, 'r') as variant_file:
variant_data[:] = variant_file.readlines()
def writeVariantFile(variant_file_path, variant_data):
with open(variant_file_path, mode='w') as variant_file:
variant_file.writelines(variant_data)
def doMakeOperation(project_dir_path, operation=''):
"""Performs make operation and generates log files.
Args:
project_dir_path (type): Description of parameter `project_dir_path`.
operation (type): Make rule to execute. Defaults to ''.
"""
command = ['make']
    if operation != '' and operation is not None:
command.append(operation)
pattern = r".*\\(.+)\\*"
match = re.match(pattern, project_dir_path)
log_name = datetime.now().strftime('log_' + match.group(1)
+ '_%H_%M_%d_%m_%Y.log')
log_path = os.path.join(DEFAULT_LOG_FILE_PATH, log_name)
build_comment = project_dir_path + ' make ' + operation
print(build_comment)
os.makedirs(os.path.dirname(log_path), exist_ok=True)
with open(log_path, 'w+') as log_file:
log_file.write(build_comment + '\n\n')
proc = sp.run(command, cwd=project_dir_path,
stdout=log_file, stderr=sp.PIPE, encoding='utf-8')
        if proc.returncode != 0:
print(match.group(1), ':', 'make', operation, ': make error',
proc.returncode, ': check logs for details')
            error_stream = proc.stderr
            print(match.group(1), ':', error_stream)
error_log_path = log_path[:-4]+'_error.log'
with open(error_log_path, 'w+') as error_log:
error_log.write(error_stream)
print(match.group(1), ': Error log created :', error_log_path)
if __name__ == '__main__':
main()
| StarcoderdataPython |
11324293 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import glob
from setuptools import setup, find_packages
import codecs
from haveibeenpwned_asyncio import __version__
scripts = glob.glob("bin/*")
this_directory = os.path.abspath(os.path.dirname(__file__))
long_description_content_type = "text/markdown"
with codecs.open(os.path.join(this_directory, "README.md"), encoding="utf-8") as readme:
    long_description = readme.read()
tests_require = [
"pytest",
"pytest-cov",
"codecov",
"flake8",
"black",
"bandit",
"pytest-runner",
"python-dateutil",
"aioresponses",
"pytest-asyncio",
"asynctest",
"pytest-mock",
"aiohttp-retry",
]
setup(
name="haveibeenpwned-asyncio",
version=__version__,
description="Asyncio and aiohttp based library and CLI to connect to haveibeenpwned.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/c-goosen/haveibeenpwned-asyncio",
author="<NAME>",
author_email="<EMAIL>",
python_requires=">=3.5.0",
include_package_data=True,
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: Unix",
"Operating System :: POSIX :: Linux",
"Operating System :: POSIX :: BSD :: FreeBSD",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development",
"Typing :: Typed",
],
keywords="South Africa ID Number",
packages=find_packages(
include=["haveibeenpwned_async", "haveibeenpwned_async", "bin/*"],
exclude=["docs", "docs-src", "tests", "tests.*", "tutorial"],
),
setup_requires=["aiohttp", "setuptools", "click", "aiohttp-retry"],
install_requires=["aiohttp", "click", "aiohttp-retry"],
test_suite="tests",
tests_require=tests_require,
extras_require={"dev": ["bandit", "black", "flake8"] + tests_require},
scripts=scripts,
zip_safe=True,
)
| StarcoderdataPython |
4881122 | import os
from countryinfo import CountryInfo
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, InlineQueryResultArticle, InputTextMessageContent
from .database import db
START_TEXT = """Hello {} 😌
I am a country information finder bot.
>> `I can find information of any country of the world.`
Made by @FayasNoushad"""
HELP_TEXT = """**Hey, Follow these steps:**
➠ Just send me a country name
➠ Then I will check and send you the informations
**Available Commands**
/start - Checking Bot Online
/help - For more help
/about - For more about me
/status - For bot status
Made by @FayasNoushad"""
ABOUT_TEXT = """--**About Me**-- 😎
🤖 **Name :** [Country Info Bot](https://telegram.me/{})
👨💻 **Developer :** [Fayas](https://github.com/FayasNoushad)
📢 **Channel :** [Fayas Noushad](https://telegram.me/FayasNoushad)
🌐 **Source :** [👉 Click here](https://github.com/FayasNoushad/Country-Info-Bot-V2)
📝 **Language :** [Python3](https://python.org)
🧰 **Framework :** [Pyrogram](https://pyrogram.org)"""
START_BUTTONS = InlineKeyboardMarkup(
[
[
InlineKeyboardButton('⚙ Help', callback_data='help'),
InlineKeyboardButton('About 🔰', callback_data='about'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]
]
)
HELP_BUTTONS = InlineKeyboardMarkup(
[
[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('About 🔰', callback_data='about'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]
]
)
ABOUT_BUTTONS = InlineKeyboardMarkup(
[
[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('Help ⚙', callback_data='help'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]
]
)
ERROR_BUTTON = InlineKeyboardMarkup(
[
[
InlineKeyboardButton('⚙ Help', callback_data='help'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]
]
)
@Client.on_callback_query()
async def cb_handler(bot, update):
if update.data == "home":
await update.message.edit_text(
text=START_TEXT.format(update.from_user.mention),
reply_markup=START_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "help":
await update.message.edit_text(
text=HELP_TEXT,
reply_markup=HELP_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "about":
await update.message.edit_text(
text=ABOUT_TEXT.format((await bot.get_me()).username),
reply_markup=ABOUT_BUTTONS,
disable_web_page_preview=True
)
else:
await update.message.delete()
@Client.on_message(filters.private & filters.command(["start"]))
async def start(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=START_TEXT.format(update.from_user.mention),
disable_web_page_preview=True,
reply_markup=START_BUTTONS
)
@Client.on_message(filters.private & filters.command(["help"]))
async def help(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=HELP_TEXT,
disable_web_page_preview=True,
reply_markup=HELP_BUTTONS
)
@Client.on_message(filters.private & filters.command(["about"]))
async def about(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=ABOUT_TEXT.format((await bot.get_me()).username),
disable_web_page_preview=True,
reply_markup=ABOUT_BUTTONS
)
def country_info(country):
country = CountryInfo(country)
info = f"""\
Name : `{country.name()}`
Native Name : `{country.native_name()}`
Capital : `{country.capital()}`
Population : `{country.population()}`
Region : `{country.region()}`
Sub Region : `{country.subregion()}`
Top Level Domains : `{country.tld()}`
Calling Codes : `{country.calling_codes()}`
Currencies : `{country.currencies()}`
Residence : `{country.demonym()}`
Timezone : `{country.timezones()}`"""
buttons = InlineKeyboardMarkup(
[
[
InlineKeyboardButton('Wikipedia', url=country.wiki()),
InlineKeyboardButton('Google', url=country.google())
],
[
InlineKeyboardButton('Join Channel', url='https://telegram.me/FayasNoushad')
]
]
)
return info, buttons
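# Illustrative note (not from the original source): country_info builds on the
# countryinfo package, so a call like country_info("Japan") is expected to
# return a Markdown block such as "Name : `Japan` ..." together with an
# InlineKeyboardMarkup linking to the country's Wikipedia and Google pages.
# CountryInfo raises for names it does not recognise, so callers should guard
# the call, as the handlers below do.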
@Client.on_message(filters.private & filters.text)
async def countryinfo(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
if update.text.startswith("/"):
return
    try:
        info, buttons = country_info(update.text)
        await bot.send_message(
            chat_id=update.chat.id,
            text=info,
            reply_markup=buttons,
            disable_web_page_preview=True,
            reply_to_message_id=update.message_id
        )
    except Exception as error:
        # CountryInfo raises for unknown names; reuse the otherwise-unused
        # ERROR_BUTTON to tell the user instead of failing silently.
        print(error)
        await update.reply_text(
            text="Sorry, I could not find information for that country.",
            reply_markup=ERROR_BUTTON,
            quote=True
        )
@Client.on_inline_query()
async def countryinfo_inline(bot, update):
join_channel_text = "Please join my channel for more bots and updates"
channel_reply_markup = InlineKeyboardMarkup(
[[InlineKeyboardButton('😎 Join Channel 😎', url='https://telegram.me/FayasNoushad')]]
)
    if update.query == "":
answers = [
InlineQueryResultArticle(
title="Join Channel 😎",
description=join_channel_text,
input_message_content=InputTextMessageContent(join_channel_text),
reply_markup=channel_reply_markup
)
]
    else:
        info, buttons = country_info(update.query)
answers = [
InlineQueryResultArticle(
title=update.query,
description=f"Information of {update.query}",
input_message_content=InputTextMessageContent(info),
reply_markup=buttons
)
]
await bot.answer_inline_query(
        inline_query_id=update.id,
results=answers
)
@Client.on_message(filters.private & filters.command("status"), group=5)
async def status(bot, update):
total_users = await db.total_users_count()
text = "**Bot Status**\n"
text += f"\n**Total Users:** `{total_users}`"
await update.reply_text(
text=text,
quote=True,
disable_web_page_preview=True
)
| StarcoderdataPython |
11253332 | import random
print('Rolling the dice')
com = random.randint(1, 6)
user = random.randint(1, 6)
print('The computer rolled a ' + str(com))
print('You rolled a ' + str(user))
if com > user:
    print('The computer wins')
elif com == user:
    print("It's a tie. Roll again.")
else:
    print('You win.') | StarcoderdataPython |
5095339 | <reponame>CLARIN-PL/embeddings<filename>tests/test_pipelinebuilder.py
import os
import tempfile
from typing import Dict
from embeddings.data.data_loader import DataLoader, Input, Output
from embeddings.data.dataset import Dataset
from embeddings.embedding.embedding import Embedding
from embeddings.evaluator.evaluator import Evaluator
from embeddings.model.base_model import BaseModel
from embeddings.pipeline.pipeline_builder import PipelineBuilder
from embeddings.task.task import Task
from embeddings.transformation.transformation import Transformation
from embeddings.utils.json_dict_persister import JsonPersister
class DummyDataset(Dataset[str]):
pass
class DummyLoader(DataLoader[str, str]):
def load(self, dataset: Dataset[Input]) -> Output:
pass
class DummyTransformation(Transformation[str, int]):
def transform(self, data: Input) -> Output:
pass
class DummyEmbedding(Embedding[int, float]):
def embed(self, data: Input) -> Output:
pass
class DummyTask(Task[float, int]):
def fit_predict(self, data: Input) -> Output:
pass
class DummyEvaluator(Evaluator[int, Dict[str, int]]):
def evaluate(self, data: Input) -> Output:
pass
def test_pipeline_builder() -> None:
temp_file: str = os.path.join(tempfile.gettempdir(), "pipelinebuilder.embeddings.json")
dataset = DummyDataset()
data_loader = DummyLoader()
data_transformation = DummyTransformation()
task = DummyTask()
embedding = DummyEmbedding()
model = BaseModel(embedding, task)
evaluator = DummyEvaluator().persisting(JsonPersister(temp_file))
pipeline = (
PipelineBuilder.with_dataset(dataset)
.with_loader(data_loader)
.with_transformation(data_transformation)
.with_model(model)
.with_evaluator(evaluator)
.build()
)
pipeline.run()
| StarcoderdataPython |
1709323 | # ===========================================================
# ========================= imports =========================
import sys
import datetime
from gnsspy.funcs.funcs import (gpsweekday, datetime2doy)
from gnsspy.doc.IGS import IGS, is_IGS
# ===========================================================
def obsFileName(stationName, date, zipped = False):
    doy = datetime2doy(date, string = True).zfill(3)  # pad day-of-year to 3 digits
    rinexFile = stationName + doy + "0." + str(date.year)[-2:] + "o"
if zipped == True:
rinexFile = rinexFile + ".Z"
return rinexFile
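# Illustrative example (assumption, not part of the original module):
# obsFileName("ista", datetime.date(2020, 1, 5)) would build "ista0050.20o",
# following the classic RINEX-2 short naming scheme ssssdddf.yyt
# (4-char station, 3-digit day of year, session "0", 2-digit year, "o" = observation).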
def sp3FileName(epoch, product="igs"):
now = datetime.date.today() # today's date
timeDif = now - epoch # time difference between rinex epoch and today
if timeDif.days == 0:
raise Warning("IGS orbit files are not released for", epoch.ctime())
sys.exit("Exiting...")
elif 0 < timeDif.days < 13:
print("IGS final orbit file is not released for", epoch.ctime(), "\nDownloading IGS Rapid orbit file...")
product = 'igr' # sp3 file name
gpsWeek, gpsWeekday = gpsweekday(epoch, Datetime = True)
    if len(str(gpsWeek)) == 3:
        sp3File = product.lower() + "0" + str(gpsWeek) + str(gpsWeekday) + ".sp3"
    else:
        sp3File = product.lower() + str(gpsWeek) + str(gpsWeekday) + ".sp3"
return sp3File
def clockFileName(epoch, interval=30, product="cod"):
now = datetime.date.today()
timeDif = now - epoch
if timeDif.days == 0:
raise Warning("IGS clock files are not released for", epoch.ctime())
sys.exit("Exiting...")
elif 0 < timeDif.days < 13:
product = 'igr'
if interval < 30:
product = 'cod'
extension = '.clk_05s'
else:
extension = '.clk'
gpsWeek, gpsWeekday = gpsweekday(epoch, Datetime = True)
    if len(str(gpsWeek)) == 3:
        clockFile = product.lower() + "0" + str(gpsWeek) + str(gpsWeekday) + extension
    else:
        clockFile = product.lower() + str(gpsWeek) + str(gpsWeekday) + extension
return clockFile
def ionFileName(date, product = "igs", zipped = False):
    doy = datetime2doy(date, string = True).zfill(3)  # pad day-of-year to 3 digits
    ionFile = product + "g" + doy + "0." + str(date.year)[-2:] + "i"
if zipped == True:
ionFile = ionFile + ".Z"
return ionFile
def navFileName(stationName, date, zipped = False):
    doy = datetime2doy(date, string = True).zfill(3)  # pad day-of-year to 3 digits
    rinexFile = stationName + doy + "0." + str(date.year)[-2:] + "n"
if zipped == True:
rinexFile = rinexFile + ".Z"
return rinexFile
def nav3FileName(stationName, date, zipped = False):
    doy = datetime2doy(date, string = True).zfill(3) # for RINEX data names
siteInfo = IGS(stationName)
if stationName.upper() == "BRDC":
rinexFile = "BRDC00IGS_R_" + str(date.year) + str(doy) + "0000_01D_MN.rnx"
else:
rinexFile = siteInfo.SITE[0] + "_R_" + str(date.year) + str(doy) + "0000_01D_MN.rnx"
"""
if len(doy) == 1:
rinexFile = stationName + doy + "0." + str(date.year)[-2:] + "p"
elif len(doy) == 2:
rinexFile = stationName + doy + "0." + str(date.year)[-2:] + "p"
else:
rinexFile = stationName + doy + "0." + str(date.year)[-2:] + "p"
"""
if zipped == True:
rinexFile = rinexFile + ".gz"
return rinexFile
def obs3FileName(stationName, date, zipped = False):
    doy = datetime2doy(date, string = True).zfill(3) # for RINEX data names
siteInfo = IGS(stationName)
rinexFile = siteInfo.SITE[0] + "_R_" + str(date.year) + str(doy) + "0000_01D_30S_MO.crx"
if zipped == True:
rinexFile = rinexFile + ".gz"
return rinexFile
| StarcoderdataPython |
1998756 | <reponame>ulnic/weatherSensor_rpi
#!/usr/bin/python
"""
CPU Sensor
"""
import logging
import subprocess
from data.sensors.AbstractSensor import AbstractSensor
from data.Constants import Constant
logger = logging.getLogger(Constant.LOGGER_NAME)
class CPUSensor(AbstractSensor):
"""
CPU Sensor class which reads the CPU / GPU sensor of it's host
"""
def __init__(self, _use_mock_sensor, _json_key):
super(self.__class__, self).__init__(_json_key, _use_mock_sensor)
def read_sensor(self):
"""
Reads the CPU Sensor's temperature value
:return: the read CPU / GPU temperature value
"""
logger.debug("Reading CPU Temperature Sensor")
_cpu_temp = 0
try:
            if self.use_mock_sensor:
                raw_temp = "temp=40.6'C"
            else:
                raw_temp = subprocess.check_output(["vcgencmd", "measure_temp"]).decode("utf-8")
            # strip the "temp=...'C" wrapper for mock and real readings alike
            _cpu_temp = raw_temp.replace("temp=", "").replace("'C", "").rstrip('\n')
        except Exception as e:
            logger.warning("Could not read CPU, due to {0}".format(str(e)))
logger.info("CPU Temperature Reading is {0}".format(_cpu_temp))
return _cpu_temp
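# Minimal usage sketch (illustrative; assumes AbstractSensor simply stores the
# two constructor arguments):
#
#   sensor = CPUSensor(_use_mock_sensor=True, _json_key="cpu_temp")
#   print(sensor.read_sensor())  # -> "40.6" when mocked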
| StarcoderdataPython |
3467626 | <filename>core/ClientManager.py
from core.TCPClient import TCPClient
from experiment.RTTAdaptiveClient import RTTAdaptiveClient
from experiment.PowerAdaptiveClient import PowerAdaptiveClient
from experiment.PowerTWClient import PowerTWClient
from experiment.PowerChangeTWClient import PowerChangeTWClient
from experiment.BestRTTClient import BestRTTClient
from library.Configuration import Configuration
import logging
from library.TimeUtils import TimeUtils
class ClientManager:
def __init__(self, timeResolutionUnit, debug=False):
self.clients = {}
self.nextClientId = 1
self.timeResolutionUnit = timeResolutionUnit
self.config = Configuration()
self.name = "ClientManager"
self.debug = debug
def getDelayBetweenPacketsFromDeliveryRatePerS(self, deliveryRatePerS):
# delay_between_packets is in timeResolutionUnit
resolutionAmountPerSec = TimeUtils.convertTime(1, 's', self.timeResolutionUnit)
delay = resolutionAmountPerSec // deliveryRatePerS
if delay == 0:
raise Exception(f"deliveryRatePerS is so high that delay in {self.timeResolutionUnit} becomes 0")
return delay
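    # Worked example (illustrative): with timeResolutionUnit = 'ms',
    # TimeUtils.convertTime(1, 's', 'ms') == 1000, so deliveryRatePerS = 50
    # yields a delay of 1000 // 50 = 20 time units between packets; rates above
    # 1000 packets/s would floor the integer division to 0 and raise above.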
def createRTTAdaptiveClient(self, rttWindowSize, bandWidthWindowSize, deliveryRatePerS, max_outstanding_packets):
delay_between_packets = self.getDelayBetweenPacketsFromDeliveryRatePerS(deliveryRatePerS)
client = RTTAdaptiveClient(
self.nextClientId,
rttWindowSize=rttWindowSize,
bandWidthWindowSize=bandWidthWindowSize,
delay_between_packets=delay_between_packets,
max_outstanding_packets=max_outstanding_packets,
timeResolutionUnit=self.timeResolutionUnit,
debug=self.debug
)
self.clients[self.nextClientId] = client
self.nextClientId += 1
if self.debug:
logging.info(f"{self.name}: created node {client}")
return client
def createPowerAdaptiveClient(self, rttWindowSize, bandWidthWindowSize, deliveryRatePerS, max_outstanding_packets):
delay_between_packets = self.getDelayBetweenPacketsFromDeliveryRatePerS(deliveryRatePerS)
client = PowerAdaptiveClient(
self.nextClientId,
rttWindowSize=rttWindowSize,
bandWidthWindowSize=bandWidthWindowSize,
delay_between_packets=delay_between_packets,
max_outstanding_packets=max_outstanding_packets,
timeResolutionUnit=self.timeResolutionUnit,
debug=self.debug
)
self.clients[self.nextClientId] = client
self.nextClientId += 1
if self.debug:
logging.info(f"{self.name}: created node {client}")
return client
def createPowerTWClient(self, pollCycle, rttWindowSize, bandWidthWindowSize, deliveryRatePerS, max_outstanding_packets, startAt=0):
delay_between_packets = self.getDelayBetweenPacketsFromDeliveryRatePerS(deliveryRatePerS)
client = PowerTWClient(
self.nextClientId,
pollCycle = pollCycle,
rttWindowSize=rttWindowSize,
bandWidthWindowSize=bandWidthWindowSize,
delay_between_packets=delay_between_packets,
max_outstanding_packets=max_outstanding_packets,
timeResolutionUnit=self.timeResolutionUnit,
startAt=startAt,
debug=self.debug
)
self.clients[self.nextClientId] = client
self.nextClientId += 1
if self.debug:
logging.info(f"{self.name}: created node {client}")
return client
def createPowerChangeTWClient(self, pollCycle, rttWindowSize, bandWidthWindowSize, deliveryRatePerS, max_outstanding_packets, startAt=0):
delay_between_packets = self.getDelayBetweenPacketsFromDeliveryRatePerS(deliveryRatePerS)
client = PowerChangeTWClient(
self.nextClientId,
pollCycle = pollCycle,
rttWindowSize=rttWindowSize,
bandWidthWindowSize=bandWidthWindowSize,
delay_between_packets=delay_between_packets,
max_outstanding_packets=max_outstanding_packets,
timeResolutionUnit=self.timeResolutionUnit,
startAt=startAt,
debug=self.debug
)
self.clients[self.nextClientId] = client
self.nextClientId += 1
if self.debug:
logging.info(f"{self.name}: created node {client}")
return client
def createBestRTTClient(self, pollCycle, rttWindowSize, bandWidthWindowSize, deliveryRatePerS, max_outstanding_packets, startAt=0):
delay_between_packets = self.getDelayBetweenPacketsFromDeliveryRatePerS(deliveryRatePerS)
client = BestRTTClient(
self.nextClientId,
pollCycle = pollCycle,
rttWindowSize=rttWindowSize,
bandWidthWindowSize=bandWidthWindowSize,
delay_between_packets=delay_between_packets,
max_outstanding_packets=max_outstanding_packets,
timeResolutionUnit=self.timeResolutionUnit,
startAt=startAt,
debug=self.debug
)
self.clients[self.nextClientId] = client
self.nextClientId += 1
if self.debug:
logging.info(f"{self.name}: created node {client}")
return client
def createTCPClient(self, deliveryRatePerS, max_outstanding_packets, startAt=0):
delay_between_packets = self.getDelayBetweenPacketsFromDeliveryRatePerS(deliveryRatePerS)
client = TCPClient(
self.nextClientId,
delay_between_packets=delay_between_packets,
max_outstanding_packets=max_outstanding_packets,
timeResolutionUnit=self.timeResolutionUnit,
startAt=startAt,
debug=self.debug
)
self.clients[self.nextClientId] = client
self.nextClientId += 1
if self.debug:
logging.info(f"{self.name}: created node {client}")
return client
def createTCPClients(self, n, deliveryRatePerS, max_outstanding_packets, startAt=0):
clients = []
for _ in range(n):
clients.append(self.createTCPClient(deliveryRatePerS, max_outstanding_packets, startAt=startAt))
return clients | StarcoderdataPython |
5071162 | <filename>server.py
from src.gui.Server import Server
server = Server()
| StarcoderdataPython |
# UNIT 06.D19 - D21
# Object-Oriented Programming (OOP)
print('\n\n---[Slide 19]---------------------')
print('OOP - Constructor')
class Galletita:
sabor = 'Dulce'
color = 'Negra'
chips_chocolate = False
def __init__(self):
        print('A cookie has just been created')
mi_galletita = Galletita()
print('\n\n---[Slide 20.a]---------------------')
print('OOP - Constructor')
class Galletita:
chips_chocolate = False
def __init__(self, sabor, color):
self.sabor = sabor
self.color = color
        print('New cookie with flavor {:7} and color {}'.format(self.sabor, self.color))
mi_galletita1 = Galletita('Dulce', 'Blanca')
mi_galletita1 = Galletita('Salada', 'Marrón')
mi_galletita1 = Galletita('Dulce', 'Verde')
print('\n\n---[Slide 20.b]---------------------')
print('OOP - Constructor - default values')
class Galletita:
chips_chocolate = False
def __init__(self, sabor = 'Dulce', color = 'Marrón'):
self.sabor = sabor
self.color = color
        print('New cookie with flavor {:7} and color {}'.format(self.sabor, self.color))
mi_galletita1 = Galletita('Dulce', 'Blanca')
mi_galletita1 = Galletita('Salada', 'Marrón')
mi_galletita1 = Galletita()
print('\n\n---[Slide 21]---------------------')
print('OOP - Constructor and Destructor')
class Galletita:
chips_chocolate = False
def __init__(self, sabor = 'Dulce', color = 'Marrón'):
self.sabor = sabor
self.color = color
        print('New cookie with flavor {:7} and color {}'.format(self.sabor, self.color))
def __del__(self):
        print('Deleting the cookie with flavor', self.sabor)
mi_galletita1 = Galletita('Dulce', 'Blanca')
del(mi_galletita1)
| StarcoderdataPython |
29501 | import math
import sys
from fractions import Fraction
from random import uniform, randint
import decimal as dec
# NOTE: this simple linear-scan version is shadowed by the faster
# doubling-step implementation defined below.
def log10_floor(f):
b, k = 1, -1
while b <= f:
b *= 10
k += 1
return k
def log10_ceil(f):
b, k = 1, 0
while b < f:
b *= 10
k += 1
return k
def log10_floor(f):
if f <= 0: return -1
t, b, k, k_step = 1, 10, 0, 1
while True:
t1 = t * b
if t1 > f:
if k_step == 1:
break
k_step = 1
b = 10
else:
b *= 10
k += k_step
k_step += 1
t = t1
return k
# for i in range(20):
# f = 10 ** i
# print(f'{f}: {log10_floor(f)}, {log10_floor2(f)}')
# print(log10_floor2(100))
# sys.exit(0)
def str_of_pos_float_hi0(prec, x):
assert x > 0
q = Fraction(x)
n = int(q)
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
r, e = n // b, k - prec
else:
b = 10 ** (prec - k)
r, e = n * b + int((q - n) * b), k - prec
else:
k = log10_floor(int(1 / q))
b = 10 ** (k + prec)
r, e = int(q * b), -(k + prec)
if r * Fraction(10) ** e < q:
r += 1
s = str(r)
if len(s) > prec:
s = s[:-1]
e += 1
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
def str_of_pos_float_hi1(prec, x):
assert x > 0
m, exp = math.frexp(x)
m, exp = int(math.ldexp(m, 53)), exp - 53
mask = (1 << abs(exp)) - 1
if exp >= 0:
n, rem = m << exp, 0
else:
n, rem = m >> -exp, m & mask
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
(r, rem2), e = divmod(n, b), k - prec
rem2 = rem2 or rem
else:
b = 10 ** (prec - k)
t = rem * b
t, rem2 = t >> -exp, t & mask
r, e = n * b + t, k - prec
else:
k = log10_floor((1 << -exp) // rem)
b = 10 ** (k + prec)
t = rem * b
r, rem2, e = t >> -exp, t & mask, -(k + prec)
if rem2:
r += 1
s = str(r)
assert prec <= len(s) <= prec + 1
if len(s) > prec:
s = s[:-1]
e += 1
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
def str_of_pos_float_lo(prec, x):
assert x > 0
m, exp = math.frexp(x)
m, exp = int(math.ldexp(m, 53)), exp - 53
if exp >= 0:
n, rem = m << exp, 0
else:
mask = (1 << abs(exp)) - 1
n, rem = m >> -exp, m & mask
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
r, e = n // b, k - prec
else:
b = 10 ** (prec - k)
t = (rem * b) >> -exp
r, e = n * b + t, k - prec
else:
k = log10_floor((1 << -exp) // rem)
b = 10 ** (k + prec)
        t = rem * b
        r, e = t >> -exp, -(k + prec)
s = str(r)
assert len(s) == prec
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
# print(str_of_pos_float_hi(2, 230454523525e+100))
def decimal_test_hi(prec, x, s=None):
if s is None:
s = str_of_pos_float_hi1(prec, x)
with dec.localcontext() as ctx:
ctx.prec = prec
ctx.rounding = dec.ROUND_UP
v = +dec.Decimal(x)
t = +dec.Decimal(s)
if v != t:
print(f'Error (hi): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def decimal_test_lo(prec, x, s=None):
if s is None:
s = str_of_pos_float_lo(prec, x)
with dec.localcontext() as ctx:
ctx.prec = prec
ctx.rounding = dec.ROUND_DOWN
v = +dec.Decimal(x)
t = +dec.Decimal(s)
if v != t:
print(f'Error (lo): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def tests(n, a, b):
for _ in range(n):
x = uniform(a, b)
prec = randint(1, 15)
decimal_test_hi(prec, x)
decimal_test_lo(prec, x)
def tests2(n):
for _ in range(n):
prec = randint(1, 15)
t = randint(-100, 100)
decimal_test_hi(prec, 2.0 ** t)
decimal_test_lo(prec, 2.0 ** t)
tests(10000, 1e-300, 1)
tests(10000, 0.5, 1000)
tests(10000, 1e+10, 1e+100)
tests(10000, 1e-300, 1e+300)
tests2(10000)
#print(str_of_pos_float_hi1(1, 0.47))
#print(str_of_pos_float_hi1(1, 0.5))
# print(str_of_pos_float_hi1(100, 0.3))
def check_ocaml_results(fname):
print(f'Checking: {fname}')
with open(fname, 'r') as f:
for line in f:
x, prec, s0, s1, s_lo = line.strip().split(',')
decimal_test_hi(int(prec), float(x), s0)
decimal_test_hi(int(prec), float(x), s1)
decimal_test_lo(int(prec), float(x), s_lo)
check_ocaml_results('out.txt') | StarcoderdataPython |
1871168 | # coding=utf-8
from bs4 import BeautifulSoup
import re
def unstandard_count(soup,tag_name,tag,standard_format):
subjects=soup.select(tag_name)
print("length subs info: ",len(subjects))
sum_all = 0
for sub in subjects:
tags=sub.find_all(tag)
style_tag=sub.find_all(tag,{"style":re.compile(standard_format)})
print("subs length:{} and length style_tag:{}".format(len(tags),len(style_tag)))
tag_standards=len(style_tag)
sum_all+= len(tags)-tag_standards
print("在查找到的标签范围内不匹配的值为:",sum_all)
#unstandard_count(html,"table","col",col_style)
#check levels title
def unstandard_title(soup,tag_name,child_tag,levels,standard_format_num,standard_format_char,standard_format_num2=None):
subjects=soup.select('%s[class="%d a DocDefaults "]' %(tag_name,levels))
print("{} level title select nums: {}".format(levels,len(subjects)))
total_items = 0
cur_level_num = 0
cur_level_char = 0
for sub in subjects:
sub_tags = sub.select(child_tag)
total_items += len(sub_tags)
        child_tag_nums = sub.find_all(child_tag, {"style": re.compile(standard_format_num)})
        if levels > 1:
            standard_format_num2 = highLevel_num_format
        # guard the levels == 1 case, where standard_format_num2 may still be None
        child_tag_nums2 = sub.find_all(child_tag, {"style": re.compile(standard_format_num2)}) if standard_format_num2 else []
for child_tag_num in child_tag_nums:
if len(re.sub('\w','',child_tag_num.text))<=1:
cur_level_num += 1
for child_tag_num in child_tag_nums2:
if len(re.sub('\w','',child_tag_num.text))<len(child_tag_num.text):
cur_level_num += 1
child_tag_chars = sub.find_all(child_tag,{"style":standard_format_char})
for _ in child_tag_chars:
cur_level_char += 1
#print("match the length:{} and length style_tag:{}".format(len(tags),len(style_tag)))
#tag_standards=len(style_tag)
#sum_all+= len(tags)-tag_standards
non_match_items = total_items - cur_level_char - cur_level_num
print("当前标题级别{}--总的查找条目:{},在查找到的标签范围内不匹配的值为:{}".format(levels,total_items,non_match_items))
#return subjects
"""
#check table font
span_info=[];ss_info=[]
style_info = re.compile('color: #000000;font-size: 11.0pt;;font-family: "SimSun";')
pattern = re.compile(".*color.")
style_info = 'color'
count = 0;count_style=0
td_style = "background-color: #FFC000;border-bottom-style: \
solid;border-bottom-width: 1px;border-bottom-color: \
#000000;border-left-style: solid;border-left-width: \
1px;border-left-color: #000000;border-right-style: \
solid;border-right-width: 1px;border-right-color: \
#000000;border-top-style: solid;border-top-width: \
1px;border-top-color: #000000;vertical-align: bottom;"
col_style = "width: 13.85%;"
tr_style = "height: 0.19in;"
sum_all = 0
#check col style:width,#check tr standard
tables = html.select('table[id^="docx4j"]')
print("length table",len(tables))
for table in tables:
childs = table.colgroup.children
style_col = table.find_all("col",{"style":re.compile("width: 13.85%;")})
print("length style_col:",len(style_col))
col_standards = len(style_col)
#print("childs",childs)
col_nums = 0
for child in childs:
col_nums += 1
print("col_standard={} and col_nums={}".format(col_standard,col_nums))
sum_all += col_nums-col_standards
print("all tables non-standard col numbers: ",sum_all)
#check td font-size
for table in table_info:
table_style = table.select('[id^="docx4j"]')
table_style = table.find({"id":re.compile('^docx4j')})
if table_style:
count += 1
td_style = table_style.find({"style":td_style})
print("td_style",td_style)
col_style = table_style.find(style=col_style)
print("col_style",col_style)
tr_style = table_style.find(attrs={"style":tr_style})
print("tr_style",tr_style)
if td_style and col_style and tr_style:
count_style += 1
spans = table.find_all('span')
spans_standards = table.find_all('span',attrs={"style":re.compile('font-size: 11.0pt;;font-family: ')})
#print(spans[0])
for span in spans:
span_info.append(span.text)
for ss in spans_standards:
ss_info.append(ss.text)
print("count={},count_style={} and span_info length={},span_style length={}".format(count,count_style,len(span_info),len(ss_info)))
non_standards = count-count_style + len(span_info) - len(ss_info)
print("表格式不符合规范的记录数:",non_standards)
"""
if __name__ == "__main__":
#check title
loc_format = "text-align: center;margin-top: 5mm;margin-bottom: 0.43in;"
title_font = "font-weight: bold;font-size: 16.0pt;"
html = BeautifulSoup(open('data/doc2html.html','r',encoding='utf-8'),'lxml')
title_tag = html.find("p")
standard_title_loc = html.find(attrs={"style":loc_format})
count_title = False
if standard_title_loc:
standard_title = standard_title_loc.find("span",{"style":title_font})
if standard_title:
count_title = True
print("the title match the standard")
#levels title check
title_char_format = "font-size: 12.0pt;"
title_num_format = "font-size: 12.0pt;;font-family: 'Calibri';"
highLevel_num_format = "font-size: 12.0pt;;font-family: 'Cambria';white-space:pre-wrap;"
unstandard_title(html,"p","span",2,title_num_format,title_char_format)
| StarcoderdataPython |
3241303 | <filename>src/forecastga/models/template.py
#! /usr/bin/env python
# coding: utf-8
#
"""ARIMA Model"""
import pmdarima as pm

from forecastga.models.base import BaseModel


class ARIMA_Model(BaseModel):
"""ARIMA Model Class"""
def __init__(self, config):
super().__init__(config)
"""
Available model attributes:
self.seasonality (str)
self.forecast_len (int)
self.freq (str)
self.train_proportion (float)
self.in_sample (bool)
self.GPU (bool)
self.dataframe (pd.Series)
self.train_df (pd.Series)
self.forecast_df (pd.Series) or None
self.seasons (int)
self.periods (int)
"""
def train(self):
self.model = pm.auto_arima(self.train_df, seasonal=True, m=self.seasons)
def forecast(self):
        if self.in_sample:
self.prediction = self.model.predict(self.forecast_len)
# Prediction can be a list, np.Array, or pandas series.
else:
# Do something else if outsample.
pass
@staticmethod
def format_input(df, forecast_length, constant=None):
pass
@staticmethod
def format_output(df, forecast_length, constant=None):
pass
| StarcoderdataPython |
5099533 | <gh_stars>0
# -*- coding: utf-8 -*-
def compare_word(targets, word, distance_penalty=0.0):
"""
Select the best matching word out of a list of targets.
:param targets: A list of words from which the best match is chosen
:param word: Word to compare with
:param distance_penalty: A Penalty that is applied to the normalized similarity score.
It is the product of index in the target array and the given value. This can be
used to find triggers at the beginning of a sentence.
:return: Tuple of the index of the best match and the calculated score for this word.
"""
scores = list()
for index, e in enumerate(targets):
scores.append({"i": index, "s": score_word(e, word) + index * distance_penalty})
scores = sorted(scores, key=lambda k: (k["s"]))
if len(scores) == 0:
return -1, -1
else:
return scores[0]["i"], scores[0]["s"]
def score_word(target, word):
"""
Generate a score reflecting the similarity between a target and a given word.
    Beginning with the first letter, search for occurrences of the letters
    in the target word. When a letter is missing from either the target word
    or the given word, or an occurrence comes before a previous occurrence of a
    different letter, a penalty score is increased.
    The result is normalized by the length of the given word.
    A perfect match results in a score of 0. A good partial match results in
    a score less than one. For moderate strictness a score between 0.5 and 1.0
    should be used.
:return: The normalized penalty score
"""
last_index = -1
score = 0
not_found = 0
word = word.lower()
target = list(target.lower())
index_list = list()
for e in word:
index = find_letter(target, e, last_index)
if index == -1 or index in index_list:
not_found += 1
continue
elif index < last_index:
score += (last_index - index) * 0.5
index_list.append(index)
last_index = max(index, last_index)
score += not_found * 2
score += (len(target) - len(index_list)) * 1
return score*1.0/len(word)
def find_letter(letters, l, index):
"""
Find the first occurrence of a letter in a word after a given index.
Searches forward in the word after index-th letter. If no matching letter is found,
search backwards for the latest occurrence before the index-th letter.
:return: index of the found occurrence, otherwise -1
"""
try:
index_offset = letters.index(l, index + 1)
except ValueError:
letters.reverse()
try:
index_offset = len(letters) - letters.index(l, len(letters) - index) - 1
except ValueError:
index_offset = -1
return index_offset
def compare_sentence(targets, sentence):
"""
Select the best matching sentence out of a list of targets.
:param targets: List of sentences
:param sentence: Sentence to be found
:return: Triple of the index of the best match, score for that
match and list of used words (as indices) from the target
"""
scores = list()
for index, e in enumerate(targets):
score, index_list = score_sentence(e, sentence)
scores.append({"i": index, "s": score, "l": index_list})
scores = sorted(scores, key=lambda k: (k["s"]))
return scores[0]["i"], scores[0]["s"], scores[0]["l"]
def score_sentence(target, sentence, distance_penalty=0,
additional_target_penalty=1, word_match_penalty=0):
"""
Generate a score reflecting the similarity between a target and a given sentence.
Search for matching words and compose a score of penalties.
Compare to scoreWord() for a stripped down version of the algorithm.
:param target: Sentence to compare to
:param sentence: Sentence to be compared
:param distance_penalty: Penalty for skipping words
:param additional_target_penalty: Penalty for unmatched words in the target
:param word_match_penalty: Modifier to be applied to the raw word scores
:return: Tuple of normalized score and a list of words used from the
target that are matched
"""
last_index = -1
score = 0
not_found = 0
found = 0
index_list = list()
target = target.split()
sentence = sentence.split()
for e in sentence:
index, word_score = find_word(target, e, last_index)
if index == -1 or index in index_list:
not_found += 1
continue
elif index < last_index:
score += (last_index - index) * 0.5
else:
score += (index - last_index - 1) * distance_penalty
score += word_score * word_match_penalty
last_index = max(index, last_index)
found += 1
index_list.append(index)
score += not_found * 2
score += (len(target) - found) * additional_target_penalty
return score*1.0/len(sentence), index_list
def find_word(words, w, index, distance_penalty=0.0):
"""
Find the first occurrence of a word in a sentence after a given word.
Searches forward and if no match is found backward around the index-th
word in the sentence.
:param words: List of words that should be searched
:param w: Word to be searched for
:param index: Position in the array at which the search begins
:param distance_penalty: Penalty applied to find the first best matching
word in the targets. See compareWord() for more information
:return: Tuple of index and score for the best matching word
"""
index = min(len(words), max(index, -1))
index_offset = -1
if index < len(words) - 1:
index_offset, rel_score = compare_word(words[index + 1:], w, distance_penalty)
index_offset += index + 1
else:
rel_score = 2
if rel_score > 1:
if index > 0:
words = words[:index]
words.reverse()
index_offset, rel_score = compare_word(words, w, distance_penalty)
index_offset = index - index_offset - 1
else:
rel_score = 2
if rel_score > 1:
index_offset = -1
return index_offset, rel_score
def find_trigger(string, trigger):
"""
Find the best matching word at the beginning of a sentence.
"""
index, score = find_word(string.split(), trigger, -1, 0.5)
return index
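if __name__ == "__main__":
    # Illustrative demo (not part of the original module): exercise the fuzzy
    # matchers on a tiny vocabulary. Scores near 0 indicate close matches.
    targets = ["weather", "wikipedia", "timer"]
    print(compare_word(targets, "wether"))   # expected to favour "weather"
    print(score_sentence("what is the weather", "whats the wether"))
    print(find_trigger("computer play some music", "computer"))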
| StarcoderdataPython |
107275 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# set database name
app.database = "agents.db"
# load the config
app.config["DEBUG"] = False
app.config["SECRET_KEY"] = <KEY>'
app.config["SQLALCHEMY_DATABASE_URI"] = 'sqlite:///agents.db'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# create the sqlalchemy object
db = SQLAlchemy(app)
from c2.models import Agent, Commands
db.create_all()
db.session.commit()
from c2.api.bot_communication import bp, app
app.register_blueprint(bp)
| StarcoderdataPython |
3386272 | <filename>utils.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#We create our preprocessing function
def preprocess(string):
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
#Remove punctuation and lower all characters
words = nltk.word_tokenize(string)
words = [word.lower() for word in words if word.isalnum()]
#Remove stop words
stop_words = set(stopwords.words('english'))
words2 = [i for i in words if not i in stop_words]
#Stemming
stemmer= PorterStemmer()
final = [stemmer.stem(word) for word in words2]
return final
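# Illustrative usage (not in the original notebook): preprocess lowercases,
# drops punctuation and English stop words, then stems, so a call like
#   preprocess("The cats are running quickly!")
# should return something close to ['cat', 'run', 'quickli'].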
# In[ ]:
def load_files():
    import json  # json.load is used below but was never imported
    # we load our simple index
    with open('simple_index.json') as json_file:
        simple_index = json.load(json_file)
    # we load our count index
    with open('count_index.json') as json_file:
        count_index = json.load(json_file)
    # we load our doc_count_words dictionary
    with open('doc_count_words.json') as json_file:
        doc_count_words = json.load(json_file)
    # we load our inverted_index
    with open('inverted_index.json') as json_file:
        inverted_index = json.load(json_file)
    return simple_index, count_index, doc_count_words, inverted_index
# In[ ]:
| StarcoderdataPython |
11359911 | import logging
import MySQLdb
import time
#I do not claim to write beautiful code
def fixFormatString(fmt):
final = ""
inc = 0
for part in fmt.split("%s"):
final += part + "'{" + str(inc) + "}'"
inc += 1
return final[:-len(str("'{"+str(inc - 1)+"}'"))]
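# Illustrative example (not part of the original file):
#   fixFormatString("INSERT INTO t (a, b) VALUES(%s, %s);")
# returns "INSERT INTO t (a, b) VALUES('{0}', '{1}');", which the logging calls
# below fill in with str.format so the parameterized SQL can be inspected.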
#Michael has signed off on not sanitizing inputs
class DoorKarmaDatabase:
'''Front end to the door karma database
Allows the middleware writer a much easier time interfacing with MySQL so that errors can be avoided '''
def __init__(self, host, username, password, dbname, tablename):
logging.info("Initializing database connection")
try:
self.db = MySQLdb.connect(host, username, password, dbname)
logging.debug("Connected; Acquiring cursor")
self.cur = self.db.cursor()
logging.debug("Selecting database...")
self.cur.execute("USE {0};".format(dbname))
logging.debug("Successfully selected")
        except MySQLdb.OperationalError as e:
logging.critical("Database error: {0}".format(str(e)))
raise e
self.tablename = tablename
self.fromuuidToID = dict()
def closeConnection(self):
'''This will immediately close the database connection. Call on cleanup'''
logging.info("Closing database connection")
self.db.close()
def userRequest(self, fromuuid, submitterName, submitterPlatform, submitterVersion):
"""Adds a new user request log to the database for later finishing. Stores the request ID into the dict"""
logging.info("User {0} ({1}::{2}) requested".format(
submitterName, submitterPlatform, submitterVersion))
cmd = "INSERT INTO " + self.tablename + " (rFrom, platSubType, platSubVer, platSubUUID) VALUES(%s, %s, %s, %s);"
try:
logging.debug("About to execute \n{0}".format(fixFormatString(cmd).format(submitterName, submitterPlatform, submitterVersion, fromuuid)))
self.cur.execute(cmd, (submitterName, submitterPlatform, submitterVersion, fromuuid))
logging.debug("Successfully executed; Committing")
self.db.commit()
logging.debug("Committed")
        except MySQLdb.OperationalError as e:
logging.debug("Commit failed; Rolling back")
self.db.rollback()
logging.critical("Database error: {0}".format(str(e)))
raise e
self.fromuuidToID[fromuuid] = self.cur.lastrowid
def userFilled(self, fromuuid, byuuid, fillerName, fillerPlatform, fillerVersion):
"""Fills the user request from before with the remaining information"""
logging.info("User {0} is filling {1}'s request ({2}::{3})".format(
byuuid, fromuuid, fillerPlatform, fillerVersion))
cmd = "UPDATE " + self.tablename + " SET rFill=%s, tFill=NOW(), platFillType=%s, platFillVer=%s, platFillUUID=%s WHERE eventNumber=%s;"
try:
logging.debug("About to execute \"{0}\"".format(fixFormatString(cmd).format(fillerName, fillerPlatform, fillerVersion, byuuid, self.fromuuidToID[fromuuid])))
self.cur.execute(cmd, (fillerName, fillerPlatform, fillerVersion, byuuid, self.fromuuidToID[fromuuid]))
logging.debug("Successfully executed; Committing")
self.db.commit()
logging.debug("Committed")
        except MySQLdb.OperationalError as e:
logging.debug("Commit failed; Rolling back")
self.db.rollback()
logging.critical("Database error: {0}".format(str(e)))
raise e
| StarcoderdataPython |
1804030 | <filename>examples/hex_board.py
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from hex_maze import Board
panel = Board(9, 9, entry_pos=(0, 2))
panel[4][4].omit()
panel.omit_tiles(
[
(0, 0),
(0, 1),
(1, 1),
(0, 0),
(1, 0),
(2, 0),
(3, 0)
]
)
panel.omit_tiles(
[
(0, 7),
(0, 8),
(1, 8),
(2, 8)
]
)
panel.omit_tiles(
[
(8, 8),
(8, 7),
(7, 8),
(6, 8),
]
)
panel.omit_tiles(
[
(8, 0),
(8, 1),
(7, 1),
(8, 0),
(7, 0),
(6, 0),
(5, 0),
]
)
panel.shuffle()
print('shuffle done')
panel.to_svg('hexa.svg')
| StarcoderdataPython |
272049 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from udkm1Dsim import Atom
from udkm1Dsim import UnitCell
from udkm1Dsim import Structure
from udkm1Dsim import u
u.default_format = '~P'
def test_structure():
Dy = Atom('Dy')
uc = UnitCell('uc', 'Unit Cell', 3.1*u.angstrom, heat_capacity=10*(u.J/u.kg/u.K),
lin_therm_exp=1e-6/u.K, therm_cond=1*(u.W/u.m/u.K),
opt_pen_depth=11*u.nm, sound_vel=5*(u.nm/u.ps))
uc.add_atom(Dy, '0*(s+1)')
uc.add_atom(Dy, '0.5*(s+1)')
S = Structure('sample')
assert S.name == 'sample'
| StarcoderdataPython |
1775829 | '''
@author: HeQingsong
@date: 2020-09-19 21:18
@filename: test.py
@project: huobi_Python
@python version: 3.7 by Anaconda
@description:
'''
from huobi.client.generic import GenericClient, CandlestickInterval
from huobi.client.market import MarketClient
from mycode.market import MarketQuotationUtils
generic_client = GenericClient()
market_client = MarketClient()
# def test(symbol):
# list = MarketQuotationUtils.get_market_quotation(symbol, size=2)
# if list != None and len(list) == 2:
# cur = list[0]
# last1 = list[1]
# if last1.vol != 0 and cur.vol / last1.vol > 2 and cur.close / last1.close > 1.01:
# print(symbol)
#
#
# if __name__ == '__main__':
# symbol_list = BasicInfoUtils.get_all_symbol()
# print(symbol_list)
# for symbol in symbol_list:
# if symbol[len(symbol) - 4:] == 'usdt':
# test(symbol)
def test(symbol):
list = MarketQuotationUtils.get_market_quotation(symbol, wtime=CandlestickInterval.MIN15, size=2000)
    if list is not None and len(list) > 0:
for i in range(5, len(list) - 2):
cur = list[i]
last1 = list[i + 1]
last2 = list[i + 2]
if last1.vol != 0 and cur.vol / last1.vol > 5 and last1.vol / last2.vol > 5 and cur.close / last1.close > 1.04:
rate = max(list[i - 1].close, list[i - 2].close, list[i - 3].close) / cur.close - 1
print("%s:%s:$d", symbol, i, rate)
if __name__ == '__main__':
test('reqbtc')
# symbol_list = BasicInfoUtils.get_all_symbol()
# print(symbol_list)
# for symbol in symbol_list:
# if symbol[len(symbol) - 4:] == 'usdt':
# test(symbol)
| StarcoderdataPython |
3380200 | # Copyright (c) 2018 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
These functions validate RP_ID and APP_ID according to simplified TLD+1 rules,
using a bundled copy of the public suffix list fetched from:
https://publicsuffix.org/list/public_suffix_list.dat
Advanced APP_ID values pointing to JSON files containing valid facets are not
supported by this implementation.
"""
from __future__ import absolute_import, unicode_literals
import os
import six
from six.moves.urllib.parse import urlparse
tld_fname = os.path.join(os.path.dirname(__file__), "public_suffix_list.dat")
with open(tld_fname, "rb") as f:
suffixes = [
entry
for entry in (line.decode("utf8").strip() for line in f.readlines())
if entry and not entry.startswith("//")
]
def verify_rp_id(rp_id, origin):
"""Checks if a Webauthn RP ID is usable for a given origin.
:param rp_id: The RP ID to validate.
:param origin: The origin of the request.
:return: True if the RP ID is usable by the origin, False if not.
"""
if isinstance(rp_id, six.binary_type):
rp_id = rp_id.decode()
if not rp_id:
return False
if isinstance(origin, six.binary_type):
origin = origin.decode()
url = urlparse(origin)
if url.scheme != "https":
return False
host = url.hostname
if host == rp_id:
return True
if host.endswith("." + rp_id) and rp_id not in suffixes:
return True
return False
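# Illustrative examples (assumed behaviour, not taken from the library docs):
#
#   verify_rp_id("example.com", "https://login.example.com")  # True: registrable suffix
#   verify_rp_id("example.com", "http://example.com")         # False: origin not HTTPS
#   verify_rp_id("co.uk", "https://example.co.uk")            # False: public suffix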
def verify_app_id(app_id, origin):
"""Checks if a FIDO U2F App ID is usable for a given origin.
:param app_id: The App ID to validate.
:param origin: The origin of the request.
:return: True if the App ID is usable by the origin, False if not.
"""
if isinstance(app_id, six.binary_type):
app_id = app_id.decode()
url = urlparse(app_id)
if url.scheme != "https":
return False
return verify_rp_id(url.hostname, origin)
| StarcoderdataPython |
1951451 | <filename>humann2/tests/functional_tests_biom_tools.py
import unittest
import tempfile
import os
import cfg
import utils
class TestFunctionalHumann2ToolsBiom(unittest.TestCase):
"""
Test humann2.tools
"""
def test_humann2_join_tables_biom(self):
"""
Test joining biom files with humann2_join_tables
"""
# create a temp file
file_out, new_file=tempfile.mkstemp(prefix="humann2_temp")
# join the files
utils.run_command(["humann2_join_tables","--input",
cfg.data_folder,"--output",new_file,"--file_name",
cfg.multi_sample_genefamilies_split_basename_biom,"--verbose"])
# check the joined file is as expected
self.assertTrue(utils.check_output(new_file))
# remove the temp file
utils.remove_temp_file(new_file)
    def test_humann2_split_tables_biom(self):
        """
        Test splitting a biom file with humann2_split_tables
        """
input_file=cfg.multi_sample_genefamilies_biom
# create a temp directory
temp_directory=utils.create_temp_folder("split_tables_biom")
# split the file
utils.run_command(["humann2_split_table","--input", input_file,
"--output",temp_directory,"--verbose"])
# test the split files are as expected
for file in cfg.multi_sample_split_files_biom:
self.assertTrue(utils.check_output(file,temp_directory))
# remove the temp folder
utils.remove_temp_folder(temp_directory)
def test_humann2_regroup_table_uniref50_rxn_biom(self):
"""
Test regrouping the biom file with humann2_regroup_table
Test with uniref50 to reactions mappings
"""
# create a temp file
file_out, new_file=tempfile.mkstemp(prefix="humann2_temp")
# run the command
utils.run_command(["humann2_regroup_table","--input",cfg.regroup_input_biom,"--output",
new_file,"--groups","uniref50_rxn"])
# check the output is as expected
self.assertTrue(utils.check_output(new_file))
# remove the temp file
utils.remove_temp_file(new_file)
def test_humann2_rename_table_uniref50_biom(self):
"""
Test renaming the biom file entries with humann2_rename_table
Test with uniref50 names
"""
# create a temp file
file_out, new_file=tempfile.mkstemp(prefix="humann2_temp")
# run the command
utils.run_command(["humann2_rename_table","--input",cfg.rename_input_biom,"--output",
new_file,"--names","uniref50"])
# check the output is as expected
self.assertTrue(utils.check_output(new_file))
# remove the temp file
utils.remove_temp_file(new_file)
def test_humann2_renorm_table_cpm_biom(self):
"""
Test renorm the biom file entries with humann2_renorm_table
Test with cpm
"""
# create a temp file
file_out, new_file=tempfile.mkstemp(prefix="humann2_temp")
# run the command
utils.run_command(["humann2_renorm_table","--input",cfg.renorm_input_biom,"--output",
new_file,"--units","cpm"])
# check the output is as expected
self.assertTrue(utils.check_output(new_file))
# remove the temp file
utils.remove_temp_file(new_file)
def test_humann2_renorm_table_cpm_biom_output(self):
"""
Test renorm the biom file entries with humann2_renorm_table
Test with cpm
Test with biom output
"""
# create a temp file
file_out, new_file=tempfile.mkstemp(prefix="humann2_temp",suffix=".biom")
# run the command
utils.run_command(["humann2_renorm_table","--input",cfg.renorm_input_biom,"--output",
new_file,"--units","cpm"])
# check the output is as expected
self.assertTrue(utils.check_output(new_file))
# remove the temp file
utils.remove_temp_file(new_file)
| StarcoderdataPython |
3206121 | <reponame>lzy7071/wc_kb<gh_stars>0
""" Tests of the knowledge base IO
:Author: <NAME> <<EMAIL>>
:Author: <NAME> <<EMAIL>>
:Date: 2018-02-07
:Copyright: 2018, Karr Lab
:License: MIT
"""
from wc_kb import core, prokaryote_schema
from wc_kb import io
import Bio.Seq
import Bio.SeqRecord
import filecmp
import obj_model.io
import os
import random
import shutil
import tempfile
import unittest
import wc_utils.workbook.io
class TestIO(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
self.seq_path = os.path.join(self.dir, 'seq.fna')
self.kb = kb = core.KnowledgeBase(id='genus_species', name='Genus species', version='0.0.1')
cell = kb.cell = core.Cell(id='genus_species_cell')
dna_seqs = []
for i_chr in range(5):
dna = core.DnaSpeciesType(id='chr_{}'.format(i_chr + 1), sequence_path=self.seq_path)
cell.species_types.append(dna)
seq_len = random.randint(100, 200)
bases = 'ACGT'
seq = ''
for i_nt in range(seq_len):
seq += bases[random.randint(0, 3)]
dna_seqs.append(Bio.SeqRecord.SeqRecord(
Bio.Seq.Seq(seq), dna.id))
for i_trn in range(5):
trn = prokaryote_schema.TranscriptionUnitLocus(id='tu_{}_{}'.format(i_chr + 1, i_trn + 1))
trn.cell = cell
dna.loci.append(trn)
trn.start = random.randint(100, 200)
trn.end = ((trn.start + random.randint(1, 200) - 1) % seq_len) + 1
trn.strand = core.PolymerStrand.positive
with open(self.seq_path, 'w') as file:
writer = Bio.SeqIO.FastaIO.FastaWriter(
file, wrap=70, record2title=lambda record: record.id)
writer.write_file(dna_seqs)
def tearDown(self):
shutil.rmtree(self.dir)
def test_write_read(self):
core_path = os.path.join(self.dir, 'core.xlsx')
writer = io.Writer()
writer.run(self.kb, core_path, set_repo_metadata_from_path=False)
reader = io.Reader()
kb = reader.run(core_path, self.seq_path)
core_path = os.path.join(self.dir, 'core2.xlsx')
seq_path = os.path.join(self.dir, 'seq2.fna')
writer.run(kb, core_path, seq_path, set_repo_metadata_from_path=False)
self.assertTrue(self.kb.is_equal(kb))
self.assertTrue(filecmp.cmp(self.seq_path, seq_path, shallow=False))
def test_read_write_prokaryote(self):
fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
core_path = os.path.join(fixtures, 'core.xlsx')
seq_path = os.path.join(fixtures, 'seq.fna')
reader = io.Reader()
kb = reader.run(core_path, seq_path)
tmp_core_path = os.path.join(self.dir, 'tmp_core.xlsx')
tmp_seq_path = os.path.join(self.dir, 'tmp_seq.fna')
writer = io.Writer()
writer.run(kb, tmp_core_path, tmp_seq_path, set_repo_metadata_from_path=False)
tmp_kb = reader.run(tmp_core_path, seq_path)
self.assertTrue(kb.is_equal(tmp_kb))
self.assertTrue(filecmp.cmp(tmp_seq_path, seq_path, shallow=False))
def test_read_write_eukaryote(self):
fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
core_path = os.path.join(fixtures, 'eukaryote_core.xlsx')
seq_path = os.path.join(fixtures, 'eukaryote_seq.fna')
reader = io.Reader()
kb = reader.run(core_path, seq_path, schema=False)
tmp_core_path = os.path.join(self.dir, 'tmp_eukaryote_core.xlsx')
tmp_seq_path = os.path.join(self.dir, 'tmp_eukaryote_seq.fna')
writer = io.Writer()
writer.run(kb, tmp_core_path, tmp_seq_path, schema=False, set_repo_metadata_from_path=False)
tmp_kb = reader.run(tmp_core_path, seq_path, schema=False)
self.assertTrue(kb.is_equal(tmp_kb))
self.assertTrue(filecmp.cmp(tmp_seq_path, seq_path, shallow=False))
def test_rewrite_seq_path_in_read_write(self):
path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
path_core_2 = os.path.join(self.dir, 'core_2.xlsx')
path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
path_seq_2 = os.path.join(self.dir, 'seq_2.fna')
io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
kb1 = io.Reader().run(path_core_1, path_seq_1)
kb2 = io.Reader().run(path_core_1, path_seq_1, rewrite_seq_path=False)
self.assertFalse(kb1.is_equal(self.kb))
self.assertTrue(kb2.is_equal(self.kb))
self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))
io.Writer().run(self.kb, path_core_2, path_seq_2, rewrite_seq_path=True, set_repo_metadata_from_path=False)
kb3 = io.Reader().run(path_core_2, self.seq_path)
kb4 = io.Reader().run(path_core_2, self.seq_path, rewrite_seq_path=False)
self.assertFalse(kb3.is_equal(self.kb))
self.assertTrue(kb4.is_equal(self.kb))
self.assertTrue(filecmp.cmp(path_seq_2, self.seq_path, shallow=False))
def test_write_with_repo_md(self):
_, core_path = tempfile.mkstemp(suffix='.xlsx', dir='.')
_, seq_path = tempfile.mkstemp(suffix='.fna', dir='.')
self.assertEqual(self.kb.url, '')
writer = io.Writer()
writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=True)
self.assertIn(self.kb.url, [
'https://github.com/KarrLab/wc_kb.git',
'ssh://git@github.com/KarrLab/wc_kb.git',
'git@github.com:KarrLab/wc_kb.git',
])
os.remove(core_path)
os.remove(seq_path)
def test_write_without_cell_relationships(self):
core_path = os.path.join(self.dir, 'core.xlsx')
seq_path = os.path.join(self.dir, 'test_seq.fna')
with open(seq_path, 'w') as file:
file.write('>chr_x\nACGT\n')
dna = core.DnaSpeciesType(id='chr_x', sequence_path=seq_path)
self.kb.cell.species_types.append(dna)
trn = prokaryote_schema.TranscriptionUnitLocus(id='tu_x_0')
dna.loci.append(trn)
trn.cell = None
writer = io.Writer()
with self.assertRaisesRegex(ValueError, 'must be set to the instance of `Cell`'):
writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=False)
def test_write_read_sloppy(self):
core_path = os.path.join(self.dir, 'core.xlsx')
seq_path = os.path.join(self.dir, 'test_seq.fna')
writer = io.Writer()
writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=False)
wb = wc_utils.workbook.io.read(core_path)
row = wb['Knowledge base'].pop(0)
wb['Knowledge base'].insert(1, row)
wc_utils.workbook.io.write(core_path, wb)
reader = io.Reader()
with self.assertRaisesRegex(ValueError, "The columns of worksheet 'Knowledge base' must be defined in this order"):
kb = reader.run(core_path, self.seq_path)
kb = reader.run(core_path, self.seq_path, strict=False)
self.assertTrue(kb.is_equal(self.kb))
self.assertTrue(filecmp.cmp(self.seq_path, seq_path, shallow=False))
def test_reader_no_kb(self):
core_path = os.path.join(self.dir, 'core.xlsx')
obj_model.io.WorkbookWriter().run(core_path, [], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
seq_path = os.path.join(self.dir, 'test_seq.fna')
with open(seq_path, 'w') as file:
pass
kb = io.Reader().run(core_path, seq_path)
self.assertEqual(kb, None)
obj_model.io.WorkbookWriter().run(core_path, [core.Cell(id='cell')], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
with self.assertRaisesRegex(ValueError, 'cannot contain instances'):
io.Reader().run(core_path, seq_path)
def test_reader_error_multiple_kbs(self):
kb1 = core.KnowledgeBase(id='kb1', name='kb1', version='0.0.1')
kb2 = core.KnowledgeBase(id='kb2', name='kb2', version='0.0.1')
core_path = os.path.join(self.dir, 'core.xlsx')
obj_model.io.WorkbookWriter().run(core_path, [kb1, kb2], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
seq_path = os.path.join(self.dir, 'test_seq.fna')
with open(seq_path, 'w') as file:
pass
with self.assertRaisesRegex(ValueError, ' should define one knowledge base'):
io.Reader().run(core_path, seq_path)
def test_reader_error_no_cell(self):
kb = core.KnowledgeBase(id='kb', name='kb1', version='0.0.1')
dna = core.DnaSpeciesType(id='chr')
core_path = os.path.join(self.dir, 'core.xlsx')
obj_model.io.WorkbookWriter().run(core_path, [kb, dna], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
seq_path = os.path.join(self.dir, 'test_seq.fna')
with open(seq_path, 'w') as file:
pass
with self.assertRaisesRegex(ValueError, 'cannot contain instances'):
io.Reader().run(core_path, seq_path)
def test_reader_error_multiple_cells(self):
kb = core.KnowledgeBase(id='kb', name='kb1', version='0.0.1')
cell1 = core.Cell(id='cell1', name='cell1')
cell2 = core.Cell(id='cell2', name='cell2')
core_path = os.path.join(self.dir, 'core.xlsx')
obj_model.io.WorkbookWriter().run(core_path, [kb, cell1, cell2], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
seq_path = os.path.join(self.dir, 'test_seq.fna')
with open(seq_path, 'w') as file:
pass
with self.assertRaisesRegex(ValueError, ' should define one cell'):
io.Reader().run(core_path, seq_path)
def test_convert(self):
path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
path_core_2 = os.path.join(self.dir, 'core_2-*.csv')
path_core_3 = os.path.join(self.dir, 'core_3.xlsx')
path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
path_seq_2 = os.path.join(self.dir, 'seq_2.fna')
path_seq_3 = os.path.join(self.dir, 'seq_3.fna')
io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))
io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2)
kb = io.Reader().run(path_core_2, self.seq_path)
self.assertTrue(kb.is_equal(self.kb))
self.assertTrue(filecmp.cmp(path_seq_1, path_seq_2, shallow=False))
io.convert(path_core_2, path_seq_2, path_core_3, path_seq_3)
kb = io.Reader().run(path_core_3, self.seq_path)
self.assertTrue(kb.is_equal(self.kb))
self.assertTrue(filecmp.cmp(path_seq_2, path_seq_3, shallow=False))
def test_convert_sloppy(self):
path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
path_core_2 = os.path.join(self.dir, 'core_2-*.csv')
path_core_3 = os.path.join(self.dir, 'core_3.xlsx')
path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
path_seq_2 = os.path.join(self.dir, 'seq_2.fna')
path_seq_3 = os.path.join(self.dir, 'seq_3.fna')
io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))
wb = wc_utils.workbook.io.read(path_core_1)
row = wb['Knowledge base'].pop(0)
wb['Knowledge base'].insert(1, row)
wc_utils.workbook.io.write(path_core_1, wb)
with self.assertRaisesRegex(ValueError, "The columns of worksheet 'Knowledge base' must be defined in this order"):
io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2)
io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2, strict=False)
kb = io.Reader().run(path_core_2, self.seq_path)
self.assertTrue(kb.is_equal(self.kb))
self.assertTrue(filecmp.cmp(path_seq_1, path_seq_2, shallow=False))
io.convert(path_core_2, path_seq_2, path_core_3, path_seq_3)
kb = io.Reader().run(path_core_3, self.seq_path)
self.assertTrue(kb.is_equal(self.kb))
self.assertTrue(filecmp.cmp(path_seq_2, path_seq_3, shallow=False))
def test_create_template(self):
path_core = os.path.join(self.dir, 'template.xlsx')
path_seq = os.path.join(self.dir, 'template_seq.fna')
io.create_template(path_core, path_seq, set_repo_metadata_from_path=False)
kb = io.Reader().run(path_core, path_seq)
def test_validate_implicit_relationships(self):
class TestModel(obj_model.Model):
id = obj_model.StringAttribute(primary=True, unique=True)
try:
core.KnowledgeBase.Meta.attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='a')
with self.assertRaisesRegex(Exception, 'Relationships from `KnowledgeBase` not supported:'):
io.Writer.validate_implicit_relationships()
finally:
core.KnowledgeBase.Meta.attributes.pop('test')
try:
core.KnowledgeBase.Meta.related_attributes['test'] = obj_model.OneToManyAttribute(core.Cell, related_name='c')
with self.assertRaisesRegex(Exception,
'Relationships to `KnowledgeBase` that are not one-to-one are prohibited'):
io.Writer.validate_implicit_relationships()
finally:
core.KnowledgeBase.Meta.related_attributes.pop('test')
try:
core.Cell.Meta.attributes['test'] = obj_model.OneToManyAttribute(TestModel, related_name='c')
with self.assertRaisesRegex(Exception,
'Relationships from `Cell` to `KnowledgeBase` that are not one-to-one are prohibited:'):
io.Writer.validate_implicit_relationships()
finally:
core.Cell.Meta.attributes.pop('test')
try:
core.Cell.Meta.attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='d')
with self.assertRaisesRegex(Exception,
'Relationships from `Cell` to classes other than `KnowledgeBase` are prohibited:'):
io.Writer.validate_implicit_relationships()
finally:
core.Cell.Meta.attributes.pop('test')
try:
core.Cell.Meta.related_attributes['test'] = obj_model.OneToManyAttribute(TestModel, related_name='d')
with self.assertRaisesRegex(Exception,
'Relationships to `Cell` that are not one-to-one or many-to-one are prohibited: '):
io.Writer.validate_implicit_relationships()
finally:
core.Cell.Meta.related_attributes.pop('test')
try:
core.KnowledgeBase.Meta.related_attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='b')
with self.assertRaisesRegex(Exception,
'Relationships to `KnowledgeBase` from classes other than `Cell` are prohibited'):
io.Writer.validate_implicit_relationships()
finally:
core.KnowledgeBase.Meta.related_attributes.pop('test')
| StarcoderdataPython |
6602415 | <gh_stars>1-10
import json
from django_pds.conf import settings
from .manager import BaseManager
OWNER = 'owner'
class GenericInsertCommandManager(BaseManager):
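    # __modify_ids replaces the special OWNER placeholder in a default
    # permission id list with the id of the user performing the insert.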
def __modify_ids(self, __defaults, user_id):
items = []
for _id in __defaults:
if _id == OWNER:
items.append(user_id)
else:
items.append(_id)
return items
def json_load(self, json_string):
try:
return False, json.loads(json_string)
except BaseException as e:
return True, str(e)
def already_exists(self, document_name, document_id):
try:
data = self.get_document(document_name).objects(ItemId=document_id)
return data.count() > 0
except BaseException as e:
return False
def insert_one(self, document_name, data, user_id=None, default_permission=None):
try:
base_instance = self.is_base_instance(document_name)
simple_base_instance = self.is_simple_base_doc_instance(document_name)
if not base_instance and not simple_base_instance:
return True, 'Document type must be `BaseDocument` ' \
'or `SimpleBaseDocument` ' \
'from django_pds.core.base Module'
Model = self.get_document(document_name)
mod = Model(**data)
if base_instance:
if user_id:
mod.CreatedBy = user_id
mod.LastUpdateBy = user_id
                if default_permission:
                    for item in settings.SECURITY_IDS_ATTRIBUTES:
                        ids = default_permission.get(item, [])
                        ids = self.__modify_ids(ids, user_id)
                        setattr(mod, item, ids)
if default_permission:
for item in settings.SECURITY_ROLES_ATTRIBUTES:
roles = default_permission.get(item, [])
setattr(mod, item, roles)
setattr(mod, 'RolesAllowedToWrite', [])
setattr(mod, 'IdsAllowedToWrite', [])
mod.save()
return False, mod.ItemId
except BaseException as e:
return True, e
def insert_many(self, document_name, data_array, user_id=None, default_permission=None):
results = []
for data in data_array:
err, item_id = self.insert_one(document_name, data, user_id, default_permission)
if err:
results.append(None)
else:
results.append(item_id)
return results
| StarcoderdataPython |
4973659 | import cv2
import numpy as np
from pupil_apriltags import Detector
import time
import glob
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from skimage.transform import resize
at_detector = Detector(families='tag36h11',nthreads=1,quad_decimate=1.0,quad_sigma=0.0,refine_edges=1,decode_sharpening=0.25,debug=0)
# overhead_path = '/home/dev/scratch/armtui/arm_fixed_base_and_z_200/images/*.png'
overhead_path = 'C:\\Users\\diangd\\Downloads\\drive-download-20201214T142926Z-001\\*.png'
overhead_files = glob.glob(overhead_path)
overhead_files.sort()
targets = []
# arm_path = '/home/dev/scratch/armtui/arm_fixed_base_and_z_200/pipics/*.png'
# arm_path = '/home/dev/hiro_data/12_6/pipics/*.png'
# arm_files = glob.glob(arm_path)
# arm_files.sort()
images = []
grayscale_images = []
target_fields = []
for i, overheadname in enumerate(overhead_files):
# arm_img = cv2.imread(arm_files[i])
# arm_img_gray = cv2.imread(arm_files[i], cv2.IMREAD_GRAYSCALE)
overhead_img = cv2.imread(overheadname, cv2.IMREAD_GRAYSCALE)
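    # Binarize the frame so the tag's black/white pattern is high-contrast
    # before running the AprilTag detector.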
_, overhead_img = cv2.threshold(overhead_img, 50,255,cv2.THRESH_BINARY)
tags = at_detector.detect(overhead_img, estimate_tag_pose=False, camera_params=None, tag_size=None)
if len(tags) > 0:
try:
# images.append(arm_img[475:575,790:790+100,:]) #crop to square
# images.append(resize(arm_img, (125,200)))
# grayscale_images.append(arm_img_gray[475:575,790:790+100])
# grayscale_images.append(resize(arm_img_gray, (125,200)))
loc = tags[0].center
# import pdb; pdb.set_trace()
targets.append(loc)
tf = np.zeros((480,600))
tf[int(loc[1]),int(loc[0])] = 1
target_fields.append(tf)
except:
import pdb; pdb.set_trace()
        # Mark the detected center and corners on the thresholded image
        # (numpy images are indexed [row, col], i.e. [y, x]).
        overhead_img[int(loc[1])][int(loc[0])] = 0
        for corner in tags[0].corners:
            x, y = map(int, corner)
            overhead_img[y][x] = 0
        fig, ax = plt.subplots(1)
        ax.imshow(overhead_img)
        circle = patches.Circle(loc, radius=5, edgecolor='r', facecolor='r')
        rect = patches.Polygon(tags[0].corners, linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
        ax.add_patch(circle)
        plt.show()
#import pdb; pdb.set_trace()
else:
print(f"No detection in {overheadname}")
# np.save('armpicscolor.npy', np.array(images))
# np.save('armpicsgray.npy', np.array(grayscale_images))
# np.save('locs.npy', np.array(targets))
# np.save('loc_fields.npy', np.array(target_fields))
print(f"Total Number of Samples: {len(targets)}") #3394
# while(t<end_time):
# t = time.time() - t0
# # take webcam picture
# ret, frame = webcam.read() # Capture frame-by-frame
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Our operations on the frame come here
# cv2.imshow('frame',gray)# Display the resulting frame
# cv2.imwrite('/home/pi/HRCD/Apriltag Test Pictures/pic.jpg', gray)
# if cv2.waitKey(1) & 0xFF == ord('q'): # need to include this for preview to work
# break
# img = cv2.imread(imagepath, cv2.IMREAD_GRAYSCALE)
# #gray_image = np.array(ImageOps.grayscale(img))
# tags = at_detector.detect(img, estimate_tag_pose=False, camera_params=None, tag_size=None)
# if tags:
# print("tag detected")
# else:
# print("OH NO! NO TAG! PANIC!")
# webcam.release()
# cv2.destroyAllWindows()
"""
import numpy as np
import cv2
import time
from io import BytesIO
from PIL import Image, ImageOps
from pupil_apriltags import Detector
stream = BytesIO()
at_detector = Detector(families='tag36h11',nthreads=1,quad_decimate=1.0,quad_sigma=0.0,refine_edges=1,decode_sharpening=0.25,debug=0)
webcam = cv2.VideoCapture(1)
t0 = time.time() #start time
t = 0 # time that's passed in seconds
end_time = 60*0.5 # total time for data collection
# data collection loop
while(t<end_time):
t = time.time() - t0
# take webcam picture
ret, frame = webcam.read() # Capture frame-by-frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Our operations on the frame come here
cv2.imshow('frame',gray)# Display the resulting frame
if cv2.waitKey(1) & 0xFF == ord('q'):
break
gray_image = np.array(ImageOps.grayscale(frame))
tags = at_detector.detect(frame, estimate_tag_pose=False, camera_params=None, tag_size=None)
print(tags)
time.sleep(2)
webcam.release()
cv2.destroyAllWindows()
""" | StarcoderdataPython |
6469870 | #!/usr/bin/env python
"""
read trained net : model+weights
read test data from HDF5
infer for test data
Inference always runs on 1 IPU
./predict_one.py -m outY -X
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import numpy as np
import torch
import time
import sys,os
import logging
from toolbox.Model import MyModelWithLoss
from toolbox.Util_IOfunc import read_yaml, write_yaml, restore_checkpoint
from toolbox.Dataloader_h5 import get_data_loader
import poptorch
import popdist.poptorch
sys.path.append(os.path.relpath("../torch/toolbox/"))
from Plotter import Plotter_NeuronInverter
import argparse
#...!...!..................
def get_parser():
parser = argparse.ArgumentParser()
#parser.add_argument("--facility", default='corigpu', type=str)
parser.add_argument('--venue', dest='formatVenue', choices=['prod','poster'], default='prod',help=" output quality/arangement")
parser.add_argument("-m","--modelPath", default='out/', help="trained model ")
parser.add_argument("-o", "--outPath", default='same',help="output path for plots and tables")
parser.add_argument( "-X","--noXterm", dest='noXterm', action='store_true', default=False, help="disable X-term for batch mode")
parser.add_argument("-n", "--numSamples", type=int, default=None, help="limit samples to predict")
parser.add_argument("-v","--verbosity",type=int,choices=[0, 1, 2], help="increase output verbosity", default=1, dest='verb')
parser.add_argument("--cellName", type=str, default=None, help="alternative cell shortName ")
args = parser.parse_args()
args.prjName='neurInfer'
for arg in vars(args): print( 'myArg:',arg, getattr(args, arg))
return args
#...!...!..................
def load_model4infer(sumMD,modelPath):
# ... assemble model
device = torch.device("cuda")
    # load the entire model
modelF = os.path.join(modelPath, sumMD['train_params']['blank_model'])
stateF= os.path.join(modelPath, sumMD['train_params']['checkpoint_name'])
print('M: load model:',modelF)
myModel = torch.load(modelF)
modelWloss=MyModelWithLoss(myModel)
print('M: tmp popOpt re-init')
popOpts = popdist.poptorch.Options()
popOpts.deviceIterations(1)
cachePath='./exec_cache'
popOpts.enableExecutableCaching(cachePath)
print("\n----------- restore model for inference, state= ",stateF)
startEpoch=restore_checkpoint( stateF, modelWloss)
model4infer = poptorch.inferenceModel(modelWloss.eval(), options=popOpts)
return model4infer,popOpts
#...!...!..................
def model_infer(model,test_loader,sumMD):
criterion =torch.nn.MSELoss() # Mean Squared Loss
test_loss = 0
# prepare output container, Thorsten's idea
num_samp=len(test_loader.dataset)
outputSize=sumMD['train_params']['model']['outputSize']
print('predict for num_samp=',num_samp,', outputSize=',outputSize)
# clever list-->numpy conversion, Thorsten's idea
Uall=np.zeros([num_samp,outputSize],dtype=np.float32)
Zall=np.zeros([num_samp,outputSize],dtype=np.float32)
nEve=0
nStep=0
cpuLossF=torch.nn.MSELoss(reduction='none' )#returns a loss per element
for j,(data, target) in enumerate(test_loader):
        pred, loss_op = model(data, target)
loss=np.mean(loss_op.numpy())
print(j,'=j, type: target=',type(target),target.shape,'pred',type(pred),pred.shape)
cpuLoss2D=cpuLossF(pred,target).numpy()
#print(j,'=j, type: cpuLoss2D=',type(cpuLoss2D),cpuLoss2D.shape)
cpuLossV=np.mean(cpuLoss2D,axis=1)
#print(j,'=j, type: cpuLossV=',type(cpuLossV),cpuLossV.shape)
cpuLoss=np.mean(cpuLossV)
print('pred j=%d ipuLoss=%.4f, cpuLoss=%.4f Shapes: pred=%s, loss=%s, cpuLossV=%s'%(j,loss,cpuLoss,str(pred.shape),str(loss.shape),str(cpuLossV.shape)))
'''
data_dev, target_dev = data.to(device), target.to(device)
output_dev = model(data_dev)
lossOp=criterion(output_dev, target_dev)
print('qq',lossOp,len(test_loader.dataset),len(test_loader)); ok55
output=output_dev.cpu()
'''
test_loss += loss
nEve2=nEve+target.shape[0]
print('nn',nEve,nEve2)
Uall[nEve:nEve2,:]=target[:]
Zall[nEve:nEve2,:]=pred[:]
nEve=nEve2
nStep+=1
test_loss /= nStep
    print('inference done, nEve=%d nStep=%d loss=%.4f'%(nEve,nStep,test_loss))
return test_loss,Uall,Zall
#=================================
#=================================
# M A I N
#=================================
#=================================
if __name__ == '__main__':
args=get_parser()
logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.INFO)
if args.outPath=='same' : args.outPath=args.modelPath
sumF=args.modelPath+'/sum_train.yaml'
sumMD = read_yaml( sumF)
parMD=sumMD['train_params']
inpMD=sumMD['input_meta']
model4infer,popOpts=load_model4infer(sumMD,args.modelPath)
#1print(model)
if args.cellName!=None:
parMD['cell_name']=args.cellName
if args.numSamples!=None:
parMD['max_samples_per_epoch' ] = args.numSamples
domain='test'
parMD['world_size']=1
data_loader = get_data_loader(parMD, inpMD,domain, popOpts, verb=args.verb)
startT=time.time()
loss,U,Z=model_infer(model4infer,data_loader,sumMD)
predTime=time.time()-startT
print('M: infer : Average loss: %.4f events=%d , elaT=%.2f min\n'% (loss, Z.shape[0],predTime/60.))
sumRec={}
sumRec['domain']=domain
sumRec[domain+'LossMSE']=float(loss)
sumRec['predTime']=predTime
sumRec['numSamples']=U.shape[0]
sumRec['lossThrHi']=0.50 # for tagging plots
sumRec['inpShape']=sumMD['train_params']['model']['inputShape']
sumRec['short_name']=sumMD['train_params']['cell_name']
sumRec['modelDesign']=sumMD['train_params']['model']['myId']
sumRec['trainRanks']=sumMD['train_params']['world_size']
sumRec['trainTime']=sumMD['trainTime_sec']
sumRec['loss_valid']= sumMD['loss_valid']
#
# - - - - only plotting code is below - - - - -
plot=Plotter_NeuronInverter(args,inpMD ,sumRec )
plot.param_residua2D(U,Z)
write_yaml(sumRec, args.outPath+'/sum_pred.yaml')
#1plot.params1D(U,'true U',figId=7)
plot.params1D(Z,'pred Z',figId=8)
if 0:
print('input data example, it will plot waveforms')
dlit=iter(data_loader)
xx, yy = next(dlit)
#1xx, yy = next(dlit) #another sample
print('batch, X,Y;',xx.shape,xx.dtype,yy.shape,yy.dtype)
print('Y[:2]',yy[:2])
plot.frames_vsTime(xx,yy,9)
plot.display_all('predict')
| StarcoderdataPython |
4989307 | import json
from flask import Flask, Response
app = Flask(__name__)
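# Payroll rules assumed by this example: deductions are 8% of the base salary;
# day overtime carries a 12.5% surcharge and night overtime a 17.5% surcharge
# on the base hourly rate (salary / 30 days / 8 hours).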
class empleados:
sueldo = 0
hdiurnas = 0
hnocturnas = 0
auxilio = 0
totalDed = 0
totalDev = 0
total = 0
diurnas = 0
nocturnas = 0
def __init__(self, sueldo, diurnas, nocturnas):
self.sueldo = sueldo
self.hdiurnas = diurnas
self.hnocturnas = nocturnas
def calculoDeduciones(self):
self.totalDed = self.sueldo*0.08
def calculoDevengado(self):
hora = int((self.sueldo/30)/8)
self.totalDev = int(((hora*self.hdiurnas)*0.125)+((hora*self.hnocturnas)*0.175))
def totalF(self):
self.total = (self.sueldo + self.totalDev) - self.totalDed
@app.route('/user/<int:sueldo>/<int:diurnas>/<int:nocturnas>', methods=['GET', 'POST'])
def proceso(sueldo,diurnas,nocturnas):
emp1 = empleados(sueldo,diurnas,nocturnas)
emp1.calculoDeduciones()
emp1.calculoDevengado()
emp1.totalF()
lista = { "empleado":[{ "sueldo" : emp1.total, "devengadoAdicional" : emp1.totalDev, "deducido" : emp1.totalDed}]}
return Response(json.dumps(lista), mimetype='application/json')
if __name__ == "__main__":
app.run(debug=True, ssl_context='adhoc')
#app.run(debug=True) | StarcoderdataPython |
3590984 | # coding=utf-8
import streamlit as st
import numpy as np
import pandas as pd
import pandas_profiling
from streamlit_pandas_profiling import st_profile_report
from matplotlib.image import imread
#######################################################################
# Loading data (labelled)
#---------------------------------------------------------------------
st.set_page_config(layout="wide")
def app():
"""Fonction générant la page 1 du dashboard. Ne prend pas de paramètre en entrée.
"""
logo = imread("./app_pages/logo.jpeg")
st.sidebar.image(logo)
st.sidebar.write("")
st.sidebar.write("")
st.title("Aperçu de la population de prospect (labellisée)")
df = pd.read_csv("./dashboard_data/df_train.csv").astype("object")
#Sample data for Customer profile Analysis
colonnes_pandas_profiling = [
"CODE_GENDER",
"AGE_INT",
"NAME_TYPE_SUITE",
"NAME_EDUCATION_TYPE",
"NAME_INCOME_TYPE",
"ORGANIZATION_TYPE",
"OCCUPATION_TYPE",
"NAME_HOUSING_TYPE",
"CNT_CHILDREN",
"AMT_INCOME_TOTAL",
"AMT_GOODS_PRICE"
]
df_pandas_profiling = df.loc[:,colonnes_pandas_profiling]
#Pandas Profiling Report
st.write("Analyse exploratoire d'un échantillon du dataset labellisé de prospect (seules 11 des 101 variables du dataset sont présentées afin de comprendre le profil des prospects)")
pr = df_pandas_profiling.profile_report(minimal=True)
st_profile_report(pr)
if __name__ == "__main__":
app() | StarcoderdataPython |
1733561 | <reponame>AI-Factor-y/Attendance-automation<filename>eduserver automation/timetable.py
## this code is written and managed by abhinav -p (@_ai_factory)
## <EMAIL>
days=["monday","tuesday","wednesday","thursday","friday"];
#active classes are those classes for which you have to mark attendance
#only the timetable slots whose subject is in active_classes are handled by the
#automation script
#classes not in active_classes are ignored by the script
active_classes=["co","math","dsa","hwl"];
#timetable data; each slot has the format [[hour,minute],"subject_name"]
#the order of slots doesn't matter
#give each slot's time as the time when the attendance link appears on eduserver
#for that specific course
timetable=[
["monday", [[8,0],"free"] , [[8,56],"co"] , [[10,10],"free"], [[11,6],"math"], [[13,0],"free"], [[14,0],"env"] ],
["tuesday", [[8,0],"math"] , [[9,0],"env"] ,[[10,15],"dsa"] ,[[11,15],"free"],[[13,0],"free"], [[14,0],"dsal"] ],
["wednesday", [[8,0],"free"] , [[9,0],"free"], [[10,11],"co"] ,[[11,15],"free"], [[13,0],"free"], [[14,0],"valueed"] ],
["thursday", [[8,0],"free"] , [[20,21],"math"] , [[10,15],"env"] ,[[11,8],"dsa"], [[13,0],"free"], [[13,56],"hwl"] ],
["friday", [[7,56],"dsa"] , [[9,0],"free"] , [[10,15],"free"] ,[[11,11],"co"], [[13,0],"free"], [[14,0],"valueed"] ]
]
#provide the url for each course's attendance page
#it is the address in the address bar when you open the attendance section of
#a course on eduserver
# format ===> https://eduserver.nitc.ac.in/mod/attendance/view.php?id=$$$$$
course_web_url={
"co": "https://eduserver.nitc.ac.in/mod/attendance/view.php?id=28943",
"math": "https://eduserver.nitc.ac.in/mod/attendance/view.php?id=26861",
"dsa" : "https://eduserver.nitc.ac.in/mod/attendance/view.php?id=28265",
"hwl" :"https://eduserver.nitc.ac.in/mod/attendance/view.php?id=28300"
}
| StarcoderdataPython |
6402438 | <filename>high_lvl_networking/networking.py
"""
a script for simplifying the communication between the server and the client
server:
setup() -> inits the server
new_connection() -> adds a new connection to the server with the given id
get() -> tries to get a message from the client with the specified id
post() -> sends a message to the clients with the given id(s)
client:
setup() -> inits the client
connect() -> connects to the server with the specified ip and port
get() -> tries to get a message from the server
post() -> sends a message to the server
"""
# TODO: update pypi
from socket import *
from pickle import loads, dumps
from typing import Any
class NetworkingException(Exception):
def __str__(self):
return 'high_lvl_networking.NetworkingException'
class Server:
def __init__(self, debug: bool = True) -> None:
self.ip: str = None; self.port: int = None
self.connections: dict[str, socket] = {}
self.debug: bool = debug
def setup(self, ip: str = gethostbyname(gethostname()), port: int = 1234, listen_to: int = 5) -> None:
"""
Setup the server
"""
self.ip: str = ip; self.port: int = port
# create server
self.server: socket = socket(AF_INET, SOCK_STREAM)
self.server.bind((self.ip, self.port))
self.server.listen(listen_to)
self.__print(f"Server open with IP {self.ip} on Port '{self.port}'\n")
def new_connection(self, id: str) -> None:
"""
Add a new connection named by the given id
"""
# validate id
if id in self.connections:
raise NetworkingException("Id already used") # there is already a connection with this id
else:
self.__print(f"Waiting for new connection with id {id}")
self.connections[id], (remoteinf) = self.server.accept() # accepting the next connection
self.__print(f"{remoteinf[0]}:{remoteinf[1]} connected with id '{id}'\n")
def get(self, id: str) -> Any:
"""
Receive data from the client with the specified id
"""
try:
return loads(self.connections[id].recv(1024))
except ConnectionResetError: # connection lost, client is not available anymore
raise NetworkingException("Connection lost")
def post(self, ids: list[str], content: str) -> None:
"""
post a message to the specified clients
"""
for id in ids:
if id in self.connections: # loop through all given ids
try:
self.connections[id].send(dumps(content)) # send content
except ConnectionResetError: # connection lost, client is not available anymore
raise NetworkingException("Connection lost")
def __print(self, string):
if self.debug:
print(string)
def __str__(self):
return f'<Networking Server ip={self.ip} port={self.port}>'
class Client:
def __init__(self, debug: bool = True) -> None:
self.ip, self.port = None, None
self.debug = debug
def setup(self, ip: str = gethostbyname(gethostname()), port: int = 1234):
"""
Setup the client
"""
self.ip, self.port = ip, port
# create client and connect to server
self.client = socket(AF_INET, SOCK_STREAM)
self.connect()
def connect(self) -> None:
"""
Connect to the server
"""
try:
self.client.connect((self.ip, self.port)) # connect to the server
except ConnectionRefusedError: # server is not available
raise NetworkingException("The server refused a connection")
self.__print(f"Connected to {self.ip} on port {self.port}\n")
def get(self) -> Any:
"""
Get data from the server
"""
try:
return loads(self.client.recv(1024)) # get a message from the server
except ConnectionResetError: # connection lost, server is not available anymore
raise NetworkingException("Connection lost")
def post(self, content: str) -> None:
"""
Send data to the server
"""
try:
self.client.send(dumps(content)) # send a message to the server
except ConnectionResetError: # connection lost, server is not available anymore
raise NetworkingException("Connection lost")
def __print(self, string):
if self.debug:
print(string)
def __str__(self):
return f'<Networking Client ip={self.ip} port={self.port}>'
| StarcoderdataPython |
4908456 | import argparse
from pathlib import Path
from catbird.core import dump, load
from stanza.server import CoreNLPClient
from tqdm import tqdm
def extract_triples(client, text):
ann = client.annotate(text)
triples = []
for sentence in ann.sentence:
for triple in sentence.openieTriple:
triples.append(
{
"subject": triple.subject,
"relation": triple.relation,
"object": triple.object,
}
)
return triples
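# A triple counts as redundant when another triple with the same relation has
# an object containing every word of this triple's object, so
# disambiguate_triples keeps only the most complete extraction per relation.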
def is_object_substring(t, triples):
object_substring_list = [
t != u
and t["relation"] == u["relation"]
and all(w in u["object"] for w in t["object"].split())
for u in triples
]
return any(object_substring_list)
def disambiguate_triples(triples):
return [t for t in triples if not is_object_substring(t, triples)]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Triple Extraction")
parser.add_argument(
"--root-path",
type=str,
default="./data/mscoco",
help="specify the root path of dataset",
)
parser.add_argument(
"--out-dir",
type=str,
default="./data/mscoco",
help="specify path to store output",
)
parser.add_argument(
"--extract-for",
type=str,
default="src",
choices=["src", "trg", "both"],
help="number of threads to be used",
)
parser.add_argument(
"--split", type=str, default="train", help="train or val split."
)
args = parser.parse_args()
root_path = Path(args.root_path)
data = load(root_path / f"{root_path.name}_{args.split}.pkl")
annotators = [
"tokenize",
"ssplit",
"pos",
"lemma",
"depparse",
"ner",
"coref",
"natlog",
"openie",
]
properties = {"openie.resolve_coref": True}
triples_list = []
with CoreNLPClient(
annotators=annotators, properties=properties, be_quiet=True,
) as client:
for entry in tqdm(data):
triples = extract_triples(client, entry["src"])
triples = disambiguate_triples(triples)
triples_list.append(triples)
filename = Path(args.out_dir) / f"mscoco_triples_{args.split}.pkl"
print(
f"MSCOCO IE triples {args.split} split ({len(triples_list)} entries) are saved to '{filename}'"
)
dump(triples_list, filename)
| StarcoderdataPython |
3452988 | <gh_stars>1-10
# Twowaits
# Twowaits Problem
def up_pattern(n):
s=2*n-2
for i in range(0,n):
for j in range(0,i):
print(end=" ")
print("*",end='')
for k in range(s):
print(end=' ')
s=s-2
print('*',end='')
print('\r')
def low_pattern(n):
for i in range(n):
for j in range(n-i-1):
print(end=' ')
print('*',end='')
for j in range(2*i):
print(end=' ')
print('*',end='')
print('\r')
def complete_pattern(n):
up_pattern(n)
low_pattern(n)
complete_pattern(4)
| StarcoderdataPython |
1695358 | leaf_trait_id_and_name = {
0: 'Class',
1: 'Specimen Number',
2: 'Eccentricity',
3: 'Aspect Ratio',
4: 'Elongation',
5: 'Solidity',
6: 'Stochastic Convexity',
7: 'Isoperimetric Factor',
8: 'Maximal Indentation Depth',
9: 'Lobedness',
10: 'Average Intensity',
11: 'Average Contrast',
12: 'Smoothness',
13: 'Third moment',
14: 'Uniformity',
15: 'Entropy',
}
| StarcoderdataPython |
48007 | # -*- coding: utf-8 -*-
# (The MIT License)
#
# Copyright (c) 2013-2021 Kura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import tempfile
import pytest
from blackhole.utils import Singleton
logging.getLogger("blackhole").addHandler(logging.NullHandler())
@pytest.fixture()
def cleandir():
newpath = tempfile.mkdtemp()
os.chdir(newpath)
@pytest.fixture()
def reset():
Singleton._instances = {}
def create_config(data):
cwd = os.getcwd()
path = os.path.join(cwd, "test.conf")
with open(path, "w") as cfile:
cfile.write("\n".join(data))
return path
def create_file(name, data=""):
cwd = os.getcwd()
path = os.path.join(cwd, name)
with open(path, "w") as ffile:
ffile.write(str(data))
return path
class Args(object):
def __init__(self, args=None):
if args is not None:
for arg in args:
setattr(self, arg[0], arg[1])
| StarcoderdataPython |
11347198 | #!/usr/bin/env python
import support.states as states
counties = {}
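# Data rows are pipe-delimited: "<county>|<full state name>|<population>";
# full state names are mapped to their two-letter abbreviations on load.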
def load_data():
if counties:
return
with open('data/co-est2019-annres.dat') as fp:
for line in fp:
line = line.strip()
county,state,pop = line.split('|')
state = states.us_state_abbrev[state]
if state not in counties.keys():
counties[state] = {}
counties[state][county] = int(pop)
def population(county,state):
load_data()
if state not in counties.keys():
print("Missing county data for {}".format(state))
return 1000000000000
if county not in counties[state].keys():
print("Missing county data for {}, {}".format(county,state))
return 1000000000000
return counties[state][county]
| StarcoderdataPython |
1891735 | <reponame>verkaik/modflow6-parallel
"""
MODFLOW 6 Autotest
Test to compare MODFLOW 6 groundwater transport simulation results to MT3DMS
results. This test was first documented in Zheng and Wang (1999) (MT3DMS:
A Modular Three-Dimensional Multispecies Transport Model for Simulation of
Advection, Dispersion, and Chemical Reactions of Contaminants in Groundwater
Systems; Documentation and User's Guide) on page 130. This is a set of 1D
test problems that apply incrementally varying combinations of advection,
dispersion, and reaction (sorption and decay):
* Case 1a: Advection only
* Case 1b: Advection and dispersion
* Case 1c: Advection, dispersion, and sorption
* Case 1d: Advection, dispersion, sorption, and decay
* Case 1e: Advection, dispersion, sorption, decay, immobile domain
* Case 1f: Advection, dispersion, sorption, decay, immobile domain (do not
specify decay_sorbed in mst input file so that mf6 assumes that
decay_sorbed = decay_aqueous. Results should be same as Case 1e.
* Case 1g: Advection and zero-order growth
"""
import os
import shutil
import sys
import numpy as np
try:
import pymake
except:
msg = 'Error. Pymake package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install https://github.com/modflowpy/pymake/zipball/master'
raise Exception(msg)
try:
import flopy
except:
msg = 'Error. FloPy package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install flopy'
raise Exception(msg)
import targets
exe_name_mf = targets.target_dict['mf2005s']
exe_name_mt = targets.target_dict['mt3dms']
exe_name_mf6 = targets.target_dict['mf6']
testdir = './temp'
testgroup = 'mt3dms_p01'
remove_files = True
def p01mt3d(model_ws, al, retardation, rc1, mixelm,
zeta=None, prsity2=None, rc2=None, zero_order_decay=False):
nlay = 1
nrow = 1
ncol = 101
delr = 10.
delc = 1.
delv = 1.
top = 0.
botm = [top - delv]
Lx = (ncol - 1) * delr
v = 0.24
prsity = 0.25
q = v * prsity
perlen = 2000.
dt0 = perlen / 10.
hk = 1.
laytyp = 1
rhob = 0.25
kd = (retardation - 1.) * prsity / rhob
modelname_mf = 'p01_mf'
mf = flopy.modflow.Modflow(modelname=modelname_mf, model_ws=model_ws,
exe_name=exe_name_mf)
dis = flopy.modflow.ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc, top=top, botm=botm,
perlen=perlen)
ibound = np.ones((nlay, nrow, ncol), dtype=int)
ibound[0, 0, 0] = -1
ibound[0, 0, -1] = -1
strt = np.zeros((nlay, nrow, ncol), dtype=float)
h1 = q * Lx
strt[0, 0, 0] = h1
bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)
lpf = flopy.modflow.ModflowLpf(mf, hk=hk, laytyp=laytyp)
pcg = flopy.modflow.ModflowPcg(mf)
lmt = flopy.modflow.ModflowLmt(mf)
mf.write_input()
mf.run_model(silent=True)
modelname_mt = 'p01_mt'
mt = flopy.mt3d.Mt3dms(modelname=modelname_mt, model_ws=model_ws,
exe_name=exe_name_mt, modflowmodel=mf)
c0 = 1.
icbund = np.ones((nlay, nrow, ncol), dtype=int)
icbund[0, 0, 0] = -1
sconc = np.zeros((nlay, nrow, ncol), dtype=float)
sconc[0, 0, 0] = c0
btn = flopy.mt3d.Mt3dBtn(mt, laycon=laytyp, icbund=icbund,
prsity=prsity, sconc=sconc, dt0=dt0, ifmtcn=1)
dceps = 1.e-5
nplane = 1
npl = 0
nph = 4
npmin = 0
npmax = 8
nlsink = nplane
npsink = nph
adv = flopy.mt3d.Mt3dAdv(mt, mixelm=mixelm, dceps=dceps, nplane=nplane,
npl=npl, nph=nph, npmin=npmin, npmax=npmax,
nlsink=nlsink, npsink=npsink, percel=0.5)
dsp = flopy.mt3d.Mt3dDsp(mt, al=al)
isothm = 1
if zeta is not None:
isothm = 6
ireact = 1
if zero_order_decay:
ireact = 100
if rc2 is None:
rc2 = rc1
rct = flopy.mt3d.Mt3dRct(mt, isothm=isothm,
ireact=ireact,
igetsc=0,
rhob=rhob,
sp1=kd,
sp2=zeta,
prsity2=prsity2,
rc1=rc1,
rc2=rc2)
ssm = flopy.mt3d.Mt3dSsm(mt)
gcg = flopy.mt3d.Mt3dGcg(mt, mxiter=10)
mt.write_input()
fname = os.path.join(model_ws, 'MT3D001.UCN')
if os.path.isfile(fname):
os.remove(fname)
mt.run_model(silent=True)
fname = os.path.join(model_ws, 'MT3D001.UCN')
ucnobj = flopy.utils.UcnFile(fname)
times = ucnobj.get_times()
conc = ucnobj.get_alldata()
fname = os.path.join(model_ws, 'MT3D001.OBS')
if os.path.isfile(fname):
cvt = mt.load_obs(fname)
else:
cvt = None
fname = os.path.join(model_ws, 'MT3D001.MAS')
mvt = mt.load_mas(fname)
return mf, mt, conc, cvt, mvt
def p01mf6(model_ws, al, retardation, decay_rate, mixelm, zeta=None,
prsity2=None, onelambda=False, zero_order_decay=False):
name = 'p01'
nlay, nrow, ncol = 1, 1, 101
nper = 1
perlen = [2000.]
nstp = [10]
tsmult = [1.]
steady = [True]
delr = 10.
delc = 1.
delv = 1.
top = 0.
botm = [top - delv]
strt = 1.
hk = 1.0
laytyp = 1
Lx = (ncol - 1) * delr
v = 0.24
prsity = 0.25
q = v * prsity
rhob = 0.25
kd = (retardation - 1.) * prsity / rhob
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-6, 1e-6, 1.
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
ws = model_ws
exe_name = os.path.abspath(exe_name_mf6)
sim = flopy.mf6.MFSimulation(sim_name=name, version='mf6',
exe_name=exe_name,
sim_ws=ws)
from flopy.mf6.mfbase import VerbosityLevel
sim.simulation_data.verbosity_level = VerbosityLevel.quiet
sim.name_file.memory_print_option = 'all'
# create tdis package
tdis = flopy.mf6.ModflowTdis(sim, time_units='DAYS',
nper=nper, perioddata=tdis_rc)
# create gwf model
gwfname = 'gwf_' + name
gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, save_flows=True,
model_nam_file='{}.nam'.format(gwfname))
# create iterative model solution and register the gwf model with it
imsgwf = flopy.mf6.ModflowIms(sim, print_option='SUMMARY',
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation='NONE',
inner_maximum=ninner,
inner_dvclose=hclose, rcloserecord=rclose,
linear_acceleration='CG',
scaling_method='NONE',
reordering_method='NONE',
relaxation_factor=relax,
filename='{}.ims'.format(gwfname))
sim.register_ims_package(imsgwf, [gwf.name])
dis = flopy.mf6.ModflowGwfdis(gwf, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc,
top=top, botm=botm,
idomain=np.ones((nlay, nrow, ncol),
dtype=int),
filename='{}.dis'.format(gwfname))
# initial conditions
strt = np.zeros((nlay, nrow, ncol), dtype=float)
h1 = q * Lx
strt[0, 0, 0] = h1
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt,
filename='{}.ic'.format(gwfname))
# node property flow
npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=False,
icelltype=laytyp,
k=hk,
k33=hk, save_specific_discharge=True)
# chd files
chdspd = [[(0, 0, 0), h1], [(0, 0, ncol - 1), 0.0]]
chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(gwf,
maxbound=len(chdspd),
stress_period_data=chdspd,
save_flows=False,
pname='CHD-1')
# output control
oc = flopy.mf6.ModflowGwfoc(gwf,
budget_filerecord='{}.bud'.format(gwfname),
head_filerecord='{}.hds'.format(gwfname),
headprintrecord=[
('COLUMNS', 10, 'WIDTH', 15,
'DIGITS', 6, 'GENERAL')],
saverecord=[('HEAD', 'LAST'),
('BUDGET', 'LAST')],
printrecord=[('HEAD', 'LAST'),
('BUDGET', 'LAST')])
# create gwt model
gwtname = 'gwt_' + name
gwt = flopy.mf6.ModflowGwt(sim, modelname=gwtname, save_flows=True,
model_nam_file='{}.nam'.format(gwtname))
gwt.name_file.save_flows = True
# create iterative model solution and register the gwt model with it
imsgwt = flopy.mf6.ModflowIms(sim, print_option='SUMMARY',
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation='NONE',
inner_maximum=ninner,
inner_dvclose=hclose, rcloserecord=rclose,
linear_acceleration='BICGSTAB',
scaling_method='NONE',
reordering_method='NONE',
relaxation_factor=relax,
filename='{}.ims'.format(gwtname))
sim.register_ims_package(imsgwt, [gwt.name])
dis = flopy.mf6.ModflowGwtdis(gwt, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc,
top=top, botm=botm,
idomain=1,
filename='{}.dis'.format(gwtname))
# initial conditions
ic = flopy.mf6.ModflowGwtic(gwt, strt=0.,
filename='{}.ic'.format(gwtname))
# advection
if mixelm == 0:
scheme = 'UPSTREAM'
elif mixelm == -1:
scheme = 'TVD'
else:
raise Exception()
adv = flopy.mf6.ModflowGwtadv(gwt, scheme=scheme,
filename='{}.adv'.format(gwtname))
# dispersion
dsp = flopy.mf6.ModflowGwtdsp(gwt, xt3d_off=True, alh=al, ath1=0.1)
# mass storage and transfer
    if onelambda:
        # do not specify decay_sorbed, so that mf6 falls back to using the
        # aqueous decay rate for the sorbed phase (exercised by case 1f)
        decay_rate_sorbed = None
    else:
        decay_rate_sorbed = decay_rate
first_order_decay = True
if zero_order_decay:
first_order_decay = False
mst = flopy.mf6.ModflowGwtmst(gwt, porosity=prsity,
first_order_decay=first_order_decay,
zero_order_decay=zero_order_decay,
decay=decay_rate,
decay_sorbed=decay_rate_sorbed,
sorption='linear',
bulk_density=rhob,
distcoef=kd)
# constant concentration
c0 = 1.
cncspd = [[(0, 0, 0), c0]]
cnc = flopy.mf6.ModflowGwtcnc(gwt, maxbound=len(cncspd),
stress_period_data=cncspd,
save_flows=False,
pname='CNC-1')
ssm = flopy.mf6.ModflowGwtssm(gwt, sources=[[]],
filename='{}.ssm'.format(gwtname))
if zeta is not None:
ist = flopy.mf6.ModflowGwtist(gwt, sorption=True,
first_order_decay=first_order_decay,
zero_order_decay=zero_order_decay,
bulk_density=rhob, distcoef=kd,
decay=decay_rate,
decay_sorbed=decay_rate_sorbed,
zetaim=zeta, thetaim=prsity2,
filename='{}.ist'.format(gwtname),
pname='IST-1')
# output control
oc = flopy.mf6.ModflowGwtoc(gwt,
budget_filerecord='{}.bud'.format(gwtname),
concentration_filerecord='{}.ucn'.format(
gwtname),
concentrationprintrecord=[
('COLUMNS', 10, 'WIDTH', 15,
'DIGITS', 6, 'GENERAL')],
saverecord=[('CONCENTRATION', 'LAST'),
('BUDGET', 'LAST')],
printrecord=[('CONCENTRATION', 'LAST'),
('BUDGET', 'LAST')])
# GWF GWT exchange
gwfgwt = flopy.mf6.ModflowGwfgwt(sim, exgtype='GWF6-GWT6',
exgmnamea=gwfname, exgmnameb=gwtname,
filename='{}.gwfgwt'.format(name))
sim.write_simulation()
fname = os.path.join(model_ws, gwtname + '.ucn')
if os.path.isfile(fname):
os.remove(fname)
success, buff = sim.run_simulation(silent=True, report=True)
if not success:
print(buff)
# load concentrations
fname = os.path.join(model_ws, gwtname + '.ucn')
ucnobj = flopy.utils.HeadFile(fname, precision='double',
text='CONCENTRATION')
times = ucnobj.get_times()
conc = ucnobj.get_alldata()
return sim, conc
def test_mt3dmsp01a():
longitudinal_dispersivity = 0.
retardation = 1.0
decay_rate = 0.00
mixelm = 0
zeta = None
prsity2 = None
mf6_ws = os.path.join(testdir, testgroup + 'a')
sim, conc_mf6 = p01mf6(mf6_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
mt3d_ws = os.path.join(mf6_ws, 'mt3d')
mf, mt, conc_mt3d, cvt, mvt = p01mt3d(mt3d_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
msg = 'concentrations not equal {} {}'.format(conc_mt3d, conc_mf6)
assert np.allclose(conc_mt3d, conc_mf6, atol=1e-4), msg
# load transport budget
# budget text:
# STORAGE-AQUEOUS, DECAY-AQUEOUS, STORAGE-SORBED,
# DECAY-SORBED, FLOW-JA-FACE, SOURCE-SINK MIX, CONSTANT CONC
gwtname = 'gwt_p01'
fname = os.path.join(mf6_ws, '{}.bud'.format(gwtname))
try:
bobj = flopy.utils.CellBudgetFile(fname, precision='double')
budra = bobj.get_data(kstpkper=(9, 0), text='DECAY-AQUEOUS')[0]
except:
assert False, 'could not load data from "{}"'.format(fname)
# ensure decay aqueous is zero
decay_aqueous = bobj.get_data(kstpkper=(9, 0), text='DECAY-AQUEOUS')[0]
assert np.allclose(0., decay_aqueous)
# ensure decay sorbed is zero
decay_sorbed = bobj.get_data(kstpkper=(9, 0), text='DECAY-SORBED')[0]
assert np.allclose(0., decay_sorbed)
# ensure storage sorbed is zero
storage_sorbed = bobj.get_data(kstpkper=(9, 0), text='STORAGE-SORBED')[0]
bobj.file.close()
assert np.allclose(0., storage_sorbed), '{}'.format(storage_sorbed)
if remove_files:
shutil.rmtree(mf6_ws)
return
def test_mt3dmsp01b():
longitudinal_dispersivity = 10.
retardation = 1.0
decay_rate = 0.00
mixelm = 0
zeta = None
prsity2 = None
mf6_ws = os.path.join(testdir, testgroup + 'b')
sim, conc_mf6 = p01mf6(mf6_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
mt3d_ws = os.path.join(mf6_ws, 'mt3d')
mf, mt, conc_mt3d, cvt, mvt = p01mt3d(mt3d_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
msg = 'concentrations not equal {} {}'.format(conc_mt3d, conc_mf6)
assert np.allclose(conc_mt3d, conc_mf6, atol=1e-4), msg
if remove_files:
shutil.rmtree(mf6_ws)
return
def test_mt3dmsp01c():
longitudinal_dispersivity = 10.
retardation = 1.5
decay_rate = 0.00
mixelm = 0
zeta = None
prsity2 = None
mf6_ws = os.path.join(testdir, testgroup + 'c')
sim, conc_mf6 = p01mf6(mf6_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
mt3d_ws = os.path.join(mf6_ws, 'mt3d')
mf, mt, conc_mt3d, cvt, mvt = p01mt3d(mt3d_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
msg = 'concentrations not equal {} {}'.format(conc_mt3d, conc_mf6)
assert np.allclose(conc_mt3d, conc_mf6, atol=1e-4), msg
if remove_files:
shutil.rmtree(mf6_ws)
return
def test_mt3dmsp01d():
longitudinal_dispersivity = 10.
retardation = 1.5
decay_rate = 0.002
mixelm = 0
zeta = None
prsity2 = None
mf6_ws = os.path.join(testdir, testgroup + 'd')
sim, conc_mf6 = p01mf6(mf6_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
mt3d_ws = os.path.join(mf6_ws, 'mt3d')
mf, mt, conc_mt3d, cvt, mvt = p01mt3d(mt3d_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
msg = 'concentrations not equal {} {}'.format(conc_mt3d, conc_mf6)
assert np.allclose(conc_mt3d, conc_mf6, atol=1e-4), msg
if remove_files:
shutil.rmtree(mf6_ws)
return
def test_mt3dmsp01e():
longitudinal_dispersivity = 10.
retardation = 1.5
decay_rate = 0.002
mixelm = 0
zeta = .1
prsity2 = 0.05
mf6_ws = os.path.join(testdir, testgroup + 'e')
sim, conc_mf6 = p01mf6(mf6_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
mt3d_ws = os.path.join(mf6_ws, 'mt3d')
mf, mt, conc_mt3d, cvt, mvt = p01mt3d(mt3d_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
msg = 'concentrations not equal {} {}'.format(conc_mt3d, conc_mf6)
assert np.allclose(conc_mt3d, conc_mf6, atol=1e-1), msg
if remove_files:
shutil.rmtree(mf6_ws)
return
def test_mt3dmsp01f():
longitudinal_dispersivity = 10.
retardation = 1.5
decay_rate = 0.002
mixelm = 0
zeta = .1
prsity2 = 0.05
mf6_ws = os.path.join(testdir, testgroup + 'f')
sim, conc_mf6 = p01mf6(mf6_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2, onelambda=True)
mt3d_ws = os.path.join(mf6_ws, 'mt3d')
mf, mt, conc_mt3d, cvt, mvt = p01mt3d(mt3d_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2)
msg = 'concentrations not equal {} {}'.format(conc_mt3d, conc_mf6)
assert np.allclose(conc_mt3d, conc_mf6, atol=1e-1), msg
if remove_files:
shutil.rmtree(mf6_ws)
return
def test_mt3dmsp01g():
longitudinal_dispersivity = 0.
retardation = 1.0
decay_rate = -1.
mixelm = 0
zeta = None
prsity2 = None
mf6_ws = os.path.join(testdir, testgroup + 'g')
sim, conc_mf6 = p01mf6(mf6_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2,
zero_order_decay=True)
mt3d_ws = os.path.join(mf6_ws, 'mt3d')
mf, mt, conc_mt3d, cvt, mvt = p01mt3d(mt3d_ws, longitudinal_dispersivity,
retardation, decay_rate,
mixelm, zeta, prsity2, rc2=0.,
zero_order_decay=True)
msg = 'concentrations not equal {} {}'.format(conc_mt3d, conc_mf6)
assert np.allclose(conc_mt3d, conc_mf6, atol=1.e-4), msg
if remove_files:
shutil.rmtree(mf6_ws)
return
if __name__ == "__main__":
# print message
print('standalone run of {}'.format(os.path.basename(__file__)))
test_mt3dmsp01a()
test_mt3dmsp01b()
test_mt3dmsp01c()
test_mt3dmsp01d()
test_mt3dmsp01e()
test_mt3dmsp01f()
test_mt3dmsp01g()
| StarcoderdataPython |
4843910 | <reponame>Ahammmad-Shawki8/AS8-repository
# advanced python
# what is advanced python?
# it means python spreading its wings across multiple dimensions and use-cases in many fields.
# python is really a powerful OOP language. it can be used in many advanced concepts too.
# some advanced concepts are-
# 1. sys programming
# 2. graph theory
# 3. mathematics
# 4. computer science
# 5. numerical computation
# 6. databases
# General Intro:
# sys programming:
# it is programming for system-based software.
# it deals with programming that is often part of the operating system.
# in this module, we will learn about pipes, threads, forks and more in system programming.
# Graph theory:
# it works with modules like pygraph and networkx, which are used to implement
# a graph class with essential functionality for graph creation, manipulation and calculation.
# mathematics:
# we can check out how to work with polynomials.
# and how easily and beautifully a class can be presented
# for the creation and manipulation of polynomial functions in python.
# computer science:
# advanced computer science concepts go through topics like
# finite state machines, Turing machines and even text classification, and other concepts like these.
# numerical computation:
# if we want to get faster and more efficient results with arrays and matrices,
# then the numpy module of python is definitely the right tool.
# the matplotlib module is all we need to plot data in different ways.
# also, an introduction to linear combinations is a good start for this advanced concept.
# databases:
# it is pretty simple and straightforward.
# it is basically how python works with different database management software like mysql or postgresql.
| StarcoderdataPython |
258046 | <filename>critic.py<gh_stars>0
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
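# Critic network for an actor-critic agent (e.g. DDPG): maps a concatenated
# (state, action) pair to a scalar Q-value estimate through four hidden
# layers, with batch normalization after the first layer.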
class Critic(nn.Module):
def __init__(self, input_size, seed):
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(input_size, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 32)
self.fc5 = nn.Linear(32, 1)
self.bn1 = nn.BatchNorm1d(256)
self.reset_parameters()
def forward(self, states, actions):
x_state_action = torch.cat((states, actions), dim=1)
x = F.relu(self.fc1(x_state_action))
x = self.bn1(x)
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
self.fc4.weight.data.uniform_(*hidden_init(self.fc4))
self.fc5.weight.data.uniform_(-3e-3, 3e-3)
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
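# Minimal usage sketch (dimensions are illustrative assumptions):
#   critic = Critic(input_size=state_dim + action_dim, seed=0)
#   q = critic(states, actions)  # states: (B, state_dim), actions: (B, action_dim)
# Note: BatchNorm1d requires a batch size greater than 1 in training mode.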
| StarcoderdataPython |
8133418 | #!/usr/bin/env python3
#
# "Spotlight" over a larger image
# Shows a small window of a full image
# Results transmitted via RabbitMQ
#
# @author <NAME> <<EMAIL>>
# @copyright 2022
#
from PIL import Image
from time import sleep
import random
import pika
import json
from pprint import pprint
import sys
# ****************************************************************************
class Pixel:
def __init__(self, x, y, r, g, b):
self.x = x
self.y = y
self.r = r
self.g = g
self.b = b
def getDat(self) -> dict:
dat = {
"coordinate": {"x": self.x, "y": self.y},
"color": {"r": self.r, "g": self.g, "b": self.b},
}
return dat
def __str__(self) -> str:
return json.dumps(self.getDat())
# ****************************************************************************
class Spotlight:
def __init__(self, image: Image, spotlightSizeX: int, spotlightSizeY: int):
self.image = image
self.sizeX = spotlightSizeX
self.sizeY = spotlightSizeY
print(f"Spotlight on image:")
print(f"\tImage format: {self.image.format}")
print(f"\tSize: {self.image.size}")
print(f"\tMode: {self.image.mode}")
self.minX = 0
self.minY = 0
self.imageWidth, self.imageHeight = self.image.size
self.maxX = self.imageWidth - self.sizeX
self.maxY = self.imageHeight - self.sizeY
self.minTickABSVelocity = 0.3
self.maxTickABSVelocity = 1.0
self.xTickVelocity = 1.0
self.yTickVelocity = 1.0
self.currX = random.randrange(self.minX, self.maxX)
self.currY = random.randrange(self.minY, self.maxY)
print("Spotlight initialized")
def getSpotlightImage(self) -> Image:
box = (self.currX, self.currY, self.currX + self.sizeX, self.currY + self.sizeY)
region = self.image.crop(box)
return region
def updateCoordinates(self) -> None:
self.currX += self.xTickVelocity
self.currY += self.yTickVelocity
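    # Randomly perturb the bounce velocity while clamping its magnitude to
    # [minTickABSVelocity, maxTickABSVelocity] and preserving its direction.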
def calcNewVelocity(self, oldVelocity) -> float:
velocityChange = float(random.randrange(-1, 2, 1)) / 10.0
newVelocity = oldVelocity + velocityChange
direction = -1 if oldVelocity < 0 else 1
if abs(newVelocity) < self.minTickABSVelocity:
newVelocity = self.minTickABSVelocity * direction
elif abs(newVelocity) > self.maxTickABSVelocity:
newVelocity = self.maxTickABSVelocity * direction
return newVelocity
def handleBounce(self) -> None:
if self.currX <= self.minX:
self.currX = self.minX
self.xTickVelocity = abs(self.xTickVelocity)
self.xTickVelocity = self.calcNewVelocity(self.xTickVelocity)
elif self.currX >= self.maxX:
self.currX = self.maxX
self.xTickVelocity = -1 * abs(self.xTickVelocity)
self.xTickVelocity = self.calcNewVelocity(self.xTickVelocity)
if self.currY <= self.minY:
self.currY = self.minY
self.yTickVelocity = abs(self.yTickVelocity)
self.yTickVelocity = self.calcNewVelocity(self.yTickVelocity)
elif self.currY >= self.maxY:
self.currY = self.maxY
self.yTickVelocity = -1 * abs(self.yTickVelocity)
self.yTickVelocity = self.calcNewVelocity(self.yTickVelocity)
def isAtEdge(self) -> bool:
if (
self.currX <= self.minX
or self.currX >= self.maxX
or self.currY <= self.minY
or self.currY >= self.maxY
):
return True
else:
return False
def tick(self) -> None:
self.updateCoordinates()
if self.isAtEdge():
self.handleBounce()
# ****************************************************************************
class RMQWrapper:
def __init__(self):
self.setupRMQ()
def setupRMQ(self) -> None:
print("Connecting to RMQ server -- ", end="")
self.queueName = "MazeScreen"
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host="localhost")
)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queueName)
print("Connected.")
def publish(self, msg) -> None:
try:
self.channel.basic_publish(
exchange="", routing_key=self.queueName, body=msg
)
except pika.exceptions.StreamLostError as e:
pprint(e)
self.setupRMQ()
def close(self) -> None:
self.connection.close()
def sendClear(self) -> None:
dat = {"type": "clear"}
msg = json.dumps(dat)
self.publish(msg)
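    # Redraw messages use a simple JSON schema, e.g.:
    # {"type": "redraw", "pixels": [{"coordinate": {"x": 0, "y": 0},
    #                                "color": {"r": 255, "g": 0, "b": 0}}, ...]}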
def sendScreenRedraw(self, screen) -> None:
dat = {"type": "redraw", "pixels": []}
width, height = screen.size
for x in range(width):
for y in range(height):
                # getpixel returns RGB or RGBA depending on the image mode
                r, g, b = screen.getpixel((x, y))[:3]
pixel = Pixel(x, y, r, g, b)
dat["pixels"].append(pixel.getDat())
msg = json.dumps(dat)
self.publish(msg)
# ** *************************************************************************
if __name__ == "__main__":
print("Starting Spotlight Generator.")
sleepDelay = 0.25
if len(sys.argv) < 2:
print(f"Usage: {sys.argv[0]} <background image file>")
sys.exit()
try:
fullImage = Image.open(sys.argv[1])
except FileNotFoundError as e:
print(e)
sys.exit()
rmq = RMQWrapper()
spotlight = Spotlight(fullImage, 32, 32)
print("Starting spotlight's main movement")
try:
while True:
spotlight.tick()
newSpotlightImage = spotlight.getSpotlightImage()
rmq.sendScreenRedraw(newSpotlightImage)
sleep(sleepDelay)
except KeyboardInterrupt:
print("Caught keyboard interrupt - quitting")
rmq.sendClear()
rmq.close()
print("Done.")
| StarcoderdataPython |
6415185 | import os
import sys
import numpy as np
from setuptools import setup, Extension
from Cython.Distutils import build_ext
NAME = "mbircone"
VERSION = "0.1"
DESCRIPTION = "Python Package for Cone Beam reconstruction"
REQUIRES = ['numpy','Cython','psutil','Pillow'] # external package dependencies
LICENSE = "BSD-3-Clause"
AUTHOR = "<NAME>"
# Specifies directory containing cython functions to be compiled
PACKAGE_DIR = "mbircone"
SRC_FILES = [PACKAGE_DIR + '/src/allocate.c', PACKAGE_DIR + '/src/MBIRModularUtilities3D.c',
PACKAGE_DIR + '/src/icd3d.c', PACKAGE_DIR + '/src/recon3DCone.c',
PACKAGE_DIR + '/src/computeSysMatrix.c',
PACKAGE_DIR + '/src/interface.c', PACKAGE_DIR + '/interface_cy_c.pyx']
compiler_str = os.environ.get('CC')
# Set default to gcc in case CC is not set
if not compiler_str:
compiler_str = 'gcc'
# Single threaded clang compile
if compiler_str == 'clang':
c_extension = Extension(PACKAGE_DIR+'.interface_cy_c', SRC_FILES,
libraries=[],
language='c',
include_dirs=[np.get_include()])
# OpenMP gcc compile
if compiler_str =='gcc':
c_extension = Extension(PACKAGE_DIR+'.interface_cy_c', SRC_FILES,
libraries=[],
language='c',
include_dirs=[np.get_include()],
# for gcc-10 "-std=c11" can be added as a flag
extra_compile_args=["-std=c11","-O3", "-fopenmp","-Wno-unknown-pragmas"],
extra_link_args=["-lm","-fopenmp"])
# OpenMP icc compile
if compiler_str =='icc':
if sys.platform == 'linux':
os.environ['LDSHARED'] = 'icc -shared'
c_extension = Extension(PACKAGE_DIR+'.interface_cy_c', SRC_FILES,
libraries=[],
language='c',
include_dirs=[np.get_include()],
extra_compile_args=["-O3","-DICC","-qopenmp","-no-prec-div","-restrict","-ipo","-inline-calloc",
"-qopt-calloc","-no-ansi-alias","-xCORE-AVX2"],
extra_link_args=["-lm","-qopenmp"])
setup(install_requires=REQUIRES,
packages=[PACKAGE_DIR],
zip_safe=False,
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
license=LICENSE,
cmdclass={"build_ext": build_ext},
ext_modules=[c_extension]
)
| StarcoderdataPython |
304943 | <reponame>knowledgetechnologyuhh/goal_conditioned_RL_baselines
import numpy as np
import gym
import pickle
from baselines import logger
from baselines.herhrl.ddpg_her_hrl_policy import DDPG_HER_HRL_POLICY
from baselines.herhrl.mix_pddl_hrl_policy import MIX_PDDL_HRL_POLICY
from baselines.herhrl.pddl_policy import PDDL_POLICY
from baselines.herhrl.her import make_sample_her_transitions as make_sample_her_transitions_hrl
# from baselines.her.her import make_sample_her_transitions
# from baselines.her_pddl.pddl.pddl_util import obs_to_preds_single
import importlib
from gym.envs.registration import registry
DEFAULT_ENV_PARAMS = {
'FetchReach-v1': {
'n_cycles': 20
},
}
DEFAULT_PARAMS = {
# env
'max_u': 1., # max absolute value of actions on different coordinates
# ddpg
'layers': 3, # number of layers in the critic/actor networks
'hidden': 256, # number of neurons in each hidden layers
'network_class': 'baselines.herhrl.actor_critic:ActorCritic',
'Q_lr': 0.001, # critic learning rate
'pi_lr': 0.001, # actor learning rate
# 'buffer_size': int(1E6), # for experience replay
'buffer_size': int(5E3), # for experience replay
'polyak': 0.95, # polyak averaging coefficient
'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)
'clip_obs': 200.,
'scope': 'ddpg_hrl', # can be tweaked for testing
'relative_goals': False,
# ddpg get actions
'reuse': False,
'use_mpi': True,
# training
'n_cycles': 50, # per epoch
'rollout_batch_size': 1, # per mpi thread
'n_batches': 40, # training batches per cycle
'batch_size': 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
'test_with_polyak': False, # run test episodes with the target network
# exploration
'random_eps': 0.3, # percentage of time a random action is taken
'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u
# 'random_eps': 0.05, # percentage of time a random action is taken
# 'noise_eps': 0.05, # std of gaussian noise added to not-completely-random actions as a percentage of max_u
# HER
'replay_strategy': 'future', # supported modes: future, none
'replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future
# normalization
'norm_eps': 0.01, # epsilon used for observation normalization
'norm_clip': 5, # normalized observations are cropped to this values
# 'test_subgoal_perc' : 1.0
'has_child': False
}
POLICY_ACTION_PARAMS = {
}
CACHED_ENVS = {}
ROLLOUT_PARAMS = {
'use_demo_states': True,
# 'T': 50,
'policy_action_params': {'exploit': False,
'compute_Q': False,
'noise_eps': 0.2,
'random_eps': 0.3,
# 'noise_eps': 0.2,
# 'random_eps': 0.3,
'use_target_net': False}
}
EVAL_PARAMS = {
'use_demo_states': False,
# 'T': 50,
'policy_action_params': {'exploit': True,
'compute_Q': True,
'noise_eps': 0.0,
'random_eps': 0.0,
'use_target_net': False
# 'use_target_net': params['test_with_polyak'],
}
}
"""
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=self.random_eps if not self.exploit else 0.,
use_target_net=self.use_target_net)
"""
# OVERRIDE_PARAMS_LIST = ['network_class', 'rollout_batch_size', 'n_batches', 'batch_size', 'replay_k','replay_strategy']
# OVERRIDE_PARAMS_LIST = ['rollout_batch_size', 'n_batches', 'batch_size', 'n_subgoals_layers', 'policies_layers']
# OVERRIDE_PARAMS_LIST = ['penalty_magnitude', 'n_subgoals_layers', 'policies_layers', 'mix_p_steepness', 'obs_noise_coeff']
# OVERRIDE_PARAMS_LIST = ['penalty_magnitude', 'action_steps', 'policies_layers', 'obs_noise_coeff', 'network_class', 'shared_pi_err_coeff']
OVERRIDE_PARAMS_LIST = ['action_steps', 'policies_layers', 'shared_pi_err_coeff', 'action_l2', 'network_classes']
ROLLOUT_PARAMS_LIST = ['noise_eps', 'random_eps', 'replay_strategy', 'env_name']
def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
    its observation and action spaces, without any intent of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env]
def prepare_params(kwargs):
# DDPG params
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env():
return gym.make(env_name)
kwargs['make_env'] = make_env
if 'render' in registry.env_specs[env_name]._kwargs:
registry.env_specs[env_name]._kwargs['render'] = kwargs['render']
tmp_env = cached_make_env(kwargs['make_env'])
action_steps = [int(n_s) for n_s in kwargs['action_steps'][1:-1].split(",") if n_s != '']
kwargs['action_steps'] = action_steps
tmp_env.reset()
kwargs['max_u'] = np.array(kwargs['max_u']) if isinstance(kwargs['max_u'], list) else kwargs['max_u']
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers',
'network_class',
'polyak',
'batch_size', 'Q_lr', 'pi_lr',
'norm_eps', 'norm_clip', 'max_u',
'action_l2', 'clip_obs', 'scope', 'relative_goals',
'shared_pi_err_coeff']:
if name in kwargs.keys():
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
def configure_her(params):
env = cached_make_env(params['make_env'])
env.reset()
def reward_fun(ag_2, g, info): # vectorized
return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
# Prepare configuration for HER.
her_params = {
'reward_fun': reward_fun,
}
for name in ['replay_strategy', 'replay_k', 'penalty_magnitude', 'has_child']:
her_params[name] = params[name]
params['_' + name] = her_params[name]
del params[name]
sample_her_transitions = make_sample_her_transitions_hrl(**her_params)
return sample_her_transitions
def simple_goal_subtract(a, b):
assert a.shape == b.shape
return a - b
def configure_policy(dims, params):
# Extract relevant parameters.
rollout_batch_size = params['rollout_batch_size']
ddpg_params = params['ddpg_params']
reuse = params['reuse']
use_mpi = params['use_mpi']
p_steepness = params['mix_p_steepness']
# DDPG agent
env = cached_make_env(params['make_env'])
env.reset()
subgoal_scale, subgoal_offset = env.env.get_scale_and_offset_for_normalized_subgoal()
units_per_obs_len = 12
n_obs = len(env.env._get_obs()['observation'])
ddpg_params.update({
'rollout_batch_size': rollout_batch_size,
'subtract_goals': simple_goal_subtract,
'reuse': reuse,
'use_mpi': use_mpi,
'clip_pos_returns': True, # clip positive returns for Q-values
'h_level': 0,
'p_steepness': p_steepness,
'hidden': units_per_obs_len * n_obs
})
ddpg_params['info'] = {
'env_name': params['env_name'],
}
n_subgoals = params['action_steps']
policy_types = [getattr(importlib.import_module('baselines.herhrl.' + (policy_str.lower())), policy_str) for
policy_str in params['policies_layers'][1:-1].split(",") if policy_str != '']
net_classes = [net_class for net_class in params['network_classes'][1:-1].split(",") if net_class != '']
policies = []
for l, (n_s, ThisPolicy, net_class) in enumerate(zip(n_subgoals, policy_types, net_classes)):
if l == (len(n_subgoals) - 1): # If this is the final lowest layer
input_dims = dims.copy()
subgoal_scale = np.ones(input_dims['u'])
subgoal_offset = np.zeros(input_dims['u'])
has_child = False
else:
input_dims = dims.copy()
input_dims['u'] = input_dims['g']
has_child = True # penalty only apply for the non-leaf hierarchical layers
_params = params.copy()
_params['has_child'] = has_child
sample_her_transitions = configure_her(_params)
ddpg_params['sample_transitions'] = sample_her_transitions
ddpg_params['network_class'] = "baselines.herhrl." + net_class
this_params = ddpg_params.copy()
gamma = 1. - 1. / n_s
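        # With gamma = 1 - 1/n_s the effective horizon is n_s steps, so the
        # clip_return value 1/(1 - gamma) below evaluates to n_s.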
this_params.update({'input_dims': input_dims, # agent takes an input observations
'T': n_s,
'subgoal_scale': subgoal_scale,
'subgoal_offset': subgoal_offset,
'h_level': l,
'gamma': gamma,
'buffer_size': ddpg_params['buffer_size'] * n_s,
'clip_return': (1. / (1. - gamma)) if params['clip_return'] else np.inf,
})
this_params['scope'] += '_l_{}'.format(l)
policy = ThisPolicy(**this_params)
policies.append(policy)
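    # Chain the per-layer policies into a hierarchy: each policy receives the
    # next one as its child and shares its TF session with it.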
if len(policies) > 0:
h_level_ctr = 1
for p, p_child in zip(policies[:-1], policies[1:]):
p.child_policy = p_child
p.child_policy.h_level = h_level_ctr
p.child_policy.sess = p.sess
h_level_ctr += 1
return policies[0]
def load_policy(restore_policy_file, params):
# Load policy.
with open(restore_policy_file, 'rb') as f:
policy = pickle.load(f)
# Set sample transitions (required for loading a policy only).
_params = params.copy()
policy = set_policy_params(policy, _params)
return policy
def set_policy_params(policy, params):
child_params = params.copy()
if policy.child_policy is None: # Don't use a penalty for the leaf policy
params['has_child'] = False
else:
params['has_child'] = True
policy.sample_transitions = configure_her(params)
policy.rollout_batch_size = params['rollout_batch_size']
if policy.buffer is not None:
policy.buffer.sample_transitions = policy.sample_transitions
if policy.child_policy is not None:
set_policy_params(policy.child_policy, child_params)
return policy
def configure_dims(params):
env = cached_make_env(params['make_env'])
env.reset()
obs, _, _, info = env.step(env.action_space.sample())
dims = {
'o': obs['observation'].shape[0],
'u': env.action_space.shape[0],
'g': obs['desired_goal'].shape[0],
}
for key, value in info.items():
value = np.array(value)
if value.ndim == 0:
value = value.reshape(1)
dims['info_{}'.format(key)] = value.shape[0]
return dims
| StarcoderdataPython |
6437119 | <filename>katana/cigar.py<gh_stars>1-10
"""Basic CIGAR manipulation and querying. """
from __future__ import print_function, absolute_import, division
import itertools
import re
import katana.util as util
class CigarUtil(object):
_QUERY_CONSUMING = set(list("MIS=X"))
_REF_CONSUMING = set(list("MDNS=X"))
_REGEX_CIGAR = re.compile("([0-9]+)([MIDNSHP=X])")
_REGEX_MATCHING_OP = re.compile("[MX=]")
_REGEX_NON_HARDCLIP = re.compile("[^H]")
_REGEX_REF_CONSUMING = re.compile("[MDNS=X]")
_REGEX_REQUIRED_OPS = re.compile("[MIDN=X]")
_REGEX_QUERY_CONSUMING = re.compile("[MIS=X]")
_REGEX_QUERY_NON_CONSUMING = re.compile("[DNP]")
def __init__(self, reference_start, cigar=None, cigar_profile=None):
self.reference_start = reference_start
self.cigar = ""
self.cigar_profile = ""
if cigar:
self.cigar = cigar
elif cigar_profile:
self.cigar = self._collapse_cigar_profile(cigar_profile)
if cigar_profile:
self.cigar_profile = cigar_profile
else:
self.cigar_profile = self._expand_cigar(self.cigar)
self.query_length = \
len(self._REGEX_QUERY_CONSUMING.findall(self.cigar_profile))
self.is_valid = self._REGEX_REQUIRED_OPS.search(self.cigar) is not None
def __repr__(self):
return ("{}(reference_start={}, "
"cigar='{}')").format(self.__class__,
self.reference_start,
self.cigar)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def _is_ref_consuming(self, cigar_op):
return cigar_op in self._REF_CONSUMING
def _is_match(self, cigar_op):
        return self._REGEX_MATCHING_OP.search(cigar_op) is not None
def _softclip(self, cigar_profile):
profile = self._REGEX_QUERY_NON_CONSUMING.sub("", cigar_profile)
return self._REGEX_NON_HARDCLIP.sub("S", profile)
def _cut_at_first_match(self, profile):
cut_index = 0
first_match = self._REGEX_MATCHING_OP.search(profile)
if first_match:
cut_index = first_match.start(0)
return (profile[0:cut_index], profile[cut_index:])
def _softclip_to_first_match(self, old_pos, old_profile):
new_pos = old_pos
(pre_match, post_match) = self._cut_at_first_match(old_profile)
new_pos += len(re.findall(self._REGEX_REF_CONSUMING, pre_match))
pre_match = self._softclip(pre_match)
return (new_pos, pre_match + post_match)
def _expand_cigar(self, cigar_string):
expanded_cigar = []
for cigar_tuple in self._REGEX_CIGAR.findall(cigar_string):
expanded_cigar.append(int(cigar_tuple[0]) * cigar_tuple[1])
return "".join(expanded_cigar)
@staticmethod
def _collapse_cigar_profile(profile):
op_strings = ["".join(g) for _, g in itertools.groupby(profile)]
new_cigar = [str(len(op)) + op[0] for op in op_strings]
return "".join(new_cigar)
def _pos_profiles(self, profile):
'''Returns a list of tuple of first match index and a list of profiles.
Each list element is the cigar profile for that reference position; i.e.
the length of this list is the length of consumed reference bases.
'''
pos_profiles = list()
pos = 0
pos_profiles.append([])
first_match_index = -1
for cigar_op in profile:
if pos >= len(pos_profiles):
pos_profiles.append([])
pos_profiles[pos].append(cigar_op)
if self._is_match(cigar_op) and first_match_index == -1:
first_match_index = pos
if self._is_ref_consuming(cigar_op):
pos += 1
profiles = ["".join(x) for x in pos_profiles]
return (first_match_index, profiles)
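    # Example (sketch): for the profile "SMMD" this yields first_match_index = 1
    # and per-reference-position profiles ['S', 'M', 'M', 'D'] (note that S
    # counts as reference-consuming in this class's _REF_CONSUMING set).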
def _partition_cigar(self, region_start, region_end):
'''Split the profile into 3-tuple: before region, in region,
and after region; each tuple defines the start coordinate and
profile fragment.
        ref_start, region_start, and region_end are genomic coordinates.
Regions can extend outside the range of the read but the resulting
start coordinates are constrained to the range of the read. For
example, if region_end overhangs the end of the read, the "after region"
start coordinate will be "pushed back" to the trailing edge of the
read.'''
ref_start = self.reference_start
profile = self.cigar_profile
(first_match_index, pos_profile) = self._pos_profiles(profile)
read_start = ref_start - first_match_index
read_end = read_start + len(pos_profile)
constrain = lambda x: max(read_start, min(read_end, x))
region_start_index = max(region_start - read_start, 0)
region_end_index = region_end - read_start
before_profile = "".join(pos_profile[0:region_start_index])
region_profile="".join(pos_profile[region_start_index:region_end_index])
after_profile="".join(pos_profile[region_end_index:])
return (CigarUtil(read_start,
cigar_profile=before_profile),
CigarUtil(constrain(region_start),
cigar_profile=region_profile),
CigarUtil(constrain(region_end),
cigar_profile=after_profile))
def _assert_query_lengths_match(self, new_cigar):
if self.query_length != new_cigar.query_length:
msg = ("Old CIGAR query length [{}] ({}) != new CIGAR length"
"[{}] ({})").format(self.cigar,
self.query_length,
new_cigar.cigar,
new_cigar.query_length)
raise util.KatanaException(msg)
def softclip_target(self, target_start, target_end):
(pre_target, target, post_target) = self._partition_cigar(target_start,
target_end)
pre_profile = self._softclip(pre_target.cigar_profile)
(new_pos,
target_profile) = self._softclip_to_first_match(target.reference_start,
target.cigar_profile)
post_profile = self._softclip(post_target.cigar_profile)
new_profile = pre_profile + target_profile + post_profile
new_cigar = CigarUtil(new_pos, cigar_profile = new_profile)
self._assert_query_lengths_match(new_cigar)
return new_cigar
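    # Example (sketch): a 10M read at reference_start 100, softclipped to the
    # region [102, 108), becomes 2S6M2S with reference_start 102; the query
    # length (10) is preserved, so _assert_query_lengths_match passes.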
class NullCigarUtil(object):
#pylint: disable=unused-argument
def __init__(self, reference_start):
self.reference_start = reference_start
self.cigar = "*"
self.is_valid = True
self.query_length = 0
def softclip_target(self, target_start, target_end):
return self
def cigar_factory(read):
if not read.cigarstring or read.cigarstring == "*":
return NullCigarUtil(read.reference_start)
else:
return CigarUtil(read.reference_start, read.cigarstring)
| StarcoderdataPython |
5162722 | class ThecampyException(Exception):
pass
class ThecampyValueError(ThecampyException):
pass
class ThecampyReqError(ThecampyException): #Request오류들
pass | StarcoderdataPython |
157500 | import os
import re
import collections
import shutil
import traceback
from os.path import join
from xml.dom import minidom
print("Start")
a = []
rpt = []
Folders = []
Address = []
def list_duplicates(seq):
global t
t = False
seen = set()
seen_add = seen.add
seen_twice = set( x for x in seq if x in seen or seen_add(x) )
b = ' \n'.join(seen_twice)
e = len(seen_twice)
    # t is True only when no duplicates were found; the caller skips deletion otherwise.
    if e == 0:
        t = True
fo = open("duplicates.txt", "w")
fo.write(b)
    fo.close()
return t
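# Example: list_duplicates(['a', 'b', 'a']) writes "a" to duplicates.txt and
# returns False, signalling that duplicates were found and deletion must not run.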
def fetch_buildnumbers(address):
try:
Id = 1
for item in os.listdir(address):
if 'Build-' in item:
item_value= str(item)
buildid, buildnumber=item_value.split("-", 1)
Folders.append(buildnumber)
Id += 1
return Folders
except:
print("___________________________ ERROR ___________________________\n" + traceback.format_exc())
def delete_releases(project, family, branch, buildnumber, jenkins):
global path1
path1= str('/var/lib/jenkins/jobs/'+ jenkins + '/builds/' + buildnumber)
print(path1);
global path2
path2= str('/Releases/Jenkins/' + family + '/' + project + '/' + branch + '/Build-' + buildnumber)
try:
path1_exists=os.path.exists(path1)
path2_exists=os.path.exists(path2)
if path1_exists == False and path2_exists == True:
shutil.rmtree(path2)
print("Deleting artifacts : " + path2)
return path2
except:
print("___________________________ ERROR ___________________________\n" + traceback.format_exc())
def start():
try:
fo.write("********* Step 1 - Find All Jenkins Plan duplicate configurations **********\n")
for (dirname, dirs, files) in os.walk('/var/lib/jenkins/jobs'):
for filename in files:
if filename.endswith('config.xml') :
thefile = os.path.join(dirname,filename)
get_jenkins = thefile
path_list = get_jenkins.split(os.sep)
# print(path_list);
print_filename=str(thefile)
if 'configurations' in print_filename or 'promotions' in print_filename:
donothing = 1
else:
fo.write("Adding Metadata from : " + print_filename )
xmldoc = minidom.parse(thefile)
projectlist = xmldoc.getElementsByTagName('project')
matrixlist = xmldoc.getElementsByTagName('matrix-project')
tagnumToKeep = xmldoc.getElementsByTagName('numToKeep')
if tagnumToKeep is not None and tagnumToKeep.length == 1:
numToKeep = int(tagnumToKeep[0].childNodes[0].data)
fo.write("Number to keep :" + tagnumToKeep[0].childNodes[0].data + "\n")
EnvInjectJobProperty = xmldoc.getElementsByTagName('EnvInjectJobProperty')
d = EnvInjectJobProperty.length
fo.write("EnvInjectJobProperty ;" + str(d) + "\n")
if EnvInjectJobProperty is not None and d == 1:
propertiesContent = xmldoc.getElementsByTagName('propertiesContent')[0]
name = propertiesContent.childNodes[0].data
fo.write("propertiesContent :" + name + "\n")
m = re.match("(?P<project_branch>\w+)\W+(?P<branch>\w+)\W+(?P<project_family>\w+)\W+(?P<family>\w+)\W+(?P<project_title>\w+)\W+(?P<title>\w+)", name)
SigniantConfig = xmldoc.getElementsByTagName('org.jenkinsci.plugins.variablecfg.Signiant')
if SigniantConfig.length == 1:
artmanager=(SigniantConfig[0].childNodes[1].childNodes[0].data)
else:
artmanager="skip"
fo.write("Art Manager :" + artmanager + "\n")
if 'skip' in artmanager:
donothing = 1
str_report=("*** Record Skipped " + path_list[5])
rpt.insert(0, str_report)
fo.write("Art Manager said to skip record\n")
fo.write("*******************************************************************************\n")
else:
if m is None:
donothing = 1
fo.write("re match was None\n")
fo.write("*******************************************************************************\n")
else:
str_print = str(m.group("project_branch")+"="+ m.group("branch")+" "+ m.group("project_family")+"="+m.group("family")+" "+m.group("project_title")+"="+m.group("title"))
str_projectlist=str(m.group("title") + "=" + m.group("branch"))
str_address=(m.group("family") + "/" + m.group("title") + "/" + m.group("branch") + "/" + path_list[5] )
a.insert(0, str_projectlist)
str_report=(path_list[5] + "=" + m.group("title") + "-" + m.group("branch"))
rpt.insert(0, str_report)
Address.insert(0, str_address)
fo.write("Match Data :" + str_print + "\n")
fo.write("Address : " + str_address + "\n")
fo.write("add to project list\n")
fo.write("*******************************************************************************\n")
return a
except:
print("___________________________ ERROR ___________________________\n" + traceback.format_exc())
fo = open("runner.log", "w")
start()
fo.close()
fo = open("Artifact_Manager.rpt", "w")
c = ' \n'.join(rpt)
fo.write(c)
fo.close()
# Checking for duplicates
list_duplicates(a)
print(t)
if t == True:
for address in Address:
family, project, branch, jenkins = address.split('/')
search_address = str('/Releases/Jenkins/' + family + '/' + project + '/' + branch)
# print(str('/Releases/Jenkins/' + address));
# family, project, branch= address.split('/')
# check if releases directory exists
if os.path.exists(search_address) == True:
del Folders[:]
# fetch the builds numbers for this project
fetch_buildnumbers(search_address)
# now delete the releases directory's
for buildnumber in Folders:
delete_releases(project, family, branch, buildnumber, jenkins)
else:
print("** Duplicates found no builds will be delete until they have been corrected")
print("Done")
| StarcoderdataPython |
1773953 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.internal.schema_inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_transform import schema_inference
from tensorflow_transform.tf_metadata import dataset_schema
import unittest
class SchemaInferenceTest(unittest.TestCase):
def testInferFeatureSchemaWithoutSession(self):
with tf.Graph().as_default() as graph:
tensors = {
'a': tf.placeholder(tf.float32, (None,)),
'b': tf.placeholder(tf.string, (1, 2, 3)),
'c': tf.placeholder(tf.int64, (None,))
}
schema_inference.set_tensor_schema_override(
tensors['c'], tf.constant(5), tf.constant(6))
schema = schema_inference.infer_feature_schema(tensors, graph)
expected_schema = dataset_schema.Schema(column_schemas={
'a': dataset_schema.ColumnSchema(
tf.float32, [], dataset_schema.FixedColumnRepresentation()),
'b': dataset_schema.ColumnSchema(
tf.string, [2, 3], dataset_schema.FixedColumnRepresentation()),
'c': dataset_schema.ColumnSchema(
dataset_schema.IntDomain(tf.int64, is_categorical=True),
[], dataset_schema.FixedColumnRepresentation())
})
self.assertEqual(schema, expected_schema)
def testInferFeatureSchemaBadRank(self):
with tf.Graph().as_default() as graph:
tensors = {
'a': tf.placeholder(tf.float32, ()),
}
with self.assertRaises(ValueError):
schema_inference.infer_feature_schema(tensors, graph)
def testInferFeatureSchemaWithSession(self):
with tf.Graph().as_default() as graph:
tensors = {
'a': tf.placeholder(tf.float32, (None,)),
'b': tf.placeholder(tf.string, (1, 2, 3)),
'c': tf.placeholder(tf.int64, (None,))
}
schema_inference.set_tensor_schema_override(
tensors['c'], tf.constant(5), tf.constant(6))
with tf.Session(graph=graph) as session:
schema = schema_inference.infer_feature_schema(tensors, graph, session)
expected_schema = dataset_schema.Schema(column_schemas={
'a': dataset_schema.ColumnSchema(
tf.float32, [], dataset_schema.FixedColumnRepresentation()),
'b': dataset_schema.ColumnSchema(
tf.string, [2, 3], dataset_schema.FixedColumnRepresentation()),
'c': dataset_schema.ColumnSchema(
dataset_schema.IntDomain(tf.int64, 5, 6, is_categorical=True),
[], dataset_schema.FixedColumnRepresentation())
})
self.assertEqual(schema, expected_schema)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
6613468 | <reponame>tsilifis/quinoa
import numpy as np
import kernel_py as kp
import scipy.stats as st
from scipy import linalg
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%matplotlib inline
def build_up_b(b, rho, dt, u, v, dx, dy):
b[1:-1, 1:-1] = (rho * ( (1. / dt) * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2. * dx) +
(v[2:, 1:-1] - v[0:-2, 1:-1]) / (2. * dy)) - ((u[1:-1,2:] - u[1:-1,0:-2]) / (2. * dx)) ** 2 -
2. * ((u[2:,1:-1] - u[0:-2,1:-1]) / (2. * dy)) * ((v[1:-1, 2:] - v[1:-1,0:-2]) / (2. * dx)) -
((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2. * dy))**2 ))
return b
def pressure_poisson(p, dx, dy, b):
pn = np.empty_like(p)
pn = p.copy()
for q in range(nit):
pn = p.copy()
p[1:-1, 1:-1] = ( ( (pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 + (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) / (2. * (dx**2 + dy**2)) - dx**2 * dy**2 * b[1:-1,1:-1] / (2. * (dx**2 + dy**2)))
        p[:,-1] = p[:, -2] ## dp/dx = 0 at x = 2
p[0, :] = p[1, :] ## dp/dy = 0 at y = 0
p[:, 0] = p[:, 1] ## dp/dx = 0 at x = 0
p[-1,:] = 0. ## p = 0 at y = 2
return p
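# Together, build_up_b and pressure_poisson implement the pressure-projection
# step of this solver: build_up_b assembles the source term b of the pressure
# Poisson equation  laplacian(p) = b  from the intermediate velocity field, and
# pressure_poisson relaxes it with nit Jacobi sweeps under dp/dn = 0 on three
# boundaries and p = 0 at y = 2.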
def cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi):
un = np.empty_like(u)
vn = np.empty_like(v)
b = np.zeros((ny, nx))
nu = xi[-1]
for n in range(nt):
un = u.copy()
vn = v.copy()
b = build_up_b(b, rho, dt, u, v, dx, dy)
p = pressure_poisson(p, dx, dy, b)
#print p
u[1:-1, 1:-1] = (un[1:-1, 1:-1] - un[1:-1, 1:-1] * (dt / dx) * (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * (dt / dy) * (un[1:-1, 1:-1] - un[0:-2,1:-1]) -
(dt / (2.*rho*dx)) * (p[1:-1,2:] - p[1:-1,0:-2]) +
nu * ( (dt / dx**2) * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
(dt / dy**2) * (un[2:, 1:-1] - 2. * un[1:-1,1:-1] + un[0:-2, 1:-1]) ) )
v[1:-1, 1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * (dt / dx) * (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * (dt / dy) * (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) -
(dt / (2.*rho*dy)) * (p[2:, 1:-1] - p[0:-2, 1:-1]) +
nu * ( (dt / dx**2) * (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
(dt / dy**2) * (vn[2:, 1:-1] - 2. * vn[1:-1, 1:-1] + vn[0:-2, 1:-1]) ) )
u[0, :] = -2 * xi[0] * np.sin(xi[2] * np.pi * np.linspace(0., 2., int(2/dx + 1)))
u[:, 0] = 0.
u[:, -1] = 0.
    u[-1, :] = 2 * xi[1] * np.sin(xi[3] * np.pi * np.linspace(0., 2., int(2/dx + 1))) # sinusoidal driven-lid velocity scaled by xi[1]
v[0, :] = 0.
v[-1, :] = 0.
v[:, 0] = 0. # * np.exp(- xi[2] * np.linspace(0., 2., int(2/dx + 1)))
v[:, -1] = 0. # * np.exp(- xi[3] * np.linspace(0., 2., int(2/dx + 1)))
return u, v, p
nx = 101
ny = 101
#nt = 500
nit = 50
c = 1.
dx = 2. / (nx - 1)
dy = 2. / (ny - 1)
x = np.linspace(0, 2., nx)
y = np.linspace(0, 2., ny)
X, Y = np.meshgrid(x, y)
rho = 1.
#nu = .05
dt = .001
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
nt = 1000
dim = 5
#xi = st.uniform.rvs(size = (5,))
#xi[4] = xi[4] * 0.04 + 0.01
N_init = 20
XI = 2. * st.uniform.rvs(size = (N_init,dim)) - 1.
YI = np.zeros((N_init,1))
for i in range(XI.shape[0]):
    print('Taking initial sample : ' + str(i))
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
xi = 0.5 * (XI[i,:].copy() + 1.)
xi[-1] = 0.04 * xi[-1] + 0.01
u, v, p = cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi)
YI[i,0] = p[-2, -1]
print(YI)
kern = kp.RBF(dim, 1, 1)
gp = kp.GP(XI, YI, kern)
N_quad = 300
gp.optimize()
sig = np.zeros(N_quad + 1)
sig_noise = np.zeros(N_quad + 1)
ell = np.zeros(N_quad + 1)
sig[0] = gp._kern._var
sig_noise[0] = gp._noise_var
ell[0] = gp._kern._lengthscale[0]
kern._var = sig[0]
kern._lengthscale = [ell[0]] * dim
for i in range(N_quad):
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
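    # Active-learning step: query the next design where the GP posterior variance
    # is largest (assuming kernel_py's argmaxvar maximizes predictive variance
    # over the given box).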
x_new = gp.argmaxvar((-1.,1.))
    print('New design :' + str(x_new))
    print(x_new.shape)
xi = 0.5 * (x_new.copy() + 1.)
xi[-1] = 0.04 * xi[-1] + 0.01
    print('New input : ' + str(xi))
u, v, p = cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi)
#y_new = collect_data(x_new).reshape((1,1))# + 0.1 * np.random.normal(size = (1,1))
y_new = p[-2, -1]
XI = np.vstack([XI, x_new])
YI = np.vstack([YI, y_new])
gp_new = kp.GP(XI, YI, kern)
gp_new._noise_var = gp._noise_var
gp_new.optimize()
#gp_new._kern._lengthscale
sig[i+1] = gp_new._kern._var
sig_noise[i+1] = gp_new._noise_var
ell[i+1] = gp_new._kern._lengthscale[0]
kern._var = sig[i+1]
kern._lengthscale = [ell[i+1]] * dim
gp = gp_new
#f, var = gp_new.predict(X_test)
if i % 50 == 0:
np.save('sig_batch_'+str(i)+'.npy', sig)
np.save('ell_batch_'+str(i)+'.npy', ell)
np.save('sig_noise_batch_'+str(i)+'.npy', sig_noise)
np.save('X_batch_'+str(i)+'.npy', XI)
np.save('Y_batch_'+str(i)+'.npy', YI)
        print('Took active data ' + str(i))
np.save('sig.npy', sig)
np.save('sig_noise.npy', sig_noise)
np.save('ell.npy', ell)
np.save('X.npy', XI)
np.save('Y.npy', YI)
#fig = plt.figure(figsize = (11, 7), dpi = 100)
# plotting the pressure field as a contour
#plt.contourf(X, Y, p, alpha = 0.5, cmap = cm.viridis)
#plt.colorbar()
# plotting the pressure field outlines
#plt.contour(X, Y, p, 30, cmap = cm.viridis)
# plotting velocity field
#plt.quiver(X[::2, ::2], Y[::2, ::2], u[::2, ::2], v[::2, ::2])
#plt.xlabel('X')
#plt.ylabel('Y')
#plt.show()
| StarcoderdataPython |
# Input the age: print "Adult" if it is greater than 18, otherwise a reminder.
age = int(input("Type your age: "))
if age > 18:
print("Adult")
else:
print("You are not an adult yet.") | StarcoderdataPython |
8004081 | import json
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--savedir',help='directory to save downloaded InstaVariety videos')
args = parser.parse_args()
savedir = args.savedir
# create the save directory if it doesn't exist
os.system('mkdir -p {}'.format(savedir))
## NOTE: this assumes that you're running the script from the
## directory in which it's located. if not, you need to change
## the path of the InstaVariety.json file
with open('InstaVariety.json','rb') as f:
insta_data = json.load(f)
for post in insta_data:
# make the save directory for the current video, if not exist
savedir_p = '{}/{}'.format(savedir,post['download_tag'])
os.system('mkdir -p {}'.format(savedir_p))
dl_link = post['video_link']
dl_name = post['urls'][0]
print('downloading {}'.format(dl_link))
os.system('youtube-dl {} --output {}/{}'.format(dl_link,savedir_p,dl_name))
| StarcoderdataPython |
8195764 | from InstaBot import InstaBot
from Logger import Logger
from ConfigHandler import ConfigHandler
import sys, os , datetime, time, logging
import pickle
def save(data):
with open('./data.p', 'wb') as fp:
pickle.dump(data, fp, protocol=pickle.HIGHEST_PROTOCOL)
def load_data(path) -> dict:
if os.path.isfile(path):
with open(path, 'rb') as f:
comments = pickle.load(f)
else:
comments = {}
comments['number'] = 0
return comments
def main(logger):
script_dir = os.path.dirname(__file__)
conf = ConfigHandler(os.path.join(script_dir,'config.ini'))
try:
if os.path.isfile(os.path.join(script_dir,'config.ini')) is True:
parser = conf.get_parser()
else:
conf.create_config()
parser = conf.get_parser()
except Exception as e:
logger.log_error("Error: %s" % str(e))
raise RuntimeError
try:
print(parser.get('urls', 'comments_urls'))
bot = InstaBot(parser.get('credentials', 'username'), parser.get('credentials', 'password'), parser.get('urls', 'comments_urls'))
bot.start()
except KeyboardInterrupt:
print(comments)
raise RuntimeError
if __name__ == "__main__":
try:
comments = load_data('./data.p')
dt_now = datetime.datetime.now()
date = [dt_now.second, dt_now.minute, dt_now.hour, dt_now.hour, dt_now.day, dt_now.month, dt_now.year]
logger = Logger('instabot',True,logging.DEBUG, str(date[4])+'-'+str(date[5])+'-'+str(date[6]))
except Exception as error:
print(error)
sys.exit(-1)
try:
main(logger)
except Exception as e:
logger.log_error("Error: %s" % str(e))
finally:
save(comments)
| StarcoderdataPython |
6680575 | <reponame>richard-parks/RAPTR
import django_filters
from django_filters import FilterSet
from shared.models import Contact
from .models import Proposal
class ProposalFilter(FilterSet):
investigator_supported = django_filters.ModelChoiceFilter(queryset=Contact.objects.all().filter(active=True, is_pi=True), lookup_expr='exact', label='PI')
class Meta:
model = Proposal
fields = {
'status': ['exact'],
'year_proposed': ['exact']
}
| StarcoderdataPython |
1847694 | <filename>pychron/lasers/stage_managers/stage_visualizer.py
# ===============================================================================
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.pdf_graphics_context import PdfPlotGraphicsContext
from enable.component_editor import ComponentEditor
from traits.api import Instance, List, Property, Str
from traitsui.api import View, HGroup, UItem, TabularEditor, Handler, Action
from traitsui.tabular_adapter import TabularAdapter
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.filetools import unique_date_path
from pychron.core.helpers.formatting import floatfmt
from pychron.loggable import Loggable
from pychron.canvas.canvas2D.stage_visualization_canvas import StageVisualizationCanvas
from pychron.pychron_constants import LIGHT_RED
from pychron.stage.maps.laser_stage_map import LaserStageMap
from pychron.paths import paths
class ResultsAdapter(TabularAdapter):
columns = [("Hole", "hole_id"), ("dX", "dx"), ("dY", "dy")]
dx_text = Property
dy_text = Property
def get_bg_color(self, obj, trait, row, column=0):
item = getattr(obj, trait)[row]
if not item.corrected:
return LIGHT_RED
def _get_dx_text(self):
return floatfmt(self.item.dx, n=3)
def _get_dy_text(self):
return floatfmt(self.item.dy, n=3)
class StageVisualizerHandler(Handler):
def save(self, info):
info.object.save()
class StageVisualizer(Loggable):
canvas = Instance(StageVisualizationCanvas, ())
results = List
stage_map_name = Str
def set_stage_map(self, smap, points, calibration):
self.stage_map_name = smap.name
self.canvas.build_map(smap, points, calibration)
def save(self):
root = paths.corrections_dir
base = self.stage_map_name
p = unique_date_path(root, base, extension="")
gp = "{}.{}".format(p, "pdf")
gc = PdfPlotGraphicsContext(filename=gp, pagesize="letter")
from reportlab.lib.pagesizes import letter
bounds = self.canvas.bounds
self.canvas.do_layout(size=letter, force=True)
gc.render_component(self.canvas, valign="center")
gc.save(p)
self.canvas.do_layout(size=bounds, force=True)
self.canvas.invalidate_and_redraw()
tp = "{}.{}".format(p, "txt")
with open(tp, "w") as wfile:
for r in self.results:
args = r.nx, r.ny, r.dx, r.dy
args = ["{:0.5f}".format(x) for x in args]
args = [r.hole_id, str(r.corrected)] + args
line = ",".join(args)
wfile.write("{}\n".format(line))
def traits_view(self):
v = View(
HGroup(
UItem("canvas", editor=ComponentEditor(width=550, height=550)),
UItem("results", editor=TabularEditor(adapter=ResultsAdapter())),
),
handler=StageVisualizerHandler(),
buttons=[
Action(action="save", name="Save"),
],
title="Stage Visualizer",
resizable=True,
)
return v
if __name__ == "__main__":
from pychron.core.helpers.logger_setup import logging_setup
paths.build("_dev")
logging_setup("sv", use_archiver=False, use_file=False)
p = (
"/Users/ross/Programming/github/support_pychron/setupfiles/tray_maps"
"/221-hole.txt"
)
# p = '/Users/argonlab3/Pychron_co2/setupfiles/tray_maps/221-small_hole.txt'
sm = LaserStageMap(file_path=p)
sv = StageVisualizer()
results = [
((-3.9878, 15.9512), True),
((-1.9939, 15.5), False),
((0, 15.9512), True),
]
class CO:
rotation = 1
center = -2, 0
sv.set_stage_map(sm, results, CO())
sv.configure_traits()
# ============= EOF =============================================
# class StageVisualizer(Manager):
# canvas = Instance(StageVisualizationCanvas)
# stage_map = Instance(LaserStageMap)
# status_text = Str
#
# use_calibration = Bool(True)
# flag = True
# center = Tuple(Float, Float)
# rotation = Float(23)
# path = None
#
# def __init__(self, *args, **kw):
# super(StageVisualizer, self).__init__(*args, **kw)
# # p = os.path.join(data_dir, 'stage_visualizer')
# self.path, _ = unique_path(paths.stage_visualizer_dir, 'vis',
# extension='')
#
# def update_calibration(self, obj, name, new):
# self.clear()
# if name == 'calibration_item':
# self.center = new.center
# self.rotation = new.rotation
# else:
# setattr(self, name, new)
#
# self.canvas.build_map(self.stage_map, calibration=[self.center,
# self.rotation])
#
# def set_calibration(self, ca):
# pass
#
# # self.clear()
# # self.center = ca.get_center_position()
# # self.rotation = ca.get_rotation()
# #
# # self.canvas.build_map(self.stage_map, calibration=[self.center,
# # self.rotation])
#
# def clear(self):
# self.info('clearing visualizer')
# # sm = self.stage_map
# #
# # sm.clear_correction_file()
# # sm.clear_interpolations()
#
# self.canvas.clear()
#
# def dump(self):
# with open(self.path, 'wb') as f:
# d = dict(center=self.center,
# rotation=self.rotation,
# markup=self.canvas.markupcontainer)
#
# pickle.dump(d, f)
#
# def load_visualization(self):
# p = self.open_file_dialog()
#
# if p is not None:
# with open(p, 'rb') as f:
# # try:
# d = pickle.load(f)
#
# self.center = d['center']
# self.rotation = d['rotation']
#
# for k, v in d['markup'].iteritems():
# v.set_canvas(self.canvas)
#
# self.canvas.markupcontainer = d['markup']
# # except Exception, e:
# # print 'exception', e
#
# # self.canvas.invalidate_and_redraw()
#
# def set_current_hole(self, h):
# self.canvas.set_current_hole(h)
# self.canvas.request_redraw()
#
# def record_uncorrected(self, h, dump=True, *args):
# self.canvas.record_uncorrected(h)
# if dump:
# self.dump()
#
# def record_correction(self, h, x, y, dump=True):
# self.canvas.record_correction(h, x, y)
# if dump:
# self.dump()
#
# def record_interpolation(self, hole, x, y, color=(1, 1, 0), dump=True):
# if isinstance(hole, (str, int)):
# hole = self.stage_map.get_hole(str(hole))
#
# self.canvas.record_interpolation(hole, x, y, color)
# if dump:
# self.dump()
#
# @on_trait_change('canvas:selected')
# def update_status_bar(self, parent, name, obj):
# if isinstance(obj, SampleHole):
# correction = ''
# if obj.hole.corrected:
# correction = 'cor.= ({:0.2f},{:0.2f})'.format(obj.hole.x_cor,
# obj.hole.y_cor
# )
# # interpolation = ''
# # if obj.hole.interpolated:
# # h = ', '.join(sorted(set([iph.id for iph in obj.hole.interpolation_holes])))
# # interpolation = 'interpolation holes= {}'.format(h)
#
# self.status_text = 'hole = {} nom.= ({:0.2f},{:0.2f}) cal.=({:0.2f},{:0.2f}) {}'.format(obj.name,
# obj.hole.x,
# obj.hole.y,
# obj.x,
# obj.y,
# correction)
#
# def _use_calibration_changed(self):
# ca = self.canvas
# ca.build_map(self.stage_map,
# calibration=[self.center,
# self.rotation] if self.use_calibration else None
# )
#
# def traits_view(self):
# v = View(
# # Item('test'),
# # HGroup(Item('center', style='readonly'), Item('rotation', style='readonly')),
# Item('canvas', editor=ComponentEditor(width=550,
# height=550),
# show_label=False),
#
# statusbar='status_text',
# title='Stage Visualizer',
# resizable=True
# )
# return v
#
# def _stage_map_default(self):
# p = os.path.join(paths.map_dir, '61-hole.txt')
# sm = LaserStageMap(file_path=p)
# sm.load_correction_file()
# return sm
#
# def _canvas_default(self):
# c = StageVisualizationCanvas()
# c.build_map(self.stage_map, calibration=(self.center,
# self.rotation))
#
# return c
#
# # ===============================================================================
# # testing
# # ===============================================================================
# def test_view(self):
# v = View(Item('test'),
# Item('use_calibration'),
# Item('center'),
# Item('rotation'),
# Item('canvas', editor=ComponentEditor(width=700,
# height=700),
# show_label=False),
#
# statusbar='status_text'
# )
# return v
#
# def _test_fired(self):
# t = Thread(target=self._execute_)
# t.start()
#
# def _apply_calibration(self, hole):
# cpos = (0, 0)
# rot = 0
# if self.use_calibration:
# cpos = self.center
# rot = self.rotation
#
# return self.stage_map.map_to_calibration(hole.nominal_position,
# cpos, rot)
#
# def _execute_(self):
#
# ca = self.canvas
#
# self.clear()
# sm = self.stage_map
# sm.clear_correction_file()
# sm.clear_interpolations()
#
# ca.build_map(sm, calibration=[self.center,
# self.rotation] if self.use_calibration else None
# )
# ca.invalidate_and_redraw()
#
# # set some correction values
# vs = range(61)
# # vs.remove(17)
# # vs.remove(26)
# # vs.remove(25)
# # vs.remove(34)
# # vs.remove(35)
# # vs.remove(0)
# # vs.remove(1)
# # vs.remove(2)
# #
# # vs.remove(58)
# # vs.remove(59)
# # vs.remove(60)
# # vs.remove(3)
# # vs.remove(6)
# vs.remove(30)
# # vs = range(50, 60)
# for i in vs:
# # for i in [21, 29, 30]:
#
# h = sm.get_hole(str(i + 1))
# x, y = self._apply_calibration(h)
#
# x = self._add_error(x)
# y = self._add_error(y)
#
# # ca.record_correction(h, x, y)
# # sm.set_hole_correction(h.id, x, y)
# r = random.randint(0, 10)
# # r = 7
# if r > 6:
# self.record_correction(h, x, y, dump=False)
# sm.set_hole_correction(h.id, x, y)
#
# # self._test_interpolate_one()
# self._test_interpolate_all()
#
# def _add_error(self, a):
# # return a
# return a + (0.5 - random.random()) / 2.
#
# def _test_interpolate_one(self):
# sm = self.stage_map
# ca = self.canvas
# h = sm.get_hole('7')
# args = sm.get_interpolated_position('7')
# # print args
# color = (1, 1, 0)
# if args:
# nx = args[0]
# ny = args[1]
# self.record_interpolation(h, nx, ny, color, dump=False)
# ca.invalidate_and_redraw()
#
# def _test_interpolate_all(self):
# sm = self.stage_map
# ca = self.canvas
# colors = [(1, 1, 0), (0, 1, 1), (0, 0.75, 1), (0, 0.5, 1),
# (0, 0.75, 0.75), (0, 0.5, 0.75)
# ]
# for j, color in enumerate(colors[:1]):
# self.info('iteration {}'.format(j + 1))
# s = 0
# for i in range(60, -1, -1):
# h = sm.get_hole(str(i + 1))
# self.set_current_hole(h)
# r = random.randint(0, 10)
# r = 0
# if r > 5:
# nx, ny = self._apply_calibration(h)
# nx = self._add_error(nx)
# ny = self._add_error(ny)
# self.record_correction(h, nx, ny, dump=False)
# sm.set_hole_correction(h.id, nx, ny)
# else:
# kw = dict(cpos=self.center,
# rotation=self.rotation)
# if not self.use_calibration:
# kw['cpos'] = (0, 0)
# kw['rotation'] = 0
#
# args = sm.get_interpolated_position(h.id,
# **kw
# )
# if args:
# s += 1
# nx = args[0]
# ny = args[1]
# self.record_interpolation(h, nx, ny, color, dump=False)
# else:
# if not h.has_correction():
# self.record_uncorrected(h)
# # time.sleep(0.5)
# # do_later(ca.invalidate_and_redraw)
#
# n = 61 - sum([1 for si in sm.sample_holes if si.has_correction()])
# self.info('interpolated holes {} - noncorrected {}'.format(s, n))
#
# if not n or not s:
# break
#
# ca.invalidate_and_redraw()
#
# self.dump()
# self.info('noncorrected holes = {}'.format(n))
#
| StarcoderdataPython |
5177540 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from ocelot.transformations.production_volumes import add_pv_to_allocatable_byproducts
def test_add_pv_to_allocatable_byproducts():
given = [{
'name': '',
'exchanges': [{
'name': '',
'amount': 3,
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'production volume': {'amount': 0}
}, {
'name': '',
'amount': 6,
'type': 'reference product',
'production volume': {'amount': 10}
}]
}]
expected = [{
'name': '',
'exchanges': [{
'name': '',
'amount': 3,
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'production volume': {'amount': 5}
}, {
'name': '',
'amount': 6,
'type': 'reference product',
'production volume': {'amount': 10}
}]
}]
assert add_pv_to_allocatable_byproducts(given) == expected
def test_add_pv_to_allocatable_byproducts_skip_existing():
given = [{
'name': '',
'exchanges': [{
'name': '',
'amount': 3,
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'production volume': {'amount': 1.2}
}, {
'name': '',
'amount': 6,
'type': 'reference product',
'production volume': {'amount': 10}
}]
}]
expected = [{
'name': '',
'exchanges': [{
'name': '',
'amount': 3,
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'production volume': {'amount': 1.2}
}, {
'name': '',
'amount': 6,
'type': 'reference product',
'production volume': {'amount': 10}
}]
}]
assert add_pv_to_allocatable_byproducts(given) == expected
def test_add_pv_to_allocatable_byproducts_waste_treatment():
given = [{
'name': '',
'exchanges': [{
'name': '',
'amount': 3,
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'production volume': {'amount': 0}
}, {
'name': '',
'amount': -6,
'type': 'reference product',
'production volume': {'amount': 10}
}]
}]
expected = [{
'name': '',
'exchanges': [{
'name': '',
'amount': 3,
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'production volume': {'amount': 5}
}, {
'name': '',
'amount': -6,
'type': 'reference product',
'production volume': {'amount': 10}
}]
}]
assert add_pv_to_allocatable_byproducts(given) == expected
| StarcoderdataPython |
3456943 | <reponame>markreidvfx/pct_titles<filename>pct_titles/__init__.py
from pctobjects import PctFile, TitlePage, TitleText, TitleRectangle, TitleOval, TitleLine, TextFormat
| StarcoderdataPython |
19164 | from app.models.classes_basicas.Pessoa import Pessoa
class Empregado(Pessoa):
id_empregado = None
def getIdEmpregado(self):
return self.id_empregado
def setIdEmpregado(self, id_empregado):
self.id_empregado = id_empregado | StarcoderdataPython |
3216775 | <reponame>melster1010/VIAME
# Copyright (c) Microsoft Corporation. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
from kwiver.kwiver_process import KwiverProcess
from sprokit.pipeline import process
from vital.types import Image
from vital.types import ImageContainer
from vital.types import Homography
from PIL import Image as pil_image
from vital.util.VitalPIL import get_pil_image, from_pil
import cv2
import csv
import numpy as np
import scipy.spatial
def compute_transform( optical, thermal, warp_mode = cv2.MOTION_HOMOGRAPHY,
match_low_res=True, good_match_percent = 0.15, ratio_test = .85,
match_height = 512, min_matches = 4, min_inliers = 4 ):
# Convert images to grayscale
if len( thermal.shape ) == 3 and thermal.shape[2] == 3:
thermal_gray = cv2.cvtColor( thermal, cv2.COLOR_RGB2GRAY )
else:
thermal_gray = thermal
if len( optical.shape ) == 3 and optical.shape[2] == 3:
optical_gray = cv2.cvtColor( optical, cv2.COLOR_RGB2GRAY )
else:
optical_gray = optical
# resize if requested
if match_low_res:
aspect = optical_gray.shape[1] / optical_gray.shape[0]
optical_gray = cv2.resize( optical_gray, ( int( match_height*aspect ), match_height) )
# Detect SIFT features and compute descriptors.
sift = cv2.xfeatures2d.SIFT_create()
keypoints1, descriptors1 = sift.detectAndCompute( thermal_gray, None )
keypoints2, descriptors2 = sift.detectAndCompute( optical_gray, None )
if len( keypoints1 ) < 2:
print("not enough keypoints")
return False, np.identity( 3 ), 0
if len( keypoints2 ) < 2:
print("not enough keypoints")
return False, np.identity( 3 ), 0
# scale feature points back to original size
if match_low_res:
scale = optical.shape[0] / optical_gray.shape[0]
for i in range( 0, len( keypoints2 ) ):
keypoints2[i].pt = ( keypoints2[i].pt[0]*scale, keypoints2[i].pt[1]*scale )
# Pick good features
if ratio_test < 1:
# ratio test
matcher = cv2.BFMatcher( cv2.NORM_L2, crossCheck=False )
matches = matcher.knnMatch( descriptors1, descriptors2, k=2 )
# Apply ratio test
good_matches = []
for m, n in matches:
if m.distance < ratio_test * n.distance:
good_matches.append( m )
matches = good_matches
else:
# top percentage matches
matcher = cv2.BFMatcher( cv2.NORM_L2, crossCheck=True )
matches = matcher.match( descriptors1, descriptors2 )
# Sort matches by score
matches.sort( key=lambda x: x.distance, reverse=False )
# Remove not so good matches
num_good_matches = int( len( matches ) * good_match_percent )
matches = matches[:num_good_matches]
print( "%d matches" % len(matches) )
if len( matches ) < min_matches:
print( "not enough matches" )
return False, np.identity( 3 ), 0
# Extract location of good matches
points1 = np.zeros( ( len( matches ), 2 ), dtype=np.float32 )
points2 = np.zeros( ( len( matches ), 2 ), dtype=np.float32 )
for i, match in enumerate( matches ):
points1[ i, : ] = keypoints1[ match.queryIdx ].pt
points2[ i, : ] = keypoints2[ match.trainIdx ].pt
# Find homography
h, mask = cv2.findHomography( points1, points2, cv2.RANSAC )
print( "%d inliers" % sum( mask ) )
if sum( mask ) < min_inliers:
print( "not enough inliers" )
return False, np.identity( 3 ), 0
# Check if we have a robust set of inliers by computing the area of the convex hull
# Good area is 11392
try:
print( 'Inlier area ', scipy.spatial.ConvexHull( points2[ np.isclose( mask.ravel(), 1 ) ] ).area )
if scipy.spatial.ConvexHull( points2[ np.isclose( mask.ravel(), 1 ) ] ).area < 1000:
print("Inliers seem colinear or too close, skipping")
return False, np.identity(3), 0
except:
print( "Inliers seem colinear or too close, skipping" )
return False, np.identity(3), 0
# if non homography requested, compute from inliers
if warp_mode != cv2.MOTION_HOMOGRAPHY:
points1_inliers = []
points2_inliers = []
for i in range(0, len(mask)):
if ( int(mask[i]) == 1):
points1_inliers.append( points1[i,:] )
points2_inliers.append( points2[i,:] )
a = cv2.estimateRigidTransform( np.asarray( points1_inliers ), \
np.asarray( points2_inliers ), ( warp_mode == cv2.MOTION_AFFINE ) )
if a is None:
return False, np.identity(3), 0
h = np.identity(3)
# turn in 3x3 transform
h[0,:] = a[0,:]
h[1,:] = a[1,:]
return True, h, sum( mask )
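# Usage sketch (hypothetical arrays): estimate the thermal->optical homography and
# warp the thermal frame into the optical frame, mirroring the process step below.
#   ok, H, n_inliers = compute_transform( optical_rgb, thermal_u8 )
#   if ok:
#       warped = cv2.warpPerspective( thermal_u8, H,
#                                     ( optical_rgb.shape[1], optical_rgb.shape[0] ) )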
# normalize thermal image
def normalize_thermal( thermal_image, percent=0.01 ):
    if thermal_image is not None and thermal_image.dtype is not np.dtype('uint8'):
        thermal_norm = np.floor( ( thermal_image - \
            np.percentile( thermal_image, percent) ) / \
            ( np.percentile( thermal_image, 100 - percent ) - \
            np.percentile( thermal_image, percent ) ) * 256 )
        # Clip before the uint8 cast so values outside the percentile window
        # saturate at 0/255 instead of wrapping around.
        thermal_norm = np.clip( thermal_norm, 0, 255 )
    else:
        thermal_norm = thermal_image
    return thermal_norm.astype( np.uint8 )
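# Example: a 16-bit frame is linearly stretched so that its 0.01th and 99.99th
# percentiles map to 0 and 255, which stabilizes SIFT matching on raw thermal counts.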
class register_frames_process( KwiverProcess ):
"""
    This process registers optical and thermal frames against each other and
    outputs the warped images and the homographies between them.
"""
# -------------------------------------------------------------------------
def __init__( self, conf ):
KwiverProcess.__init__( self, conf )
# set up configs
self.add_config_trait( "good_match_percent", "good_match_percent",
'0.15', 'Good match percent [0.0,1.0].' )
self.add_config_trait( "ratio_test", "ratio_test",
'0.85', 'Feature point test ratio' )
self.add_config_trait( "match_height", "match_height",
'512', 'Match height.' )
self.add_config_trait( "min_matches", "min_matches",
'4', 'Minimum number of feature matches' )
self.add_config_trait( "min_inliers", "min_inliers",
'4', 'Minimum number of inliers' )
self.declare_config_using_trait( 'good_match_percent' )
self.declare_config_using_trait( 'ratio_test' )
self.declare_config_using_trait( 'match_height' )
self.declare_config_using_trait( 'min_matches' )
self.declare_config_using_trait( 'min_inliers' )
# set up required flags
optional = process.PortFlags()
required = process.PortFlags()
required.add( self.flag_required )
# declare our ports (port-name, flags)
self.add_port_trait( "optical_image", "image", "Input image" )
self.add_port_trait( "thermal_image", "image", "Input image" )
self.add_port_trait( "warped_optical_image", "image", "Output image" )
self.add_port_trait( "warped_thermal_image", "image", "Output image" )
self.add_port_trait( "optical_to_thermal_homog", "homography", "Output homog" )
self.add_port_trait( "thermal_to_optical_homog", "homography", "Output homog" )
self.declare_input_port_using_trait( 'optical_image', required )
self.declare_input_port_using_trait( 'thermal_image', required )
self.declare_output_port_using_trait( 'warped_optical_image', optional )
self.declare_output_port_using_trait( 'warped_thermal_image', optional )
self.declare_output_port_using_trait( 'optical_to_thermal_homog', optional )
self.declare_output_port_using_trait( 'thermal_to_optical_homog', optional )
# -------------------------------------------------------------------------
def _configure( self ):
self._base_configure()
self._good_match_percent = float( self.config_value( 'good_match_percent' ) )
self._ratio_test = float( self.config_value( 'ratio_test' ) )
self._match_height = int( self.config_value( 'match_height' ) )
self._min_matches = int( self.config_value( 'min_matches' ) )
self._min_inliers = int( self.config_value( 'min_inliers' ) )
# -------------------------------------------------------------------------
def _step( self ):
# grab image container from port using traits
optical_c = self.grab_input_using_trait( 'optical_image' )
thermal_c = self.grab_input_using_trait( 'thermal_image' )
        # Get numpy arrays from the image containers
optical_npy = optical_c.image().asarray().astype('uint8')
thermal_npy = thermal_c.image().asarray().astype('uint16')
thermal_norm = normalize_thermal( thermal_npy )
if thermal_norm is not None and optical_npy is not None:
# compute transform
ret, transform, _ = compute_transform(
optical_npy,
thermal_norm,
warp_mode = cv2.MOTION_HOMOGRAPHY,
match_low_res = True,
good_match_percent = self._good_match_percent,
ratio_test = self._ratio_test,
match_height = self._match_height,
min_matches = self._min_matches,
min_inliers = self._min_inliers )
else:
ret = False
if ret:
# TODO: Make all of these computations conditional on port connection
inv_transform = np.linalg.inv( transform )
thermal_warped = cv2.warpPerspective( thermal_npy, transform, \
( optical_npy.shape[1], optical_npy.shape[0] ) )
optical_warped = cv2.warpPerspective( optical_npy, inv_transform, \
( thermal_npy.shape[1], thermal_npy.shape[0] ) )
#self.push_to_port_using_trait( 'thermal_to_optical_homog',
# Homography.from_matrix( transform, 'd' )
#self.push_to_port_using_trait( 'optical_to_thermal_homog',
# Homography.from_matrix( inv_transform, 'd' )
self.push_to_port_using_trait( 'warped_thermal_image',
ImageContainer.fromarray( thermal_warped ) )
self.push_to_port_using_trait( 'warped_optical_image',
ImageContainer.fromarray( optical_warped ) )
else:
print( 'alignment failed!' )
#self.push_to_port_using_trait( "thermal_to_optical_homog", Homography() )
#self.push_to_port_using_trait( "optical_to_thermal_homog", Homography() )
self.push_to_port_using_trait( 'warped_optical_image', ImageContainer() )
self.push_to_port_using_trait( 'warped_thermal_image', ImageContainer() )
self._base_step()
| StarcoderdataPython |
3345327 | # $Header: /opt/cvs/python/packages/share1.5/Pmv/fileCommandsGUI.py,v 1.9.2.1 2011/04/08 21:17:29 sargis Exp $
from ViewerFramework.VFCommand import CommandGUI, CommandProxy
class PDBWriterProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import PDBWriter
command = PDBWriter()
loaded = self.vf.addCommand(command, 'writePDB', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
PDBWriterGUI = CommandGUI()
PDBWriterGUI.addMenuCommand('menuRoot', 'File', 'Write PDB',
cascadeName='Save', index=3, separatorAbove=1)
class PDBQWriterProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import PDBQWriter
command = PDBQWriter()
loaded = self.vf.addCommand(command, 'writePDBQ', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
PDBQWriterGUI = CommandGUI()
PDBQWriterGUI.addMenuCommand('menuRoot', 'File', 'Write PDBQ',
cascadeName='Save', index=4)
class PDBQSWriterProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import PDBQSWriter
command = PDBQSWriter()
            loaded = self.vf.addCommand(command, 'writePDBQS', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
PDBQSWriterGUI = CommandGUI()
PDBQSWriterGUI.addMenuCommand('menuRoot', 'File', 'Write PDBQS',
cascadeName='Save', index=5)
class PDBQTWriterProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import PDBQTWriter
command = PDBQTWriter()
loaded = self.vf.addCommand(command, 'writePDBQT', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
PDBQTWriterGUI = CommandGUI()
PDBQTWriterGUI.addMenuCommand('menuRoot', 'File', 'Write PDBQT',
cascadeName='Save', index=6)
class SaveMMCIFProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import SaveMMCIF
command = SaveMMCIF()
loaded = self.vf.addCommand(command, 'SaveMMCIF', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
SaveMMCIFGUI = CommandGUI()
SaveMMCIFGUI.addMenuCommand('menuRoot', 'File', 'Write MMCIF',
cascadeName='Save', index=7)
class PQRWriterProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import PQRWriter
command = PQRWriter()
loaded = self.vf.addCommand(command, 'writePQR', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
PQRWriterGUI = CommandGUI()
PQRWriterGUI.addMenuCommand('menuRoot', 'File', 'Write PQR',
cascadeName='Save', index=8)
class MoleculeReaderProxy(CommandProxy):
def __init__(self, vf, gui):
from Pmv.fileCommands import MoleculeReader
command = MoleculeReader()
vf.addCommand(command, 'readMolecule', gui)
CommandProxy.__init__(self, vf, gui)
MoleculeReaderGUI = CommandGUI()
MoleculeReaderGUI.addMenuCommand('menuRoot', 'File', 'Read Molecule', index = 0)
class fetchCommandProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import fetch
command = fetch()
loaded = self.vf.addCommand(command, 'fetch', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
fetchGUI = CommandGUI()
fetchGUI.addMenuCommand('menuRoot', 'File', 'Fetch From Web', index=0,
cascadeName='Import')
class VRML2WriterProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import VRML2Writer
command = VRML2Writer()
loaded = self.vf.addCommand(command, 'writeVRML2', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
VRML2WriterGUI = CommandGUI()
VRML2WriterGUI.addMenuCommand('menuRoot', 'File', 'Write VRML 2.0',
cascadeName='Save', cascadeAfter='Read Molecule',
separatorAboveCascade=1)
class STLWriterProxy(CommandProxy):
def guiCallback(self, **kw):
if self.command:
self.command.guiCallback(**kw)
else:
from Pmv.fileCommands import STLWriter
command = STLWriter()
loaded = self.vf.addCommand(command, 'writeSTL', self.gui)
if loaded:
command = loaded
self.command = command
self.command.guiCallback(**kw)
STLWriterGUI = CommandGUI()
STLWriterGUI.addMenuCommand('menuRoot', 'File', 'Write STL',
cascadeName='Save', index=11,separatorBelow=1)
class ReadSourceMoleculeProxy(CommandProxy):
def __init__(self, vf, gui):
from Pmv.fileCommands import ReadSourceMolecule
        command = ReadSourceMolecule()
vf.addCommand(command, 'readSourceMolecule', gui)
CommandProxy.__init__(self, vf, gui)
ReadSourceMoleculeGUI = CommandGUI()
ReadSourceMoleculeGUI.addToolBar('Read Molecule or Python Script', icon1='fileopen.gif',
type='ToolBarButton', balloonhelp='Read Molecule or Python Script', index=0)
def initGUI(viewer):
viewer.addCommandProxy(fetchCommandProxy(viewer, fetchGUI))
viewer.addCommandProxy(PDBWriterProxy(viewer, PDBWriterGUI))
viewer.addCommandProxy(PDBQWriterProxy(viewer, PDBQWriterGUI))
viewer.addCommandProxy(PDBQTWriterProxy(viewer, PDBQTWriterGUI))
viewer.addCommandProxy(PDBQSWriterProxy(viewer, PDBQSWriterGUI))
viewer.addCommandProxy(SaveMMCIFProxy(viewer, SaveMMCIFGUI))
viewer.addCommandProxy(PQRWriterProxy(viewer, PQRWriterGUI))
viewer.addCommandProxy(MoleculeReaderProxy(viewer, MoleculeReaderGUI))
viewer.addCommandProxy(VRML2WriterProxy(viewer, VRML2WriterGUI))
viewer.addCommandProxy(STLWriterProxy(viewer, STLWriterGUI))
viewer.addCommandProxy(ReadSourceMoleculeProxy(viewer, ReadSourceMoleculeGUI))
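# Every writer above follows the same lazy-loading proxy pattern: the real
# command object is imported and registered with the ViewerFramework only on
# the first GUI callback. A new file writer would be wired up the same way
# (a sketch with hypothetical FooWriter/writeFoo names):
#
#     class FooWriterProxy(CommandProxy):
#         def guiCallback(self, **kw):
#             if self.command:
#                 self.command.guiCallback(**kw)
#             else:
#                 from Pmv.fileCommands import FooWriter  # hypothetical
#                 command = FooWriter()
#                 loaded = self.vf.addCommand(command, 'writeFoo', self.gui)
#                 if loaded:
#                     command = loaded
#                 self.command = command
#                 self.command.guiCallback(**kw)
#
# followed by a CommandGUI menu entry and a viewer.addCommandProxy(...) call
# inside initGUI.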
| StarcoderdataPython |
4937074 | <reponame>uxlsl/shop_test<gh_stars>1-10
from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view, renderer_classes
from rest_framework import response, schemas
from rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer
from .models import Goods
from .serializers import GoodsSerializer
@api_view()
@renderer_classes([OpenAPIRenderer, SwaggerUIRenderer])
def schema_view(request):
generator = schemas.SchemaGenerator(title='Bookings API')
return response.Response(generator.get_schema(request=request))
class GoodsViewSet(viewsets.ModelViewSet):
serializer_class = GoodsSerializer
queryset = Goods.objects.all()
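# A viewset alone is not routed anywhere; a urls.py would typically register
# it with a router. A minimal sketch (the 'goods' prefix is an assumption,
# not taken from this project):
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'goods', GoodsViewSet)
#     urlpatterns = router.urls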
| StarcoderdataPython |
1653741 | <reponame>DalavanCloud/pysilfont
#!/usr/bin/env python
from __future__ import unicode_literals
'''Update glyph names in a font based on csv file
- Using FontForge rather than UFOlib so it can work with ttf (or sfd) files'''
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = '<NAME>'
from silfont.core import execute
argspec = [
('ifont',{'help': 'Input ttf font file'}, {'type': 'infont'}),
('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
('-i','--input',{'help': 'Mapping csv file'}, {'type': 'incsv', 'def': 'psnames.csv'}),
('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_setPostNames.log'}),
('--reverse',{'help': 'Change names in reverse', 'action': 'store_true', 'default': False},{})]
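# The mapping csv (default psnames.csv) must have exactly two fields per
# line: current glyph name, new glyph name. A hypothetical example:
#
#     uni0041,A
#     uni0042,B
#
# With --reverse, the two columns are applied in the opposite direction.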
def doit(args) :
logger = args.paramsobj.logger
font = args.ifont
# Process csv
csv = args.input
csv.numfields = 2
newnames={}
namescheck=[]
missingnames = False
for line in csv :
if args.reverse :
newnames[line[1]] = line[0]
namescheck.append(line[1])
else :
newnames[line[0]] = line[1]
namescheck.append(line[0])
for glyph in font.glyphs():
gname = glyph.glyphname
if gname in newnames :
namescheck.remove(gname)
glyph.glyphname = newnames[gname]
else:
missingnames = True
logger.log(gname + " in font but not csv file","W")
if missingnames : logger.log("Font glyph names missing from csv - see log for details","E")
for name in namescheck : # Any names left in namescheck were in csv but not ttf
logger.log(name + " in csv but not in font","W")
if namescheck != [] : logger.log("csv file names missing from font - see log for details","E")
return font
def cmd() : execute("FF",doit,argspec)
if __name__ == "__main__": cmd()
| StarcoderdataPython |
9638036 | """
Access to relabelling from templates.
"""
import logging
from typing import Sequence
from django import template
from django.template import Context
from CreeDictionary.CreeDictionary.relabelling import read_labels
from CreeDictionary.morphodict.templatetags.morphodict_orth import orth_tag
from CreeDictionary.utils.types import FSTTag
from crkeng.app.preferences import DisplayMode
logger = logging.getLogger(__name__)
register = template.Library()
# If a paradigm label preference is not set, use this one!
DEFAULT_PARADIGM_LABEL = "english"
def label_setting_to_relabeller(label_setting: str):
labels = read_labels()
return {
"english": labels.english,
"linguistic": labels.linguistic_short,
"source_language": labels.source_language,
}.get(label_setting, labels.english)
@register.simple_tag(takes_context=True)
def relabel(context: Context, tags: Sequence[FSTTag], labels=None):
"""
Gets the best matching label for the given object.
"""
if labels is None:
label_setting = label_setting_from_context(context)
else:
label_setting = labels
relabeller = label_setting_to_relabeller(label_setting)
if label := relabeller.get_longest(tags):
if label_setting == "source_language":
return orth_tag(context, label)
return label
logger.warning("Could not find relabelling for tags: %r", tags)
return "+".join(tags)
@register.simple_tag(takes_context=True)
def relabel_one(context: Context, tag: FSTTag, **kwargs):
"""
Relabels exactly one tag (a string). I use this instead of widening the type on
relabel() because polymorphic arguments make me nervous 😬
"""
return relabel(context, (tag,), **kwargs)
def label_setting_from_context(context: Context):
"""
Returns the most appropriate paradigm label preference.
:param context: a simple template Context or a RequestContext
"""
if hasattr(context, "request"):
# We can get the paradigm label from the cookie!
return context.request.COOKIES.get(DisplayMode.cookie_name, DisplayMode.default)
# Cannot get the request context? We can't detect the current cookie :/
return DisplayMode.default
| StarcoderdataPython |
3473374 | from rtamt.operation.abstract_operation import AbstractOperation
from rtamt.operation.sample import Sample
from rtamt.operation.sample import Time
class OnceOperation(AbstractOperation):
def __init__(self):
self.prev_out = Sample()
self.input = Sample()
self.prev_out.seq = 0
self.prev_out.time.sec = 0
self.prev_out.time.msec = 0
self.prev_out.value = -float("inf")
def addNewInput(self, sample):
self.input.seq = sample.seq
self.input.time.sec = sample.time.sec
self.input.time.msec = sample.time.msec
self.input.value = sample.value
def update(self):
out = Sample()
out.seq = self.input.seq
out.time.msec = self.input.time.msec
out.time.sec = self.input.time.sec
out.value = self.input.value
out.value = max(self.input.value, self.prev_out.value)
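        # Quantitative "once" semantics: the output is the running maximum of
        # all inputs seen so far (prev_out.value starts at -inf, the identity
        # for max). E.g. input values 1, 5, 3 produce outputs 1, 5, 5.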
self.prev_out.seq = out.seq
self.prev_out.time.sec = out.time.sec
self.prev_out.time.msec = out.time.msec
self.prev_out.value = out.value
return out | StarcoderdataPython |
5130831 | import logging
import string
import math
import re
import struct
import itertools
from collections import defaultdict
import claripy
import simuvex
import pyvex
from simuvex.s_errors import SimEngineError, SimMemoryError, SimTranslationError
from ..blade import Blade
from ..analysis import register_analysis
from ..surveyors import Slicecutor
from ..annocfg import AnnotatedCFG
from ..errors import AngrCFGError
from .cfg_node import CFGNode
from .cfg_base import CFGBase, IndirectJump
from .forward_analysis import ForwardAnalysis
from .cfg_arch_options import CFGArchOptions
VEX_IRSB_MAX_SIZE = 400
l = logging.getLogger("angr.analyses.cfg_fast")
class Segment(object):
"""
Representing a memory block. This is not the "Segment" in ELF memory model
"""
__slots__ = ['start', 'end', 'sort']
def __init__(self, start, end, sort):
"""
:param int start: Start address.
:param int end: End address.
:param str sort: Type of the segment, can be code, data, etc.
:return: None
"""
self.start = start
self.end = end
self.sort = sort
def __repr__(self):
s = "[%#x-%#x, %s]" % (self.start, self.end, self.sort)
return s
@property
def size(self):
"""
Calculate the size of the Segment.
:return: Size of the Segment.
:rtype: int
"""
return self.end - self.start
def copy(self):
"""
Make a copy of the Segment.
:return: A copy of the Segment instance.
:rtype: angr.analyses.cfg_fast.Segment
"""
return Segment(self.start, self.end, self.sort)
class SegmentList(object):
"""
SegmentList describes a series of segmented memory blocks. You may query whether an address belongs to any of the
    blocks or not, and obtain the exact block (segment) that the address belongs to.
"""
__slots__ = ['_list', '_bytes_occupied']
def __init__(self):
self._list = []
self._bytes_occupied = 0
#
# Overridden methods
#
def __len__(self):
return len(self._list)
#
# Private methods
#
def _search(self, addr):
"""
        Checks which segment the address `addr` should belong to, and returns the offset of that segment.
Note that the address may not actually belong to the block.
:param addr: The address to search
:return: The offset of the segment.
"""
start = 0
end = len(self._list)
while start != end:
mid = (start + end) / 2
segment = self._list[mid]
if addr < segment.start:
end = mid
elif addr >= segment.end:
start = mid + 1
else:
# Overlapped :(
start = mid
break
return start
def _insert_and_merge(self, address, size, sort, idx):
"""
Determines whether the block specified by (address, size) should be merged with adjacent blocks.
:param int address: Starting address of the block to be merged.
:param int size: Size of the block to be merged.
:param str sort: Type of the block.
:param int idx: ID of the address.
:return: None
"""
# sanity check
if idx > 0 and address + size <= self._list[idx - 1].start:
# There is a bug, since _list[idx] must be the closest one that is less than the current segment
l.warning("BUG FOUND: new segment should always be greater than _list[idx].")
# Anyways, let's fix it.
self._insert_and_merge(address, size, sort, idx - 1)
return
# Insert the block first
# The new block might be overlapping with other blocks. _insert_and_merge_core will fix the overlapping.
if idx == len(self._list):
self._list.append(Segment(address, address + size, sort))
else:
self._list.insert(idx, Segment(address, address + size, sort))
# Apparently _bytes_occupied will be wrong if the new block overlaps with any existing block. We will fix it
# later
self._bytes_occupied += size
# Search forward to merge blocks if necessary
pos = idx
while pos < len(self._list):
merged, pos, bytes_change = self._insert_and_merge_core(pos, "forward")
if not merged:
break
self._bytes_occupied += bytes_change
# Search backward to merge blocks if necessary
if pos >= len(self._list):
pos = len(self._list) - 1
while pos > 0:
merged, pos, bytes_change = self._insert_and_merge_core(pos, "backward")
if not merged:
break
self._bytes_occupied += bytes_change
def _insert_and_merge_core(self, pos, direction):
"""
The core part of method _insert_and_merge.
:param int pos: The starting position.
:param str direction: If we are traversing forwards or backwards in the list. It determines where the "sort"
of the overlapping memory block comes from. If everything works as expected, "sort" of
the overlapping block is always equal to the segment occupied most recently.
        :return: A tuple of (merged (bool), new position to begin searching (int), change in total bytes (int))
:rtype: tuple
"""
bytes_changed = 0
if direction == "forward":
if pos == len(self._list) - 1:
return False, pos, 0
previous_segment = self._list[pos]
previous_segment_pos = pos
segment = self._list[pos + 1]
segment_pos = pos + 1
else: # if direction == "backward":
if pos == 0:
return False, pos, 0
segment = self._list[pos]
segment_pos = pos
previous_segment = self._list[pos - 1]
previous_segment_pos = pos - 1
merged = False
new_pos = pos
if segment.start <= previous_segment.end:
# we should always have new_start+new_size >= segment.start
if segment.sort == previous_segment.sort:
# They are of the same sort - we should merge them!
new_end = max(previous_segment.end, segment.start + segment.size)
new_start = min(previous_segment.start, segment.start)
new_size = new_end - new_start
self._list = self._list[ : previous_segment_pos] + \
[ Segment(new_start, new_end, segment.sort) ] + \
self._list[ segment_pos + 1: ]
bytes_changed = -(segment.size + previous_segment.size - new_size)
merged = True
new_pos = previous_segment_pos
else:
# Different sorts. It's a bit trickier.
if segment.start == previous_segment.end:
# They are adjacent. Just don't merge.
pass
else:
# They are overlapping. We will create one, two, or three different blocks based on how they are
# overlapping
new_segments = [ ]
if segment.start < previous_segment.start:
new_segments.append(Segment(segment.start, previous_segment.start, segment.sort))
sort = previous_segment.sort if direction == "forward" else segment.sort
new_segments.append(Segment(previous_segment.start, previous_segment.end, sort))
if segment.end < previous_segment.end:
new_segments.append(Segment(segment.end, previous_segment.end, previous_segment.sort))
elif segment.end > previous_segment.end:
new_segments.append(Segment(previous_segment.end, segment.end, segment.sort))
else: # segment.start >= previous_segment.start
if segment.start > previous_segment.start:
new_segments.append(Segment(previous_segment.start, segment.start, previous_segment.sort))
sort = previous_segment.sort if direction == "forward" else segment.sort
if segment.end > previous_segment.end:
new_segments.append(Segment(segment.start, previous_segment.end, sort))
new_segments.append(Segment(previous_segment.end, segment.end, segment.sort))
elif segment.end < previous_segment.end:
new_segments.append(Segment(segment.start, segment.end, sort))
new_segments.append(Segment(segment.end, previous_segment.end, previous_segment.sort))
else:
new_segments.append(Segment(segment.start, segment.end, sort))
# merge segments in new_segments array if they are of the same sort
i = 0
while len(new_segments) > 1 and i < len(new_segments) - 1:
s0 = new_segments[i]
s1 = new_segments[i + 1]
if s0.sort == s1.sort:
new_segments = new_segments[ : i] + [ Segment(s0.start, s1.end, s0.sort) ] + new_segments[i + 2 : ]
else:
i += 1
# Put new segments into self._list
old_size = sum([ seg.size for seg in self._list[previous_segment_pos : segment_pos + 1] ])
new_size = sum([ seg.size for seg in new_segments ])
bytes_changed = new_size - old_size
self._list = self._list[ : previous_segment_pos] + new_segments + self._list[ segment_pos + 1 : ]
merged = True
if direction == "forward":
new_pos = previous_segment_pos + len(new_segments)
else:
new_pos = previous_segment_pos
return merged, new_pos, bytes_changed
def _dbg_output(self):
"""
Returns a string representation of the segments that form this SegmentList
:return: String representation of contents
:rtype: str
"""
s = "["
lst = []
for segment in self._list:
lst.append(repr(segment))
s += ", ".join(lst)
s += "]"
return s
def _debug_check(self):
"""
Iterates over list checking segments with same sort do not overlap
:raise: Exception: if segments overlap space with same sort
"""
# old_start = 0
old_end = 0
old_sort = ""
for segment in self._list:
if segment.start <= old_end and segment.sort == old_sort:
raise Exception("Error in SegmentList: blocks are not merged")
# old_start = start
old_end = segment.end
old_sort = segment.sort
#
# Public methods
#
def next_free_pos(self, address):
"""
Returns the next free position with respect to an address, including that address itself
:param address: The address to begin the search with (including itself)
:return: The next free position
"""
idx = self._search(address)
if idx < len(self._list) and self._list[idx].start <= address < self._list[idx].end:
# Occupied
i = idx
while i + 1 < len(self._list) and self._list[i].end == self._list[i + 1].start:
i += 1
if i == len(self._list):
return self._list[-1].end
else:
return self._list[i].end
else:
return address
def is_occupied(self, address):
"""
Check if an address belongs to any segment
:param address: The address to check
:return: True if this address belongs to a segment, False otherwise
"""
idx = self._search(address)
if len(self._list) <= idx:
return False
if self._list[idx].start <= address < self._list[idx].end:
return True
if idx > 0 and address < self._list[idx - 1].end:
# TODO: It seems that this branch is never reached. Should it be removed?
return True
return False
def occupied_by_sort(self, address):
"""
        Check if an address belongs to any segment, and if so, return the sort of that segment.
:param int address: The address to check
:return: Sort of the segment that occupies this address
:rtype: str
"""
idx = self._search(address)
if len(self._list) <= idx:
return None
if self._list[idx].start <= address < self._list[idx].end:
return self._list[idx].sort
if idx > 0 and address < self._list[idx - 1].end:
# TODO: It seems that this branch is never reached. Should it be removed?
return self._list[idx - 1].sort
return None
def occupy(self, address, size, sort):
"""
Include a block, specified by (address, size), in this segment list.
:param int address: The starting address of the block.
:param int size: Size of the block.
:param str sort: Type of the block.
:return: None
"""
if size <= 0:
# Cannot occupy a non-existent block
return
# l.debug("Occpuying 0x%08x-0x%08x", address, address + size)
if len(self._list) == 0:
self._list.append(Segment(address, address + size, sort))
self._bytes_occupied += size
return
# Find adjacent element in our list
idx = self._search(address)
# print idx
self._insert_and_merge(address, size, sort, idx)
# self._debug_check()
def copy(self):
"""
Make a copy of the SegmentList.
:return: A copy of the SegmentList instance.
:rtype: angr.analyses.cfg_fast.SegmentList
"""
n = SegmentList()
n._list = [ a.copy() for a in self._list ]
        n._bytes_occupied = self._bytes_occupied
        return n
#
# Properties
#
@property
def occupied_size(self):
"""
The sum of sizes of all blocks
:return: An integer
"""
return self._bytes_occupied
@property
def has_blocks(self):
"""
        Returns whether this segment list has any blocks (i.e. it is not empty).
:return: True if it's not empty, False otherwise
"""
return len(self._list) > 0
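# A minimal usage sketch of SegmentList (illustrative only; this helper is
# never called anywhere, and the addresses are made up):
def _segmentlist_usage_example():
    sl = SegmentList()
    sl.occupy(0x400000, 0x10, "code")
    sl.occupy(0x400010, 0x10, "code")  # adjacent, same sort -> merged into one segment
    assert sl.is_occupied(0x400018)
    assert sl.next_free_pos(0x400000) == 0x400020
    assert sl.occupied_by_sort(0x400005) == "code"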
class FunctionReturn(object):
"""
    FunctionReturn describes a function call at a specific location and its return location. Hashable and equatable.
"""
def __init__(self, callee_func_addr, caller_func_addr, call_site_addr, return_to):
self.callee_func_addr = callee_func_addr
self.caller_func_addr = caller_func_addr
self.call_site_addr = call_site_addr
self.return_to = return_to
def __eq__(self, o):
"""
Comparison
:param FunctionReturn o: The other object
:return: True if equal, False otherwise
"""
return self.callee_func_addr == o.callee_func_addr and \
self.caller_func_addr == o.caller_func_addr and \
self.call_site_addr == o.call_site_addr and \
self.return_to == o.return_to
def __hash__(self):
return hash((self.callee_func_addr, self.caller_func_addr, self.call_site_addr, self.return_to))
class MemoryData(object):
"""
    MemoryData describes the syntactic contents of a single address of memory, along with a set of references to
    this address (when they do not come from the previous instruction).
"""
def __init__(self, address, size, sort, irsb, irsb_addr, stmt, stmt_idx, pointer_addr=None, max_size=None,
insn_addr=None):
self.address = address
self.size = size
self.sort = sort
self.irsb = irsb
self.irsb_addr = irsb_addr
self.stmt = stmt
self.stmt_idx = stmt_idx
self.insn_addr = insn_addr
self.max_size = max_size
self.pointer_addr = pointer_addr
self.content = None # optional
self.refs = set()
if irsb_addr and stmt_idx:
self.refs.add((irsb_addr, stmt_idx, insn_addr))
def __repr__(self):
return "\\%#x, %s, %s/" % (self.address,
"%d bytes" % self.size if self.size is not None else "size unknown",
self.sort
)
def copy(self):
"""
Make a copy of the MemoryData.
:return: A copy of the MemoryData instance.
:rtype: angr.analyses.cfg_fast.MemoryData
"""
s = MemoryData(self.address, self.size, self.sort, self.irsb, self.irsb_addr, self.stmt, self.stmt_idx,
pointer_addr=self.pointer_addr, max_size=self.max_size, insn_addr=self.insn_addr
)
s.refs = self.refs.copy()
return s
def add_ref(self, irsb_addr, stmt_idx, insn_addr):
"""
Add a reference from code to this memory data.
:param int irsb_addr: Address of the basic block.
:param int stmt_idx: ID of the statement referencing this data entry.
:param int insn_addr: Address of the instruction referencing this data entry.
:return: None
"""
ref = (irsb_addr, stmt_idx, insn_addr)
if ref not in self.refs:
self.refs.add(ref)
class MemoryDataReference(object):
def __init__(self, ref_ins_addr):
self.ref_ins_addr = ref_ins_addr
class CFGJob(object):
"""
Defines a job to work on during the CFG recovery
"""
def __init__(self, addr, func_addr, jumpkind, ret_target=None, last_addr=None, src_node=None, src_ins_addr=None,
src_stmt_idx=None, returning_source=None, syscall=False):
self.addr = addr
self.func_addr = func_addr
self.jumpkind = jumpkind
self.ret_target = ret_target
self.last_addr = last_addr
self.src_node = src_node
self.src_ins_addr = src_ins_addr
self.src_stmt_idx = src_stmt_idx
self.returning_source = returning_source
self.syscall = syscall
def __repr__(self):
return "<CFGEntry%s %#08x @ func %#08x>" % (" syscall" if self.syscall else "", self.addr, self.func_addr)
def __eq__(self, other):
return self.addr == other.addr and \
self.func_addr == other.func_addr and \
self.jumpkind == other.jumpkind and \
self.ret_target == other.ret_target and \
self.last_addr == other.last_addr and \
self.src_node == other.src_node and \
self.src_stmt_idx == other.src_stmt_idx and \
self.src_ins_addr == other.src_ins_addr and \
self.returning_source == other.returning_source and \
self.syscall == other.syscall
def __hash__(self):
return hash((self.addr, self.func_addr, self.jumpkind, self.ret_target, self.last_addr, self.src_node,
self.src_stmt_idx, self.src_ins_addr, self.returning_source, self.syscall)
)
class CFGFast(ForwardAnalysis, CFGBase): # pylint: disable=abstract-method
"""
We find functions inside the given binary, and build a control-flow graph in very fast manners: instead of
simulating program executions, keeping track of states, and performing expensive data-flow analysis, CFGFast will
only perform light-weight analyses combined with some heuristics, and with some strong assumptions.
In order to identify as many functions as possible, and as accurate as possible, the following operation sequence
is followed:
# Active scanning
- If the binary has "function symbols" (TODO: this term is not accurate enough), they are starting points of
the code scanning
- If the binary does not have any "function symbol", we will first perform a function prologue scanning on the
entire binary, and start from those places that look like function beginnings
- Otherwise, the binary's entry point will be the starting point for scanning
# Passive scanning
- After all active scans are done, we will go through the whole image and scan all code pieces
Due to the nature of those techniques that are used here, a base address is often not required to use this analysis
routine. However, with a correct base address, CFG recovery will almost always yield a much better result. A custom
analysis, called GirlScout, is specifically made to recover the base address of a binary blob. After the base
address is determined, you may want to reload the binary with the new base address by creating a new Project object,
and then re-recover the CFG.
"""
# TODO: Move arch_options to CFGBase, and add those logic to CFGAccurate as well.
# TODO: Identify tail call optimization, and correctly mark the target as a new function
PRINTABLES = string.printable.replace("\x0b", "").replace("\x0c", "")
def __init__(self,
binary=None,
start=None,
end=None,
pickle_intermediate_results=False,
symbols=True,
function_prologues=True,
resolve_indirect_jumps=True,
force_segment=False,
force_complete_scan=True,
indirect_jump_target_limit=100000,
collect_data_references=False,
extra_cross_references=False,
normalize=False,
function_starts=None,
extra_memory_regions=None,
data_type_guessing_handlers=None,
arch_options=None,
**extra_arch_options
):
"""
:param binary: The binary to recover CFG on. By default the main binary is used.
:param int start: The beginning address of CFG recovery.
:param int end: The end address of CFG recovery.
:param bool pickle_intermediate_results: If we want to store the intermediate results or not.
:param bool symbols: Get function beginnings from symbols in the binary.
:param bool function_prologues: Scan the binary for function prologues, and use those positions as function
beginnings
:param bool resolve_indirect_jumps: Try to resolve indirect jumps. This is necessary to resolve jump targets
from jump tables, etc.
:param bool force_segment: Force CFGFast to rely on binary segments instead of sections.
:param bool force_complete_scan: Perform a complete scan on the binary and maximize the number of identified
code blocks.
:param bool collect_data_references: If CFGFast should collect data references from individual basic blocks or
not.
:param bool extra_cross_references: True if we should collect data references for all places in the program
that access each memory data entry, which requires more memory, and is
noticeably slower. Setting it to False means each memory data entry has at
most one reference (which is the initial one).
:param bool normalize: Normalize the CFG as well as all function graphs after CFG recovery.
:param list function_starts: A list of extra function starting points. CFGFast will try to resume scanning
from each address in the list.
:param list extra_memory_regions: A list of 2-tuple (start-address, end-address) that shows extra memory
regions. Integers falling inside will be considered as pointers.
:param CFGArchOptions arch_options: Architecture-specific options.
:param dict extra_arch_options: Any key-value pair in kwargs will be seen as an arch-specific option and will
be used to set the option value in self._arch_options.
Extra parameters that angr.Analysis takes:
:param progress_callback: Specify a callback function to get the progress during CFG recovery.
:param bool show_progressbar: Should CFGFast show a progressbar during CFG recovery or not.
:return: None
"""
ForwardAnalysis.__init__(self, allow_merging=False)
CFGBase.__init__(self, 'fast', 0, normalize=normalize, binary=binary, force_segment=force_segment)
# necessary warnings
if self.project.loader._auto_load_libs is True and end is None and len(self.project.loader.all_objects) > 3:
l.warning('"auto_load_libs" is enabled. With libraries loaded in project, CFGFast will cover libraries, '
'which may take significantly more time than expected. You may reload the binary with '
'"auto_load_libs" disabled, or specify "start" and "end" paramenters to limit the scope of CFG '
'recovery.'
)
self._start = start if start is not None else self._binary.get_min_addr()
self._end = end if end is not None else self._binary.get_max_addr()
self._pickle_intermediate_results = pickle_intermediate_results
self._indirect_jump_target_limit = indirect_jump_target_limit
self._collect_data_ref = collect_data_references
self._use_symbols = symbols
self._use_function_prologues = function_prologues
self._resolve_indirect_jumps = resolve_indirect_jumps
self._force_complete_scan = force_complete_scan
self._extra_function_starts = function_starts
self._extra_memory_regions = extra_memory_regions
self._extra_cross_references = extra_cross_references
try:
self._arch_options = arch_options if arch_options is not None else CFGArchOptions(self.project.arch,
**extra_arch_options
)
except KeyError:
raise
self._data_type_guessing_handlers = [ ] if data_type_guessing_handlers is None else data_type_guessing_handlers
l.debug("Starts at %#x and ends at %#x.", self._start, self._end)
# A mapping between address and the actual data in memory
self._memory_data = { }
# A mapping between address of the instruction that's referencing the memory data and the memory data itself
self.insn_addr_to_memory_data = { }
self._initial_state = None
self._next_addr = None
# Create the segment list
self._seg_list = SegmentList()
self._read_addr_to_run = defaultdict(list)
self._write_addr_to_run = defaultdict(list)
self._indirect_jumps_to_resolve = set()
self._jump_tables = { }
self._function_addresses_from_symbols = self._func_addrs_from_symbols()
self._function_prologue_addrs = None
self._remaining_function_prologue_addrs = None
#
# Variables used during analysis
#
self._pending_entries = None
self._traced_addresses = None
self._function_returns = None
self._function_exits = None
self._graph = None
# Start working!
self._analyze()
#
# Utils
#
@staticmethod
def _calc_entropy(data, size=None):
"""
Calculate the entropy of a piece of data
:param data: The target data to calculate entropy on
:param size: Size of the data, Optional.
:return: A float
"""
if not data:
return 0
entropy = 0
if size is None:
size = len(data)
data = str(pyvex.ffi.buffer(data, size))
for x in xrange(0, 256):
p_x = float(data.count(chr(x))) / size
if p_x > 0:
entropy += - p_x * math.log(p_x, 2)
return entropy
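    # Sanity checks for _calc_entropy: an all-zero buffer scores 0.0, while
    # uniformly distributed bytes approach the maximum of 8.0 bits per byte
    # (p_x == 1/256 for every x, and -256 * (1/256) * log2(1/256) == 8).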
#
# Properties
#
@property
def functions(self):
"""
A collection of all functions in current CFG via FunctionManager
:return: FunctionManager with all functions
:rtype: angr.knowedge.function_manager.FunctionManager
"""
return self.kb.functions
@property
def memory_data(self):
return self._memory_data
@property
def _insn_addr_to_memory_data(self):
l.warning('_insn_addr_to_memory_data has been made public and is deprecated. Please fix your code accordingly.')
return self.insn_addr_to_memory_data
#
# Private methods
#
def __setstate__(self, s):
self._graph = s['graph']
self.indirect_jumps = s['indirect_jumps']
self._nodes_by_addr = s['_nodes_by_addr']
self._memory_data = s['_memory_data']
def __getstate__(self):
s = {
"graph": self.graph,
"indirect_jumps": self.indirect_jumps,
'_nodes_by_addr': self._nodes_by_addr,
'_memory_data': self._memory_data,
}
return s
# Methods for scanning the entire image
def _next_unscanned_addr(self, alignment=None):
"""
Find the next address that we haven't processed
        :param alignment: Ensures the returned address is aligned to this number
:return: An address to process next, or None if all addresses have been processed
"""
# TODO: Take care of those functions that are already generated
if self._next_addr is None:
self._next_addr = self.project.loader.min_addr()
curr_addr = self._next_addr
else:
curr_addr = self._next_addr + 1
if self._seg_list.has_blocks:
curr_addr = self._seg_list.next_free_pos(curr_addr)
if alignment is not None:
if curr_addr % alignment > 0:
curr_addr = curr_addr - (curr_addr % alignment) + alignment
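                # e.g. curr_addr == 0x1003 with alignment == 4 rounds up to 0x1004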
# Make sure curr_addr exists in binary
accepted = False
for start, end in self._exec_mem_regions:
if start <= curr_addr < end:
# accept
accepted = True
break
if curr_addr < start:
# accept, but we are skipping the gap
accepted = True
curr_addr = start
break
if not accepted:
# No memory available!
return None
self._next_addr = curr_addr
if self._end is None or curr_addr < self._end:
l.debug("Returning new recon address: 0x%08x", curr_addr)
return curr_addr
else:
l.debug("0x%08x is beyond the ending point.", curr_addr)
return None
def _next_code_addr_core(self):
"""
        Call _next_unscanned_addr() first to get the next address that has not been scanned. Then check whether the
        data located at that address looks like code or not. If not, we continue on to the next unscanned address.
"""
next_addr = self._next_unscanned_addr()
if next_addr is None:
return None
start_addr = next_addr
sz = ""
is_sz = True
while is_sz:
# Get data until we meet a 0
while next_addr in self._initial_state.memory:
try:
l.debug("Searching address %x", next_addr)
val = self._initial_state.mem_concrete(next_addr, 1)
if val == 0:
if len(sz) < 4:
is_sz = False
# else:
# we reach the end of the memory region
break
if chr(val) not in self.PRINTABLES:
is_sz = False
break
sz += chr(val)
next_addr += 1
except simuvex.SimValueError:
# Not concretizable
l.debug("Address 0x%08x is not concretizable!", next_addr)
break
if len(sz) > 0 and is_sz:
l.debug("Got a string of %d chars: [%s]", len(sz), sz)
# l.debug("Occpuy %x - %x", start_addr, start_addr + len(sz) + 1)
self._seg_list.occupy(start_addr, len(sz) + 1, "string")
sz = ""
next_addr = self._next_unscanned_addr()
if next_addr is None:
return None
# l.debug("next addr = %x", next_addr)
start_addr = next_addr
if is_sz:
next_addr += 1
instr_alignment = self._initial_state.arch.instruction_alignment
if start_addr % instr_alignment > 0:
# occupy those few bytes
self._seg_list.occupy(start_addr, instr_alignment - (start_addr % instr_alignment), 'alignment')
start_addr = start_addr - start_addr % instr_alignment + \
instr_alignment
return start_addr
def _next_code_addr(self):
while True:
addr = self._next_code_addr_core()
if addr is None:
return None
# if the new address is already occupied
if not self._seg_list.is_occupied(addr):
return addr
    # Overridden methods from ForwardAnalysis
def _entry_key(self, entry):
return entry.addr
def _pre_analysis(self):
# Initialize variables used during analysis
self._pending_entries = [ ]
self._traced_addresses = set()
self._function_returns = defaultdict(list)
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
self._function_exits = defaultdict(set)
self._initialize_cfg()
# Create an initial state. Store it to self so we can use it globally.
self._initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = self._initial_state.options - {simuvex.o.TRACK_CONSTRAINTS} - simuvex.o.refs
initial_options |= {simuvex.o.SUPER_FASTPATH}
# initial_options.remove(simuvex.o.COW_STATES)
self._initial_state.options = initial_options
starting_points = set()
rebase_addr = self._binary.rebase_addr
# clear all existing functions
self.kb.functions.clear()
if self._use_symbols:
starting_points |= set([ addr + rebase_addr for addr in self._function_addresses_from_symbols ])
if self._extra_function_starts:
starting_points |= set(self._extra_function_starts)
# Sort it
starting_points = sorted(list(starting_points), reverse=True)
if self.project.entry is not None and self._start <= self.project.entry < self._end:
# make sure self.project.entry is the first entry
starting_points += [ self.project.entry ]
# Create entries for all starting points
for sp in starting_points:
self._insert_entry(CFGJob(sp, sp, 'Ijk_Boring'))
self._changed_functions = set()
self._nodes = {}
self._nodes_by_addr = defaultdict(list)
if self._use_function_prologues:
self._function_prologue_addrs = sorted(
set([addr + rebase_addr for addr in self._func_addrs_from_prologues()])
)
# make a copy of those prologue addresses, so that we can pop from the list
self._remaining_function_prologue_addrs = self._function_prologue_addrs[::]
# make function_prologue_addrs a set for faster lookups
self._function_prologue_addrs = set(self._function_prologue_addrs)
def _pre_entry_handling(self, entry):
# Do not calculate progress if the user doesn't care about the progress at all
if self._show_progressbar or self._progress_callback:
max_percentage_stage_1 = 50.0
percentage = self._seg_list.occupied_size * max_percentage_stage_1 / self._exec_mem_region_size
if percentage > max_percentage_stage_1:
percentage = max_percentage_stage_1
self._update_progress(percentage)
def _intra_analysis(self):
pass
def _get_successors(self, job):
current_function_addr = job.func_addr
addr = job.addr
jumpkind = job.jumpkind
src_node = job.src_node
src_stmt_idx = job.src_stmt_idx
src_ins_addr = job.src_ins_addr
if current_function_addr != -1:
l.debug("Tracing new exit 0x%08x in function %#08x",
addr, current_function_addr)
else:
l.debug("Tracing new exit %#08x", addr)
return self._scan_block(addr, current_function_addr, jumpkind, src_node, src_ins_addr, src_stmt_idx)
def _handle_successor(self, entry, successor, successors):
return [ successor ]
def _merge_entries(self, *entries):
pass
def _widen_entries(self, *entries):
pass
def _post_process_successors(self, addr, successors):
if self.project.arch.name in ('ARMEL', 'ARMHF') and addr % 2 == 1:
# we are in thumb mode. filter successors
successors = self._arm_thumb_filter_jump_successors(addr,
successors,
lambda tpl: tpl[1],
lambda tpl: tpl[0]
)
return successors
def _post_entry_handling(self, entry, new_entries, successors):
pass
def _entry_list_empty(self):
if self._pending_entries:
# look for an entry that comes from a function that must return
# if we can find one, just use it
entry_index = None
for i, entry in enumerate(self._pending_entries):
src_func_addr = entry.returning_source
if src_func_addr is None or src_func_addr not in self.kb.functions:
continue
function = self.kb.functions[src_func_addr]
if function.returning is True:
entry_index = i
break
if entry_index is not None:
self._insert_entry(self._pending_entries[entry_index])
del self._pending_entries[entry_index]
return
if self._pending_entries:
self._analyze_all_function_features()
self._clean_pending_exits(self._pending_entries)
# Clear _changed_functions set
self._changed_functions = set()
if self._pending_entries:
self._insert_entry(self._pending_entries[0])
del self._pending_entries[0]
return
if self._use_function_prologues and self._remaining_function_prologue_addrs:
while self._remaining_function_prologue_addrs:
prolog_addr = self._remaining_function_prologue_addrs[0]
self._remaining_function_prologue_addrs = self._remaining_function_prologue_addrs[1:]
if self._seg_list.is_occupied(prolog_addr):
continue
self._insert_entry(CFGJob(prolog_addr, prolog_addr, 'Ijk_Boring'))
return
# Try to see if there is any indirect jump left to be resolved
if self._resolve_indirect_jumps and self._indirect_jumps_to_resolve:
jump_targets = list(set(self._process_indirect_jumps()))
for addr, func_addr, source_addr in jump_targets:
to_outside = addr in self.functions
if not to_outside:
src_section = self._addr_belongs_to_section(source_addr)
dst_section = self._addr_belongs_to_section(addr)
to_outside = src_section != dst_section
r = self._function_add_transition_edge(addr, self._nodes[source_addr], func_addr, to_outside=to_outside)
if r:
# TODO: get a better estimate of the function address
target_func_addr = func_addr if not to_outside else addr
self._insert_entry(CFGJob(addr, target_func_addr, "Ijk_Boring", last_addr=source_addr,
src_node=self._nodes[source_addr],
src_stmt_idx=None,
)
)
if self._entries:
return
if self._force_complete_scan:
addr = self._next_code_addr()
if addr is not None:
self._insert_entry(CFGJob(addr, addr, "Ijk_Boring", last_addr=None))
def _post_analysis(self):
self._analyze_all_function_features()
# Scan all functions, and make sure all fake ret edges are either confirmed or removed
for f in self.functions.values():
all_edges = f.transition_graph.edges(data=True)
callsites_to_functions = defaultdict(list) # callsites to functions mapping
for src, dst, data in all_edges:
if 'type' in data:
if data['type'] == 'call':
callsites_to_functions[src.addr].append(dst.addr)
edges_to_remove = [ ]
for src, dst, data in all_edges:
if 'type' in data:
if data['type'] == 'fake_return' and 'confirmed' not in data:
# Get all possible functions being called here
target_funcs = [ self.functions.function(addr=func_addr)
for func_addr in callsites_to_functions[src.addr]
]
if target_funcs and all([ t is not None and t.returning is False for t in target_funcs ]):
# Remove this edge
edges_to_remove.append((src, dst))
else:
# Mark this edge as confirmed
f._confirm_fakeret(src, dst)
for edge in edges_to_remove:
f.transition_graph.remove_edge(*edge)
# Clear the cache
f._local_transition_graph = None
# Scan all functions, and make sure .returning for all functions are either True or False
for f in self.functions.values():
if f.returning is None:
f.returning = len(f.endpoints) > 0
if self.project.arch.name in ('X86', 'AMD64', 'MIPS32'):
self._remove_redundant_overlapping_blocks()
if self._normalize:
# Normalize the control flow graph first before rediscovering all functions
self.normalize()
self.make_functions()
# optional: remove functions that must be alignments
self.remove_function_alignments()
# make return edges
self._make_return_edges()
if self.project.loader.main_bin.sections:
# this binary has sections
# make sure we have data entries assigned at the beginning of each data section
for sec in self.project.loader.main_bin.sections:
if sec.memsize > 0 and not sec.is_executable and sec.is_readable:
addr = sec.vaddr + self.project.loader.main_bin.rebase_addr
for seg in self.project.loader.main_bin.segments:
seg_addr = seg.vaddr + self.project.loader.main_bin.rebase_addr
if seg_addr <= addr < seg_addr + seg.memsize:
break
else:
continue
if addr not in self.memory_data:
self.memory_data[addr] = MemoryData(addr, 0, 'unknown', None, None, None, None)
r = True
while r:
r = self._tidy_data_references()
CFGBase._post_analysis(self)
self._finish_progress()
# Methods to get start points for scanning
def _func_addrs_from_symbols(self):
"""
Get all possible function addresses that are specified by the symbols in the binary
:return: A set of addresses that are probably functions
:rtype: set
"""
symbols_by_addr = self._binary.symbols_by_addr
func_addrs = set()
for addr, sym in symbols_by_addr.iteritems():
if sym.is_function:
func_addrs.add(addr)
return func_addrs
def _func_addrs_from_prologues(self):
"""
Scan the entire program image for function prologues, and start code scanning at those positions
:return: A list of possible function addresses
"""
# Pre-compile all regexes
regexes = list()
for ins_regex in self.project.arch.function_prologs:
r = re.compile(ins_regex)
regexes.append(r)
# TODO: Make sure self._start is aligned
# Construct the binary blob first
# TODO: We shouldn't directly access the _memory of main_bin. An interface
# TODO: to that would be awesome.
strides = self._binary.memory.stride_repr
unassured_functions = []
for start_, _, bytes_ in strides:
for regex in regexes:
# Match them!
for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
if self._addr_in_exec_memory_regions(self._binary.rebase_addr + position):
unassured_functions.append(position)
return unassured_functions
# Basic block scanning
def _scan_block(self, addr, current_function_addr, previous_jumpkind, previous_src_node, previous_src_ins_addr,
previous_src_stmt_idx):
"""
Scan a basic block starting at a specific address
:param int addr: The address to begin scanning
:param int current_function_addr: Address of the current function
:param str previous_jumpkind: The jumpkind of the edge going to this node
:param CFGNode previous_src_node: The previous CFGNode
:return: a list of successors
:rtype: list
"""
# Fix the function address
# This is for rare cases where we cannot successfully determine the end boundary of a previous function, and
# as a consequence, our analysis mistakenly thinks the previous function goes all the way across the boundary,
        # resulting in the second function missing from the function manager.
if addr in self._function_addresses_from_symbols:
current_function_addr = addr
if self.project.is_hooked(addr) or self.project._simos.syscall_table.get_by_addr(addr) is not None:
entries = self._scan_procedure(addr, current_function_addr, previous_jumpkind, previous_src_node,
previous_src_ins_addr, previous_src_stmt_idx)
else:
entries = self._scan_irsb(addr, current_function_addr, previous_jumpkind, previous_src_node,
previous_src_ins_addr, previous_src_stmt_idx)
return entries
def _scan_procedure(self, addr, current_function_addr, previous_jumpkind, previous_src_node, previous_src_ins_addr,
previous_src_stmt_idx):
"""
        Checks the hooking procedure for this address, searching for new static
exit points to add to successors (generating entries for them)
if this address has not been traced before. Updates previous CFG nodes
with edges.
:param int addr: The address to begin scanning
:param int current_function_addr: Address of the current function
:param str previous_jumpkind: The jumpkind of the edge going to this node
:param CFGNode previous_src_node: The previous CFGNode
:param int previous_src_stmt_idx: The previous ID of the statement.
:return: List of successors
:rtype: list
"""
try:
if self.project.is_hooked(addr):
hooker = self.project.hooked_by(addr)
name = hooker.name
procedure = hooker.procedure
else:
syscall = self.project._simos.syscall_table.get_by_addr(addr)
name = syscall.name
procedure = syscall.simproc
if addr not in self._nodes:
cfg_node = CFGNode(addr, 0, self, function_address=current_function_addr,
simprocedure_name=name,
no_ret=procedure.NO_RET,
block_id=addr,
)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
else:
cfg_node = self._nodes[addr]
except (SimMemoryError, SimEngineError):
return [ ]
self._graph_add_edge(cfg_node, previous_src_node, previous_jumpkind, previous_src_ins_addr,
previous_src_stmt_idx
)
self._function_add_node(addr, current_function_addr)
self._changed_functions.add(current_function_addr)
# If we have traced it before, don't trace it anymore
if addr in self._traced_addresses:
return [ ]
else:
# Mark the address as traced
self._traced_addresses.add(addr)
entries = [ ]
if procedure.ADDS_EXITS:
# Get two blocks ahead
grandparent_nodes = self.graph.predecessors(previous_src_node)
if not grandparent_nodes:
l.warning("%s is supposed to yield new exits, but it fails to do so.", name)
return [ ]
blocks_ahead = [
self.project.factory.block(grandparent_nodes[0].addr).vex,
self.project.factory.block(previous_src_node.addr).vex,
]
new_exits = procedure(addr, self.project.arch, is_function=False).static_exits(blocks_ahead)
for addr, jumpkind in new_exits:
if isinstance(addr, claripy.ast.BV) and not addr.symbolic:
addr = addr._model_concrete.value
if not isinstance(addr, (int, long)):
continue
entries += self._create_entries(addr, jumpkind, current_function_addr, None, addr, cfg_node, None, None)
return entries
def _scan_irsb(self, addr, current_function_addr, previous_jumpkind, previous_src_node, previous_src_ins_addr,
previous_src_stmt_idx):
"""
        Generate a list of successors (creating an entry for each) for the IRSB.
Updates previous CFG nodes with edges.
:param int addr: The address to begin scanning
:param int current_function_addr: Address of the current function
:param str previous_jumpkind: The jumpkind of the edge going to this node
:param CFGNode previous_src_node: The previous CFGNode
:param int previous_src_stmt_idx: The previous ID of the statement
:return: a list of successors
:rtype: list
"""
addr, current_function_addr, cfg_node, irsb = self._generate_cfgnode(addr, current_function_addr)
if cfg_node is None:
# exceptions occurred, or we cannot get a CFGNode for other reasons
return [ ]
self._graph_add_edge(cfg_node, previous_src_node, previous_jumpkind, previous_src_ins_addr,
previous_src_stmt_idx
)
self._function_add_node(addr, current_function_addr)
self._changed_functions.add(current_function_addr)
# If we have traced it before, don't trace it anymore
        aligned_addr = ((addr >> 1) << 1) if self.project.arch.name in ('ARMEL', 'ARMHF') else addr
if aligned_addr in self._traced_addresses:
return [ ]
else:
# Mark the address as traced
self._traced_addresses.add(aligned_addr)
# irsb cannot be None here
# assert irsb is not None
# IRSB is only used once per CFGNode. We should be able to clean up the CFGNode here in order to save memory
cfg_node.irsb = None
self._process_block_arch_specific(addr, irsb, current_function_addr)
# Scan the basic block to collect data references
if self._collect_data_ref:
self._collect_data_references(irsb, addr)
# Get all possible successors
irsb_next, jumpkind = irsb.next, irsb.jumpkind
successors = [ ]
last_ins_addr = None
ins_addr = addr
for i, stmt in enumerate(irsb.statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
successors.append((i,
last_ins_addr if self.project.arch.branch_delay_slot else ins_addr,
stmt.dst,
stmt.jumpkind
)
)
elif isinstance(stmt, pyvex.IRStmt.IMark):
last_ins_addr = ins_addr
ins_addr = stmt.addr + stmt.delta
successors.append(('default',
last_ins_addr if self.project.arch.branch_delay_slot else ins_addr, irsb_next, jumpkind)
)
entries = [ ]
successors = self._post_process_successors(addr, successors)
# Process each successor
for suc in successors:
stmt_idx, ins_addr, target, jumpkind = suc
entries += self._create_entries(target, jumpkind, current_function_addr, irsb, addr, cfg_node, ins_addr,
stmt_idx
)
return entries
def _create_entries(self, target, jumpkind, current_function_addr, irsb, addr, cfg_node, ins_addr, stmt_idx,
fast_indirect_jump_resolution=True):
"""
        Given a node and details of a successor, makes a list of CFGEntrys,
        and if the successor is a call or exit, marks it appropriately in the CFG
:param int target: The resultant entry's statement destination
:param str jumpkind: The jumpkind of the edge going to this node
:param int current_function_addr: Address of the current function
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param int addr: The predecessor address
:param CFGNode cfg_node: The CFGNode of the predecessor node
:param int ins_addr: The resultant entry's address
:param int stmt_idx: The resultant entry's ID of their statement
:return: a list of CFGEntrys
:rtype: list
"""
if type(target) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
target_addr = target.con.value
elif type(target) in (pyvex.IRConst.U32, pyvex.IRConst.U64): # pylint: disable=unidiomatic-typecheck
target_addr = target.value
elif type(target) in (int, long): # pylint: disable=unidiomatic-typecheck
target_addr = target
else:
target_addr = None
entries = [ ]
if target_addr is None and (
jumpkind in ('Ijk_Boring', 'Ijk_Call') or jumpkind.startswith('Ijk_Sys'))\
and fast_indirect_jump_resolution:
# try resolving it fast
resolved, resolved_targets = self._resolve_indirect_jump_timelessly(addr, irsb, current_function_addr)
if resolved:
for t in resolved_targets:
ent = self._create_entries(t, jumpkind, current_function_addr, irsb, addr,cfg_node, ins_addr,
stmt_idx, fast_indirect_jump_resolution=False)
entries.extend(ent)
return entries
# pylint: disable=too-many-nested-blocks
if jumpkind == 'Ijk_Boring':
if target_addr is not None:
r = self._function_add_transition_edge(target_addr, cfg_node, current_function_addr, ins_addr=ins_addr,
stmt_idx=stmt_idx
)
if not r:
if cfg_node is not None:
l.debug("An angr exception occurred when adding a transition from %#x to %#x. "
"Ignore this successor.",
cfg_node.addr,
target_addr
)
else:
l.debug("SimTranslationError occurred when creating a new entry to %#x. "
"Ignore this successor.",
target_addr
)
return []
# if the target address is at another section, it has to be jumping to a new function
source_section = self._addr_belongs_to_section(addr)
target_section = self._addr_belongs_to_section(target_addr)
if source_section != target_section:
target_func_addr = target_addr
else:
target_func_addr = current_function_addr
ce = CFGJob(target_addr, target_func_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_ins_addr=ins_addr, src_stmt_idx=stmt_idx)
entries.append(ce)
else:
l.debug('(%s) Indirect jump at %#x.', jumpkind, addr)
# Add it to our set. Will process it later if user allows.
# Create an IndirectJump instance
if addr not in self.indirect_jumps:
tmp_statements = irsb.statements if stmt_idx == 'default' else irsb.statements[ : stmt_idx]
ins_addr = next(iter(stmt.addr for stmt in reversed(tmp_statements)
if isinstance(stmt, pyvex.IRStmt.IMark)), None
)
ij = IndirectJump(addr, ins_addr, current_function_addr, jumpkind, stmt_idx, resolved_targets=[ ])
self.indirect_jumps[addr] = ij
else:
ij = self.indirect_jumps[addr]
self._indirect_jumps_to_resolve.add(ij)
if irsb:
# Test it on the initial state. Does it jump to a valid location?
# It will be resolved only if this is a .plt entry
tmp_simirsb = simuvex.SimEngineVEX().process(self._initial_state, irsb, force_addr=addr)
if len(tmp_simirsb.successors) == 1:
tmp_ip = tmp_simirsb.successors[0].ip
if tmp_ip._model_concrete is not tmp_ip:
tmp_addr = tmp_ip._model_concrete.value
tmp_function_addr = tmp_addr # TODO: FIX THIS
if (self.project.loader.addr_belongs_to_object(tmp_addr) is not
self.project.loader.main_bin) \
or self.project.is_hooked(tmp_addr):
r = self._function_add_transition_edge(tmp_addr, cfg_node, current_function_addr,
ins_addr=ins_addr, stmt_idx=stmt_idx
)
if r:
ce = CFGJob(tmp_addr, tmp_function_addr, jumpkind, last_addr=tmp_addr,
src_node=cfg_node, src_stmt_idx=stmt_idx, src_ins_addr=ins_addr)
entries.append(ce)
# Fill the IndirectJump object
ij.resolved_targets.add(tmp_addr)
self._function_add_call_edge(tmp_addr, None, None, tmp_function_addr,
stmt_idx=stmt_idx, ins_addr=ins_addr
)
elif jumpkind == 'Ijk_Call' or jumpkind.startswith("Ijk_Sys"):
is_syscall = jumpkind.startswith("Ijk_Sys")
if target_addr is not None:
entries += self._create_entry_call(addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr,
target_addr, jumpkind, is_syscall=is_syscall
)
else:
l.debug('(%s) Indirect jump at %#x.', jumpkind, addr)
# Add it to our set. Will process it later if user allows.
if addr not in self.indirect_jumps:
tmp_statements = irsb.statements if stmt_idx == 'default' else irsb.statements[: stmt_idx]
if self.project.arch.branch_delay_slot:
ins_addr = next(itertools.islice(iter(stmt.addr for stmt in reversed(tmp_statements)
if isinstance(stmt, pyvex.IRStmt.IMark)), 1, None
), None)
else:
ins_addr = next(iter(stmt.addr for stmt in reversed(tmp_statements)
if isinstance(stmt, pyvex.IRStmt.IMark)), None
)
ij = IndirectJump(addr, ins_addr, current_function_addr, jumpkind, stmt_idx,
resolved_targets=[])
self.indirect_jumps[addr] = ij
else:
ij = self.indirect_jumps[addr]
self._indirect_jumps_to_resolve.add(ij)
self._create_entry_call(addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr, None,
jumpkind, is_syscall=is_syscall
)
elif jumpkind == "Ijk_Ret":
if current_function_addr != -1:
self._function_exits[current_function_addr].add(addr)
self._function_add_return_site(addr, current_function_addr)
cfg_node.has_return = True
else:
# TODO: Support more jumpkinds
l.debug("Unsupported jumpkind %s", jumpkind)
return entries
def _create_entry_call(self, addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr, target_addr, jumpkind,
is_syscall=False):
"""
        Generate a CFGEntry for the target address, also adding an entry to _pending_entries
        for the return to the succeeding position (if the irsb arg is populated)
:param int addr: The predecessor address
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode of the predecessor node
:param int stmt_idx: The ID of statement of resultant entry
:param int current_function_addr: Address of the entry function
:param int target_addr: The statement destination of resultant entry
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: is the jump kind (and thus this) a system call
:return: a list of CFGEntrys
:rtype: list
"""
entries = [ ]
if is_syscall:
# Fix the target_addr for syscalls
tmp_path = self.project.factory.path(self.project.factory.blank_state(mode="fastpath",
addr=cfg_node.addr
)
)
tmp_path.step()
succ = tmp_path.successors[0]
_, syscall_addr, _, _ = self.project._simos.syscall_info(succ.state)
target_addr = syscall_addr
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
return_site = addr + irsb.size # We assume the program will always return to the succeeding position
if new_function_addr is not None:
r = self._function_add_call_edge(new_function_addr, cfg_node, return_site, current_function_addr,
syscall=is_syscall, stmt_idx=stmt_idx, ins_addr=ins_addr)
if not r:
return [ ]
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(target_addr, new_function_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, syscall=is_syscall)
entries.append(ce)
if return_site is not None:
# Also, keep tracing from the return site
ce = CFGJob(return_site, current_function_addr, 'Ijk_FakeRet', last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, returning_source=new_function_addr,
syscall=is_syscall)
self._pending_entries.append(ce)
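# Note: this FakeRet job stays pending until we learn whether the callee
# actually returns; _analyze_all_function_features() later confirms the
# corresponding fake return edge or removes it.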
if new_function_addr is not None:
callee_function = self.kb.functions.function(addr=new_function_addr, syscall=is_syscall)
if callee_function.returning is True:
if return_site is not None:
self._function_add_fakeret_edge(return_site, cfg_node, current_function_addr,
confirmed=True)
self._function_add_return_edge(new_function_addr, return_site, current_function_addr)
elif callee_function.returning is False:
# The function does not return - there is no fake ret edge
pass
else:
if return_site is not None:
self._function_add_fakeret_edge(return_site, cfg_node, current_function_addr,
confirmed=None)
fr = FunctionReturn(new_function_addr, current_function_addr, addr, return_site)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].append(fr)
return entries
# Data reference processing
def _collect_data_references(self, irsb, irsb_addr):
"""
De-optimizes the IRSB when necessary and calls _add_data_reference for
individual statements or for parts of statements (e.g. Store)
:param pyvex.IRSB irsb: Block to scan for data references
:param int irsb_addr: Address of block
:return: None
"""
if self.project.arch.name in ('X86', 'AMD64'):
# first pass to see if there are any cross-statement optimizations. if so, we relift the basic block with
# optimization level 0 to preserve as much constant references as possible
empty_insn = False
all_statements = len(irsb.statements)
for i, stmt in enumerate(irsb.statements[:-1]):
if isinstance(stmt, pyvex.IRStmt.IMark) and (
isinstance(irsb.statements[i + 1], pyvex.IRStmt.IMark) or
(i + 2 < all_statements and isinstance(irsb.statements[i + 2], pyvex.IRStmt.IMark))
):
# this is a very bad check...
# the correct way to do it is to disable cross-instruction optimization in VEX
empty_insn = True
break
if empty_insn:
# make sure opt_level is 0
irsb = self.project.factory.block(addr=irsb_addr, size=irsb.size, opt_level=0).vex
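# With opt_level=0, VEX keeps each instruction's constants intact instead
# of folding them across instructions, at the cost of a slower, larger IRSB.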
# for each statement, collect all constants that are referenced or used.
self._collect_data_references_core(irsb, irsb_addr)
def _collect_data_references_core(self, irsb, irsb_addr):
# helper methods
def _process(irsb_, stmt_, stmt_idx_, data_, insn_addr, next_insn_addr, data_size=None, data_type=None):
"""
Helper method used for calling _add_data_reference after checking
for manipulation of constants
:param pyvex.IRSB irsb_: Edited block (as might be de-optimised)
:param pyvex.IRStmt.* stmt_: Statement
:param int stmt_idx_: Statement ID
:param data_: data manipulated by statement
:param int insn_addr: instruction address
:param int next_insn_addr: next instruction address
:param data_size: Size of the data being manipulated
:param str data_type: Type of the data being manipulated
:return: None
"""
if type(data_) is pyvex.expr.Const: # pylint: disable=unidiomatic-typecheck
val = data_.con.value
elif type(data_) in (int, long):
val = data_
else:
return
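# Skip constants that equal the address of the next instruction: these are
# almost always PC-relative artifacts of lifting (e.g. saved return
# addresses), not genuine data references.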
if val != next_insn_addr:
self._add_data_reference(irsb_, irsb_addr, stmt_, stmt_idx_, insn_addr, val,
data_size=data_size, data_type=data_type
)
# get all instruction addresses
instr_addrs = [ (i.addr + i.delta) for i in irsb.statements if isinstance(i, pyvex.IRStmt.IMark) ]
# for each statement, collect all constants that are referenced or used.
instr_addr = None
next_instr_addr = None
for stmt_idx, stmt in enumerate(irsb.statements):
if type(stmt) is pyvex.IRStmt.IMark: # pylint: disable=unidiomatic-typecheck
instr_addr = instr_addrs[0]
instr_addrs = instr_addrs[1 : ]
next_instr_addr = instr_addrs[0] if instr_addrs else None
elif type(stmt) is pyvex.IRStmt.WrTmp: # pylint: disable=unidiomatic-typecheck
if type(stmt.data) is pyvex.IRExpr.Load: # pylint: disable=unidiomatic-typecheck
# load
# e.g. t7 = LDle:I64(0x0000000000600ff8)
size = stmt.data.result_size(irsb.tyenv) / 8 # convert to bytes
_process(irsb, stmt, stmt_idx, stmt.data.addr, instr_addr, next_instr_addr,
data_size=size, data_type='integer'
)
elif type(stmt.data) in (pyvex.IRExpr.Binop, ): # pylint: disable=unidiomatic-typecheck
# rip-related addressing
if stmt.data.op in ('Iop_Add32', 'Iop_Add64') and \
all(type(arg) is pyvex.expr.Const for arg in stmt.data.args):
# perform the addition
loc = stmt.data.args[0].con.value + stmt.data.args[1].con.value
_process(irsb, stmt, stmt_idx, loc, instr_addr, next_instr_addr)
else:
# binary operation
for arg in stmt.data.args:
_process(irsb, stmt, stmt_idx, arg, instr_addr, next_instr_addr)
elif type(stmt.data) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
_process(irsb, stmt, stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt.data) is pyvex.IRExpr.ITE:
for child_expr in stmt.data.child_expressions:
_process(irsb, stmt, stmt_idx, child_expr, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Put: # pylint: disable=unidiomatic-typecheck
# put
# e.g. PUT(rdi) = 0x0000000000400714
if stmt.offset not in (self._initial_state.arch.ip_offset, ):
_process(irsb, stmt, stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Store: # pylint: disable=unidiomatic-typecheck
# store addr
_process(irsb, stmt, stmt_idx, stmt.addr, instr_addr, next_instr_addr)
# store data
_process(irsb, stmt, stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Dirty:
_process(irsb, stmt, stmt_idx, stmt.mAddr, instr_addr, next_instr_addr,
data_size=stmt.mSize,
data_type='fp'
)
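# Illustrative example (hypothetical x86-64): "lea rax, [rip+0x2e41]" at
# 0x400713 lifts to a WrTmp whose data is Add64(0x40071a, 0x2e41), which
# the Binop branch above folds into a single constant data reference.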
def _add_data_reference(self, irsb, irsb_addr, stmt, stmt_idx, insn_addr, data_addr, # pylint: disable=unused-argument
data_size=None, data_type=None):
"""
Checks that the address lies in a valid segment, and creates or updates the
corresponding MemoryData entry in _memory_data, labelling segment
boundaries and data types as appropriate
:param pyvex.IRSB irsb: irsb
:param int irsb_addr: irsb address
:param pyvex.IRStmt.* stmt: Statement
:param int stmt_idx: Statement ID
:param int insn_addr: instruction address
:param data_addr: address of data manipulated by statement
:param data_size: Size of the data being manipulated
:param str data_type: Type of the data being manipulated
:return: None
"""
# Make sure data_addr is within a valid memory range
if not self._addr_belongs_to_segment(data_addr):
# data might be at the end of some section or segment...
# let's take a look
for segment in self.project.loader.main_bin.segments:
if self.project.loader.main_bin.rebase_addr + segment.vaddr + segment.memsize == data_addr:
# yeah!
if data_addr not in self._memory_data:
data = MemoryData(data_addr, 0, 'segment-boundary', irsb, irsb_addr, stmt, stmt_idx,
insn_addr=insn_addr
)
self._memory_data[data_addr] = data
else:
if self._extra_cross_references:
self._memory_data[data_addr].add_ref(irsb_addr, stmt_idx, insn_addr)
break
return
if data_addr not in self._memory_data:
if data_type is not None and data_size is not None:
data = MemoryData(data_addr, data_size, data_type, irsb, irsb_addr, stmt, stmt_idx,
insn_addr=insn_addr, max_size=data_size
)
else:
data = MemoryData(data_addr, 0, 'unknown', irsb, irsb_addr, stmt, stmt_idx, insn_addr=insn_addr)
self._memory_data[data_addr] = data
else:
if self._extra_cross_references:
self._memory_data[data_addr].add_ref(irsb_addr, stmt_idx, insn_addr)
self.insn_addr_to_memory_data[insn_addr] = self._memory_data[data_addr]
def _tidy_data_references(self):
"""
:return: True if new data entries are found, False otherwise.
:rtype: bool
"""
# Make sure all memory data entries cover all data sections
keys = sorted(self._memory_data.iterkeys())
for i, data_addr in enumerate(keys):
data = self._memory_data[data_addr]
if self._addr_in_exec_memory_regions(data.address):
# TODO: Handle data among code regions (or executable regions)
pass
else:
if i + 1 != len(keys):
next_data_addr = keys[i + 1]
else:
next_data_addr = None
# goes until the end of the section/segment
# TODO: the logic needs more testing
obj = self.project.loader.addr_belongs_to_object(data_addr)
sec = self._addr_belongs_to_section(data_addr)
next_sec_addr = None
if sec is not None:
last_addr = sec.vaddr + sec.memsize + obj.rebase_addr
else:
# it does not belong to any section. what's the next adjacent section? any memory data does not go
# beyond section boundaries
next_sec = self._addr_next_section(data_addr)
if next_sec is not None:
next_sec_addr = next_sec.vaddr + obj.rebase_addr
seg = self._addr_belongs_to_segment(data_addr)
if seg is not None:
last_addr = seg.vaddr + seg.memsize + obj.rebase_addr
else:
# We got an address that is not inside the current binary...
l.warning('_tidy_data_references() sees an address %#08x that does not belong to any '
'section or segment.', data_addr
)
last_addr = None
if next_data_addr is None:
boundary = last_addr
elif last_addr is None:
boundary = next_data_addr
else:
boundary = min(last_addr, next_data_addr)
if next_sec_addr is not None:
boundary = min(boundary, next_sec_addr)
if boundary is not None:
data.max_size = boundary - data_addr
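# Second pass: guess the type and size of each entry, splitting off a new
# entry whenever a typed datum turns out to be smaller than its max_size.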
keys = sorted(self._memory_data.iterkeys())
new_data_found = False
i = 0
# pylint:disable=too-many-nested-blocks
while i < len(keys):
data_addr = keys[i]
i += 1
memory_data = self._memory_data[data_addr]
if memory_data.sort in ('segment-boundary', ):
continue
content_holder = [ ]
# let's see what sort of data it is
if memory_data.sort in ('unknown', None) or \
(memory_data.sort == 'integer' and memory_data.size == self.project.arch.bits / 8):
data_type, data_size = self._guess_data_type(memory_data.irsb, memory_data.irsb_addr,
memory_data.stmt_idx, data_addr, memory_data.max_size,
content_holder=content_holder
)
else:
data_type, data_size = memory_data.sort, memory_data.size
if data_type is not None:
memory_data.size = data_size
memory_data.sort = data_type
if len(content_holder) == 1:
memory_data.content = content_holder[0]
if memory_data.size > 0 and memory_data.size < memory_data.max_size:
# Create another memory_data object to fill the gap
new_addr = data_addr + memory_data.size
new_md = MemoryData(new_addr, None, None, None, None, None, None,
max_size=memory_data.max_size - memory_data.size)
self._memory_data[new_addr] = new_md
keys.insert(i, new_addr)
if data_type == 'pointer-array':
# make sure all pointers are identified
pointer_size = self.project.arch.bits / 8
buf = self._fast_memory_load(data_addr)
# TODO: this part of code is duplicated in _guess_data_type()
# TODO: remove the duplication
if self.project.arch.memory_endness == 'Iend_LE':
fmt = "<"
else:
fmt = ">"
if pointer_size == 8:
fmt += "Q"
elif pointer_size == 4:
fmt += "I"
else:
raise AngrCFGError("Pointer size of %d is not supported", pointer_size)
for j in xrange(0, data_size, pointer_size):
ptr_str = self._ffi.unpack(self._ffi.cast('char*', buf + j), pointer_size)
ptr = struct.unpack(fmt, ptr_str)[0] # type:int
# is this pointer coming from the current binary?
obj = self.project.loader.addr_belongs_to_object(ptr)
if obj is not self.project.loader.main_bin:
# the pointer does not come from current binary. skip.
continue
if self._seg_list.is_occupied(ptr):
sort = self._seg_list.occupied_by_sort(ptr)
if sort == 'code':
continue
elif sort == 'pointer-array':
continue
# TODO: other types
if ptr not in self._memory_data:
self._memory_data[ptr] = MemoryData(ptr, 0, 'unknown', None, None, None, None,
pointer_addr=data_addr + j
)
new_data_found = True
else:
memory_data.size = memory_data.max_size
self._seg_list.occupy(data_addr, memory_data.size, memory_data.sort)
return new_data_found
def _guess_data_type(self, irsb, irsb_addr, stmt_idx, data_addr, max_size, content_holder=None): # pylint: disable=unused-argument
"""
Make a guess to the data type.
Users can provide their own data type guessing code when initializing CFGFast instance, and each guessing
handler will be called if this method fails to determine what the data is.
:param pyvex.IRSB irsb: The pyvex IRSB object.
:param int irsb_addr: Address of the IRSB.
:param int stmt_idx: ID of the statement.
:param int data_addr: Address of the data.
:param int max_size: The maximum size this data entry can be.
:return: a tuple of (data type, size). (None, None) if we fail to determine the type or the size.
:rtype: tuple
"""
if max_size is None:
max_size = 0
if self._seg_list.is_occupied(data_addr) and self._seg_list.occupied_by_sort(data_addr) == 'code':
# it's a code reference
# TODO: Further check if it's the beginning of an instruction
return "code reference", 0
pointer_size = self.project.arch.bits / 8
# who's using it?
plt_entry = self.project.loader.main_bin.reverse_plt.get(irsb_addr, None)
if plt_entry is not None:
# IRSB is owned by plt!
return "GOT PLT Entry", pointer_size
# try to decode it as a pointer array
buf = self._fast_memory_load(data_addr)
if buf is None:
# The data address does not exist in static regions
return None, None
if self.project.arch.memory_endness == 'Iend_LE':
fmt = "<"
else:
fmt = ">"
if pointer_size == 8:
fmt += "Q"
elif pointer_size == 4:
fmt += "I"
else:
raise AngrCFGError("Pointer size of %d is not supported", pointer_size)
pointers_count = 0
max_pointer_array_size = min(512 * pointer_size, max_size)
for i in xrange(0, max_pointer_array_size, pointer_size):
ptr_str = self._ffi.unpack(self._ffi.cast('char*', buf + i), pointer_size)
if len(ptr_str) != pointer_size:
break
ptr = struct.unpack(fmt, ptr_str)[0] # type:int
if ptr is not None:
#if self._seg_list.is_occupied(ptr) and self._seg_list.occupied_by_sort(ptr) == 'code':
# # it's a code reference
# # TODO: Further check if it's the beginning of an instruction
# pass
if self._addr_belongs_to_section(ptr) is not None or self._addr_belongs_to_segment(ptr) is not None or \
(self._extra_memory_regions and
next(((a < ptr < b) for (a, b) in self._extra_memory_regions), None)
):
# it's a pointer of some sort
# TODO: Determine what sort of pointer it is
pointers_count += 1
else:
break
if pointers_count:
return "pointer-array", pointer_size * pointers_count
block = self._fast_memory_load(data_addr)
# Is it a unicode string?
# TODO: Support unicode strings longer than the max length
if block[1] == 0 and block[3] == 0 and chr(block[0]) in self.PRINTABLES:
max_unicode_string_len = 1024
unicode_str = self._ffi.string(self._ffi.cast("wchar_t*", block), max_unicode_string_len)
if len(unicode_str) and all([ c in self.PRINTABLES for c in unicode_str]):
if content_holder is not None:
content_holder.append(unicode_str)
return "unicode", (len(unicode_str) + 1) * 2
# Is it a null-terminated printable string?
max_string_len = min(max_size, 4096)
s = self._ffi.string(self._ffi.cast("char*", block), max_string_len)
if len(s):
if all([ c in self.PRINTABLES for c in s ]):
# it's a string
# however, it may not be terminated
if content_holder is not None:
content_holder.append(s)
return "string", min(len(s) + 1, max_string_len)
for handler in self._data_type_guessing_handlers:
sort, size = handler(self, irsb, irsb_addr, stmt_idx, data_addr, max_size)
if sort is not None:
return sort, size
return None, None
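# A custom guessing handler is a callable with the signature below (sketch
# inferred from the dispatch loop above); it should return (sort, size), or
# (None, None) if it cannot decide:
#
#     def my_handler(cfg, irsb, irsb_addr, stmt_idx, data_addr, max_size):
#         return None, None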
# Indirect jumps processing
def _resolve_indirect_jump_timelessly(self, addr, block, func_addr):
"""
Checks if the architecture is MIPS32 and, if so, dispatches to the
MIPS32-specific resolver; otherwise reports the jump as unresolved
:param int addr: irsb address
:param pyvex.IRSB block: irsb
:param int func_addr: function address
:return: If it was resolved and targets alongside it
:rtype: tuple
"""
if self.project.arch.name == "MIPS32":
# Prudently search for indirect jump target
return self._resolve_indirect_jump_timelessly_mips32(addr, block, func_addr)
return False, [ ]
def _resolve_indirect_jump_timelessly_mips32(self, addr, block, func_addr): # pylint: disable=unused-argument
"""
Attempts to execute the potential paths forward through the jump, returning
a success boolean and a list of successors
:param int addr: irsb address
:param pyvex.IRSB block: irsb
:param int func_addr: function address
:return: If it was resolved and targets alongside it
:rtype: tuple
"""
b = Blade(self._graph, addr, -1, cfg=self, project=self.project, ignore_sp=True, ignore_bp=True,
ignored_regs=('gp',))
sources = [ n for n in b.slice.nodes() if b.slice.in_degree(n) == 0 ]
if not sources:
return False, [ ]
source = sources[0]
source_addr = source[0]
annotated_cfg = AnnotatedCFG(self.project, None, detect_loops=False)
annotated_cfg.from_digraph(b.slice)
state = self.project.factory.blank_state(addr=source_addr, mode="fastpath",
remove_options=simuvex.options.refs
)
func = self.kb.functions.function(addr=func_addr)
gp_offset = self.project.arch.registers['gp'][0]
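# On MIPS, position-independent code computes indirect jump targets via
# $gp-relative GOT loads, so the slice can only be executed meaningfully if
# gp holds the correct value for this function.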
if 'gp' not in func.info:
sec = self._addr_belongs_to_section(func.addr)
if sec is None or sec.name != '.plt':
# this might be a special case: gp is only used once in this function, and it can be initialized right before
# its use site.
# TODO: handle this case
l.debug('Failed to determine value of register gp for function %#x.', func.addr)
return False, [ ]
else:
state.regs.gp = func.info['gp']
def overwrite_tmp_value(state):
state.inspect.tmp_write_expr = state.se.BVV(func.info['gp'], state.arch.bits)
# Special handling for cases where `gp` is stored on the stack
got_gp_stack_store = False
for block_addr_in_slice in set(slice_node[0] for slice_node in b.slice.nodes()):
for stmt in self.project.factory.block(block_addr_in_slice).vex.statements:
if isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset and \
isinstance(stmt.data, pyvex.IRExpr.RdTmp):
tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop
# we must make sure value of that temporary variable equals to the correct gp value
state.inspect.make_breakpoint('tmp_write', when=simuvex.BP_BEFORE,
condition=lambda s, bbl_addr_=block_addr_in_slice, tmp_offset_=tmp_offset:
s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,
action=overwrite_tmp_value
)
got_gp_stack_store = True
break
if got_gp_stack_store:
break
path = self.project.factory.path(state)
slicecutor = Slicecutor(self.project, annotated_cfg=annotated_cfg, start=path)
slicecutor.run()
if slicecutor.cut:
suc = slicecutor.cut[0].successors[0].addr
return True, [ suc ]
return False, [ ]
def _process_indirect_jumps(self):
"""
Resolve indirect jumps found in previous scanning.
Currently we support resolving the following types of indirect jumps:
- Ijk_Call (disabled now): indirect calls where the function address is passed in from a preceding basic block
- Ijk_Boring: jump tables
:return: a set of 2-tuples: (resolved indirect jump target, function address)
:rtype: set
"""
all_targets = set()
jumps_resolved = {}
# print "We have %d indirect jumps" % len(self._indirect_jumps)
for jump in self._indirect_jumps_to_resolve: # type: IndirectJump
jumps_resolved[jump] = False
# is it a jump table? try with the fast approach
resolvable, targets = self._resolve_jump_table_fast(jump.addr, jump.jumpkind)
if resolvable:
jumps_resolved[jump] = True
# Remove all targets that don't make sense
targets = [ t for t in targets if any(iter((a <= t < b) for a, b in self._exec_mem_regions)) ]
if jump.addr in self.indirect_jumps:
ij = self.indirect_jumps[jump.addr]
ij.jumptable = True
ij.resolved = True
# Fill the IndirectJump object
ij.resolved_targets |= set(targets)
all_targets |= set([ (t, jump.func_addr, jump.addr) for t in targets ])
continue
# is it a slightly more complex jump table? try the slow approach
# resolvable, targets = self._resolve_jump_table_accurate(addr, jumpkind)
# if resolvable:
# all_targets |= set(targets)
# continue
for jump, resolved in jumps_resolved.iteritems():
self._indirect_jumps_to_resolve.remove(jump)
if not resolved:
# add a node from this entry to the UnresolvableTarget node
src_node = self._nodes[jump.addr]
dst_node = CFGNode(self._unresolvable_target_addr, 0, self,
function_address=self._unresolvable_target_addr,
simprocedure_name='UnresolvableTarget',
)
# add the dst_node to self._nodes
if self._unresolvable_target_addr not in self._nodes:
self._nodes[self._unresolvable_target_addr] = dst_node
self._nodes_by_addr[self._unresolvable_target_addr].append(dst_node)
self._graph_add_edge(dst_node, src_node, jump.jumpkind, jump.ins_addr, jump.stmt_idx)
# mark it as a jumpout site for that function
self._function_add_transition_edge(self._unresolvable_target_addr, src_node, jump.func_addr,
to_outside=True,
to_function_addr=self._unresolvable_target_addr,
ins_addr=jump.ins_addr,
stmt_idx=jump.stmt_idx,
)
# tell KnowledgeBase that it's not resolved
# TODO: self.kb._unresolved_indirect_jumps is not processed during normalization. Fix it.
self.kb._unresolved_indirect_jumps.add(jump.addr)
return all_targets
def _resolve_jump_table_fast(self, addr, jumpkind):
"""
Check if the indirect jump is a jump table, and if it is, resolve it and return all possible targets.
This is a fast jump table resolution. For performance concerns, we made the following assumptions:
- The final jump target comes from the memory.
- The final jump target must be directly read out of the memory, without any further modification or altering.
:param int addr: the address of the basic block
:param str jumpkind: the jump kind of the indirect jump
:return: a bool indicating whether the indirect jump is resolved successfully, and a list of resolved targets
:rtype: tuple
"""
bss_regions = [ ]
def bss_memory_read_hook(state):
if not bss_regions:
return
read_addr = state.inspect.mem_read_address
read_length = state.inspect.mem_read_length
if not isinstance(read_addr, (int, long)) and read_addr.symbolic:
# don't touch it
return
concrete_read_addr = state.se.any_int(read_addr)
concrete_read_length = state.se.any_int(read_length)
for start, size in bss_regions:
if start <= concrete_read_addr < start + size:
# this is a read from the .bss section
break
else:
return
if not state.memory.was_written_to(concrete_read_addr):
# it was never written to before. we overwrite it with unconstrained bytes
bits = self.project.arch.bits
for i in xrange(0, concrete_read_length, bits / 8):
state.memory.store(concrete_read_addr + i, state.se.Unconstrained('unconstrained', bits))
# job done :-)
class UninitReadMeta(object):
uninit_read_base = 0xc000000
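# Arbitrary base address; each uninitialized register-based read below is
# redirected to a fresh region starting here so that reads stay concrete.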
def init_registers_on_demand(state):
# for uninitialized read using a register as the source address, we replace them in memory on demand
read_addr = state.inspect.mem_read_address
if not isinstance(read_addr, (int, long)) and read_addr.uninitialized:
read_length = state.inspect.mem_read_length
if not isinstance(read_length, (int, long)):
read_length = read_length._model_vsa.upper_bound
if read_length > 16:
return
new_read_addr = state.se.BVV(UninitReadMeta.uninit_read_base, state.arch.bits)
UninitReadMeta.uninit_read_base += read_length
# replace the expression in registers
state.registers.replace_all(read_addr, new_read_addr)
state.inspect.mem_read_address = new_read_addr
# job done :-)
if jumpkind != "Ijk_Boring":
# Currently we only support boring ones
return False, None
# Perform a backward slicing from the jump target
b = Blade(self.graph, addr, -1, cfg=self, project=self.project, ignore_sp=True, ignore_bp=True, max_level=2)
stmt_loc = (addr, 'default')
if stmt_loc not in b.slice:
return False, None
load_stmt_loc, load_stmt = None, None
stmts_to_remove = [ stmt_loc ]
while True:
preds = b.slice.predecessors(stmt_loc)
if len(preds) != 1:
return False, None
block_addr, stmt_idx = stmt_loc = preds[0]
block = self.project.factory.block(block_addr).vex
stmt = block.statements[stmt_idx]
if isinstance(stmt, pyvex.IRStmt.WrTmp) or isinstance(stmt, pyvex.IRStmt.Put):
if isinstance(stmt.data, pyvex.IRExpr.Get) or isinstance(stmt.data, pyvex.IRExpr.RdTmp):
# data transferring
stmts_to_remove.append(stmt_loc)
stmt_loc = (block_addr, stmt_idx)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Load):
# Got it!
stmt_loc = (block_addr, stmt_idx)
load_stmt, load_stmt_loc = stmt, stmt_loc
stmts_to_remove.append(stmt_loc)
break
if load_stmt_loc is None:
# the load statement is not found
return False, None
# skip all statements before the load statement
b.slice.remove_nodes_from(stmts_to_remove)
# Debugging output
# for addr, stmt_idx in sorted(list(b.slice.nodes())):
# irsb = self.project.factory.block(addr).vex
# stmts = irsb.statements
# print "%x: %d | " % (addr, stmt_idx),
# print "%s" % stmts[stmt_idx],
# print "%d" % b.slice.in_degree((addr, stmt_idx))
# print ""
# Get all sources
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(self.project, None, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
# pylint: disable=too-many-nested-blocks
for src_irsb, _ in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self.project.factory.blank_state(
addr=src_irsb,
mode='static',
add_options={
simuvex.o.DO_RET_EMULATION,
simuvex.o.TRUE_RET_EMULATION_GUARD,
simuvex.o.AVOID_MULTIVALUED_READS,
},
remove_options={
simuvex.o.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY,
simuvex.o.UNINITIALIZED_ACCESS_AWARENESS,
}
)
# any read from an uninitialized segment should be unconstrained
# TODO: support other sections other than '.bss'.
# TODO: this is very hackish. fix it after the chaos.
for section in self.project.loader.main_bin.sections:
if section.name == '.bss':
bss_regions.append((self.project.loader.main_bin.rebase_addr + section.vaddr, section.memsize))
bss_memory_read_bp = simuvex.BP(when=simuvex.BP_BEFORE, enabled=True, action=bss_memory_read_hook)
start_state.inspect.add_breakpoint('mem_read', bss_memory_read_bp)
break
start_state.regs.bp = start_state.arch.initial_sp + 0x2000
init_registers_on_demand_bp = simuvex.BP(when=simuvex.BP_BEFORE, enabled=True, action=init_registers_on_demand)
start_state.inspect.add_breakpoint('mem_read', init_registers_on_demand_bp)
start_path = self.project.factory.path(start_state)
# Create the slicecutor
slicecutor = Slicecutor(self.project, annotatedcfg, start=start_path, targets=(load_stmt_loc[0],))
# Run it!
try:
slicecutor.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in slicecutor.reached_targets:
all_states = r.next_run.flat_successors
state = all_states[0] # Just take the first state
# Parse the memory load statement
load_addr_tmp = load_stmt.data.addr.tmp
if load_addr_tmp not in state.scratch.temps:
# the tmp variable is not there... umm...
continue
jump_addr = state.scratch.temps[load_addr_tmp]
total_cases = jump_addr._model_vsa.cardinality
all_targets = [ ]
if total_cases > self._indirect_jump_target_limit:
# We resolved too many targets for this indirect jump. Something might have gone wrong.
l.debug("%d targets are resolved for the indirect jump at %#x. It may not be a jump table",
total_cases, addr)
return False, None
# Or alternatively, we can ask user, which is meh...
#
# jump_base_addr = int(raw_input("please give me the jump base addr: "), 16)
# total_cases = int(raw_input("please give me the total cases: "))
# jump_target = state.se.SI(bits=64, lower_bound=jump_base_addr, upper_bound=jump_base_addr +
# (total_cases - 1) * 8, stride=8)
jump_table = [ ]
for idx, a in enumerate(state.se.any_n_int(jump_addr, total_cases)):
if idx % 100 == 0:
l.debug("Resolved %d targets for the indirect jump at %#x", idx, addr)
jump_target = state.memory.load(a, state.arch.bits / 8, endness=state.arch.memory_endness)
target = state.se.any_int(jump_target)
all_targets.append(target)
jump_table.append(target)
l.info("Jump table resolution: resolved %d targets from %#x", len(all_targets), addr)
ij = self.indirect_jumps[addr]
ij.jumptable = True
ij.jumptable_addr = state.se.min(jump_addr)
ij.jumptable_targets = jump_table
ij.jumptable_entries = total_cases
return True, all_targets
return False, None
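# A typical pattern this resolves (hypothetical x86-64 switch dispatch):
#     jmp qword ptr [rax*8 + 0x402000]
# where the slice pins the table base to 0x402000 and the VSA cardinality of
# the load address bounds the number of table entries.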
def _resolve_jump_table_accurate(self, addr, jumpkind):
"""
Check if the indirect jump is a jump table, and if it is, resolve it and return all possible targets.
This is the accurate (or rather, slower) version jump table resolution.
:param int addr: the address of the basic block
:param str jumpkind: the jump kind of the indirect jump
:return: a bool indicating whether the indirect jump is resolved successfully, and a list of resolved targets
:rtype: tuple
"""
if jumpkind != "Ijk_Boring":
# Currently we only support boring ones
return False, None
# Perform a backward slicing from the jump target
b = Blade(self.graph, addr, -1, cfg=self, project=self.project, ignore_sp=True, ignore_bp=True)
# Debugging output
# for addr, stmt_idx in sorted(list(b.slice.nodes())):
# irsb = self.project.factory.block(addr).vex
# stmts = irsb.statements
# print "%x: %d | " % (addr, stmt_idx),
# print "%s" % stmts[stmt_idx],
# print "%d" % b.slice.in_degree((addr, stmt_idx))
# print ""
# Get all sources
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(self.project, None, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
for src_irsb, _ in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self.project.factory.blank_state(
addr=src_irsb,
mode='static',
add_options={
simuvex.o.DO_RET_EMULATION,
simuvex.o.TRUE_RET_EMULATION_GUARD,
simuvex.o.KEEP_MEMORY_READS_DISCRETE, # Please do not merge values that are read out of the
# memory
}
)
start_state.regs.bp = start_state.arch.initial_sp + 0x2000
start_path = self.project.factory.path(start_state)
# Create the slicecutor
slicecutor = Slicecutor(self.project, annotatedcfg, start=start_path, targets=(addr,))
# Run it!
try:
slicecutor.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in slicecutor.reached_targets:
all_states = r.unconstrained_successor_states + [ s.state for s in r.successors ]
state = all_states[0]
jump_target = state.ip
total_cases = jump_target._model_vsa.cardinality
all_targets = [ ]
if total_cases > self._indirect_jump_target_limit:
# We resolved too many targets for this indirect jump. Something might have gone wrong.
l.debug("%d targets are resolved for the indirect jump at %#x. It may not be a jump table",
total_cases, addr)
return False, None
# Or alternatively, we can ask user, which is meh...
#
# jump_base_addr = int(raw_input("please give me the jump base addr: "), 16)
# total_cases = int(raw_input("please give me the total cases: "))
# jump_target = state.se.SI(bits=64, lower_bound=jump_base_addr, upper_bound=jump_base_addr +
# (total_cases - 1) * 8, stride=8)
jump_table = [ ]
for idx, target in enumerate(state.se.any_n_int(jump_target, total_cases)):
if idx % 100 == 0:
l.debug("Resolved %d targets for the indirect jump at %#x", idx, addr)
all_targets.append(target)
jump_table.append(target)
l.info("Jump table resolution: resolved %d targets from %#x", len(all_targets), addr)
ij = self.indirect_jumps[addr]
ij.jumptable = True
ij.jumptable_addr = state.se.min(jump_target)
ij.jumptable_targets = jump_table
ij.jumptable_entries = total_cases
return True, all_targets
def _resolve_indirect_calls(self):
"""
:return:
"""
# TODO: Fix and enable this method later
function_starts = set()
for jumpkind, irsb_addr in self._indirect_jumps_to_resolve:
# First execute the current IRSB in concrete mode
if len(function_starts) > 20:
break
if jumpkind == "Ijk_Call":
state = self.project.factory.blank_state(addr=irsb_addr, mode="concrete",
add_options={simuvex.o.SYMBOLIC_INITIAL_VALUES}
)
path = self.project.factory.path(state)
# print hex(irsb_addr)
try:
r = (path.next_run.successors + path.next_run.unsat_successors)[0]
ip = r.se.exactly_n_int(r.ip, 1)[0]
function_starts.add(ip)
continue
except simuvex.SimSolverModeError:
pass
# Not resolved
# Do a backward slicing from the call
irsb = self.project.factory.block(irsb_addr).vex
# Start slicing from the "next"
b = Blade(self.graph, irsb.addr, -1, project=self.project)
# Debugging output
# for addr, stmt_idx in sorted(list(b.slice.nodes())):
#     irsb = self.project.factory.block(addr).vex
#     stmts = irsb.statements
#     print "%x: %d | " % (addr, stmt_idx),
#     print "%s" % stmts[stmt_idx],
#     print "%d" % b.slice.in_degree((addr, stmt_idx))
#     print ""
# Get all sources
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(self.project, None, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
for src_irsb, _ in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self.project.factory.blank_state(
addr=src_irsb,
add_options={
simuvex.o.DO_RET_EMULATION,
simuvex.o.TRUE_RET_EMULATION_GUARD
}
)
start_path = self.project.factory.path(start_state)
# Create the slicecutor
slicecutor = Slicecutor(self.project, annotatedcfg, start=start_path, targets=(irsb_addr,))
# Run it!
try:
slicecutor.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in slicecutor.reached_targets:
if r.next_run.successors:
target_ip = r.next_run.successors[0].ip
se = r.next_run.successors[0].se
if not se.symbolic(target_ip):
concrete_ip = se.exactly_n_int(target_ip, 1)[0]
function_starts.add(concrete_ip)
l.info("Found a function address %x", concrete_ip)
return function_starts
# Removers
def _remove_redundant_overlapping_blocks(self):
"""
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
align the succeeding function. CFGFast does a linear sweep, which might create duplicated blocks for
function epilogues where one block starts before the garbage bytes and the other starts after the garbage bytes.
This method enumerates all blocks and removes overlapping blocks if one of them is aligned to 0x10 and the other
contains only garbage bytes.
:return: None
"""
sorted_nodes = sorted(self.graph.nodes(), key=lambda n: n.addr if n is not None else 0)
# go over the list. for each node that is the beginning of a function and is not properly aligned, if its
# leading instruction is a single-byte or multi-byte nop, make sure there is another CFGNode that starts after the
# nop instruction
nodes_to_append = {}
# pylint:disable=too-many-nested-blocks
for a in sorted_nodes:
if a.addr in self.functions:
all_in_edges = self.graph.in_edges(a, data=True)
if not any([data['jumpkind'] == 'Ijk_Call' for _, _, data in all_in_edges]):
# no one is calling it
# this function might be created from linear sweeping
try:
block = self.project.factory.block(a.addr, size=0x10 - (a.addr % 0x10))
except SimTranslationError:
continue
insns = block.capstone.insns
if insns and self._is_noop_insn(insns[0]):
# see where those nop instructions terminate
nop_length = 0
for insn in insns:
if self._is_noop_insn(insn):
nop_length += insn.size
else:
break
if nop_length <= 0:
continue
# leading nop for alignment.
next_node_addr = a.addr + nop_length
if nop_length < a.size and \
not (next_node_addr in self._nodes or next_node_addr in nodes_to_append):
# create a new CFGNode that starts there
next_node_size = a.size - nop_length
next_node = CFGNode(next_node_addr, next_node_size, self,
function_address=next_node_addr,
instruction_addrs=[i for i in a.instruction_addrs
if next_node_addr <= i
< next_node_addr + next_node_size
]
)
# create edges accordingly
all_out_edges = self.graph.out_edges(a, data=True)
for _, dst, data in all_out_edges:
self.graph.add_edge(next_node, dst, **data)
nodes_to_append[next_node_addr] = next_node
# make sure a function begins there
try:
self.functions._add_node(next_node_addr, next_node_addr, size=next_node_size)
except (SimEngineError, SimMemoryError):
continue
# append all new nodes to sorted nodes
if nodes_to_append:
sorted_nodes = sorted(sorted_nodes + nodes_to_append.values(), key=lambda n: n.addr if n is not None else 0)
removed_nodes = set()
a = None # it always holds the most recent non-removed node
for i in xrange(len(sorted_nodes)):
if a is None:
a = sorted_nodes[0]
continue
b = sorted_nodes[i]
if b in removed_nodes:
# skip all removed nodes
continue
if a.addr <= b.addr and \
(a.addr + a.size > b.addr):
# They are overlapping
try:
block = self.project.factory.fresh_block(a.addr, b.addr - a.addr)
except SimTranslationError:
a = b
continue
if block.capstone.insns and all([ self._is_noop_insn(insn) for insn in block.capstone.insns ]):
# It's a big nop - no function starts with nop
# add b to indices
self._nodes[b.addr] = b
self._nodes_by_addr[b.addr].append(b)
# shrink a
self._shrink_node(a, b.addr - a.addr, remove_function=False)
a = b
continue
all_functions = self.kb.functions
# now things are a little harder
# if there is no incoming edge to b, we should replace b with a
# this is mostly because we misidentified the function beginning. In fact a is the function beginning,
# but somehow we thought b is the beginning
if a.addr + a.size == b.addr + b.size:
in_edges = len([ _ for _, _, data in self.graph.in_edges(b, data=True) ])
if in_edges == 0:
# we use node a to replace node b
# link all successors of b to a
for _, dst, data in self.graph.out_edges(b, data=True):
self.graph.add_edge(a, dst, **data)
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
# skip b
removed_nodes.add(b)
continue
# next case - if b is directly from function prologue detection, or a basic block that is a successor of
# a wrongly identified basic block, we might be totally misdecoding b
if b.instruction_addrs[0] not in a.instruction_addrs:
# use a, truncate b
new_b_addr = a.addr + a.size # b starts right after a terminates
new_b_size = b.addr + b.size - new_b_addr # this may not be the size we want, since b might be
# misdecoded
# totally remove b
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
removed_nodes.add(b)
if new_b_size > 0:
# there are still some parts left in node b - we don't want to lose it
self._scan_block(new_b_addr, a.function_address, None, None, None, None)
continue
# for other cases, we'll let them be for now
a = b # update a
def _remove_node(self, node):
"""
Remove a CFGNode from self.graph as well as from the function manager (if it is the beginning of a function)
:param CFGNode node: The CFGNode to remove from the graph.
:return: None
"""
self.graph.remove_node(node)
if node.addr in self._nodes:
del self._nodes[node.addr]
# We want to remove the function as well
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if node.addr in self.kb.functions.callgraph:
self.kb.functions.callgraph.remove_node(node.addr)
def _shrink_node(self, node, new_size, remove_function=True):
"""
Shrink the size of a node in CFG.
:param CFGNode node: The CFGNode to shrink
:param int new_size: The new size of the basic block
:param bool remove_function: If there is a function starting at `node`, should we remove that function or not.
:return: None
"""
# Generate the new node
new_node = CFGNode(node.addr, new_size, self,
function_address=None if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs
if node.addr <= i < node.addr + new_size
]
)
old_in_edges = self.graph.in_edges(node, data=True)
for src, _, data in old_in_edges:
self.graph.add_edge(src, new_node, **data)
successor_node_addr = node.addr + new_size
if successor_node_addr in self._nodes:
successor = self._nodes[successor_node_addr]
else:
successor = CFGNode(successor_node_addr, new_size, self,
function_address=successor_node_addr if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs if i >= node.addr + new_size]
)
self.graph.add_edge(new_node, successor, jumpkind='Ijk_Boring')
old_out_edges = self.graph.out_edges(node, data=True)
for _, dst, data in old_out_edges:
self.graph.add_edge(successor, dst, **data)
# remove the old node from indices
if node.addr in self._nodes and self._nodes[node.addr] is node:
del self._nodes[node.addr]
if node.addr in self._nodes_by_addr and node in self._nodes_by_addr[node.addr]:
self._nodes_by_addr[node.addr].remove(node)
# remove the old node form the graph
self.graph.remove_node(node)
# add the new node to indices
self._nodes[new_node.addr] = new_node
self._nodes_by_addr[new_node.addr].append(new_node)
# the function starting at this point is probably totally incorrect
# hopefully a future call to `make_functions()` will correct everything
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if not remove_function:
# add functions back
self._function_add_node(node.addr, node.addr)
successor_node = self.get_any_node(successor_node_addr)
if successor_node and successor_node.function_address == node.addr:
# if there is absolutely no predecessors to successor_node, we'd like to add it as a new function
# so that it will not be left behind
if not self.graph.predecessors(successor_node):
self._function_add_node(successor_node_addr, successor_node_addr)
#if node.addr in self.kb.functions.callgraph:
# self.kb.functions.callgraph.remove_node(node.addr)
def _analyze_all_function_features(self):
"""
Iteratively analyze all changed functions, update their returning attribute, until a fix-point is reached (i.e.
no new returning/not-returning functions are found).
:return: None
"""
while True:
new_returning_functions = set()
new_not_returning_functions = set()
while True:
new_changes = self._analyze_function_features()
new_not_returning_functions |= set(new_changes['functions_do_not_return'])
new_returning_functions |= set(new_changes['functions_return'])
if not new_changes['functions_do_not_return'] and not new_changes['functions_return']:
break
if not new_returning_functions and not new_not_returning_functions:
break
for returning_function in new_returning_functions:
if returning_function.addr in self._function_returns:
for fr in self._function_returns[returning_function.addr]:
# Confirm them all
self._changed_functions.add(fr.caller_func_addr)
try:
return_to_node = self._to_snippet(self._nodes[fr.return_to])
except KeyError:
return_to_node = fr.return_to
self.kb.functions._add_return_from_call(fr.caller_func_addr, fr.callee_func_addr,
return_to_node)
del self._function_returns[returning_function.addr]
for not_returning_function in new_not_returning_functions:
if not_returning_function.addr in self._function_returns:
for fr in self._function_returns[not_returning_function.addr]:
# Remove all those FakeRet edges
self._changed_functions.add(fr.caller_func_addr)
# convert them to codenodes
try:
call_site_node = self._to_snippet(self._nodes[fr.call_site_addr])
except KeyError:
call_site_node = fr.call_site_addr
try:
return_to_node = self._to_snippet(self._nodes[fr.return_to])
except KeyError:
return_to_node = fr.return_to
self.kb.functions._remove_fakeret(fr.caller_func_addr, call_site_node, return_to_node)
del self._function_returns[not_returning_function.addr]
def _clean_pending_exits(self, pending_exits):
"""
Remove those pending exits if:
a) they are the return exits of non-returning SimProcedures
b) they are the return exits of non-returning syscalls
c) they are the return exits of non-returning functions
:param pending_exits: A list of all pending exits
:return: None
"""
pending_exits_to_remove = []
for i, pe in enumerate(pending_exits):
if pe.returning_source is None:
# The original call failed. This pending exit must be followed.
continue
func = self.kb.functions.function(pe.returning_source)
if func is None:
# Why does it happen?
l.warning("An expected function at %s is not found. Please report it to Fish.",
hex(pe.returning_source) if pe.returning_source is not None else 'None')
continue
if func.returning is False:
# Oops, it's not returning
# Remove this pending exit
pending_exits_to_remove.append(i)
for index in reversed(pending_exits_to_remove):
del pending_exits[index]
#
# Graph utils
#
def _graph_add_edge(self, cfg_node, src_node, src_jumpkind, src_ins_addr, src_stmt_idx):
"""
Add an edge between the nodes, or add the node alone if it is an entry point
:param CFGNode cfg_node: node which is jumped to
:param CFGNode src_node: node which is jumped from; None if entry point
:param str src_jumpkind: what type of jump the edge takes
:param int src_ins_addr: source instruction address
:param int src_stmt_idx: source statement ID
:return: None
"""
if src_node is None:
self.graph.add_node(cfg_node)
else:
self.graph.add_edge(src_node, cfg_node, jumpkind=src_jumpkind, ins_addr=src_ins_addr,
stmt_idx=src_stmt_idx)
def _make_return_edges(self):
"""
For each returning function, create return edges in self.graph.
:return: None
"""
for func_addr, function in self.functions.iteritems():
if function.returning is False:
continue
# get the node on CFG
if function.startpoint is None:
l.warning('Function %#x does not have a startpoint (yet).', func_addr)
continue
startpoint = self.get_any_node(function.startpoint.addr)
if startpoint is None:
# weird...
l.warning('No CFGNode is found for function %#x in _make_return_edges().', func_addr)
continue
# get all endpoints
endpoints = function.endpoints
# get all callers
callers = self.get_predecessors(startpoint, jumpkind='Ijk_Call')
# for each caller, since they all end with a call instruction, get the immediate successor
return_targets = itertools.chain.from_iterable(
self.get_successors(caller, excluding_fakeret=False, jumpkind='Ijk_FakeRet') for caller in callers
)
return_targets = set(return_targets)
for ep in endpoints:
src = self.get_any_node(ep.addr)
for rt in return_targets:
if not src.instruction_addrs:
ins_addr = None
else:
if self.project.arch.branch_delay_slot:
if len(src.instruction_addrs) > 1:
ins_addr = src.instruction_addrs[-2]
else:
l.error('At %s: expecting more than one instruction. Only got one.', src)
ins_addr = None
else:
ins_addr = src.instruction_addrs[-1]
self._graph_add_edge(rt, src, 'Ijk_Ret', ins_addr, 'default')
#
# Function utils
#
def _function_add_node(self, addr, function_addr):
"""
Adds node to function manager, converting address to CodeNode if
possible
:param int addr: node address
:param int function_addr: address of function
:return: None
"""
try:
node = self._to_snippet(self._nodes[addr])
except KeyError:
node = addr
self.kb.functions._add_node(function_addr, node)
def _function_add_transition_edge(self, addr, src_node, function_addr, to_outside=False, to_function_addr=None,
stmt_idx=None, ins_addr=None):
"""
Add a transition edge to the function transition map.
:param int addr: Address that the control flow transits to.
:param CFGNode src_node: The source node that the control flow transits from.
:param int function_addr: Function address.
:return: True if the edge is correctly added. False if any exception occurred (for example, the target address
does not exist)
:rtype: bool
"""
try:
try:
target = self._to_snippet(self._nodes[addr])
except KeyError:
target = addr
if src_node is None:
# Add this basic block into the function manager
self.kb.functions._add_node(function_addr, target)
else:
src_node = self._to_snippet(src_node)
if not to_outside:
self.kb.functions._add_transition_to(function_addr, src_node, target, stmt_idx=stmt_idx,
ins_addr=ins_addr
)
else:
self.kb.functions._add_outside_transition_to(function_addr, src_node, target,
to_function_addr=to_function_addr,
stmt_idx=stmt_idx, ins_addr=ins_addr
)
return True
except (SimMemoryError, SimEngineError):
return False
def _function_add_call_edge(self, addr, src_node, ret_addr, function_addr, syscall=False, stmt_idx=None,
ins_addr=None
):
"""
Add a call edge to the function transition map.
:param int addr: Address that is being called (callee).
:param CFGNode src_node: The source CFG node (caller).
:param int ret_addr: Address that returns to (in case the function returns).
:param int function_addr: Function address.
:param bool syscall: If this is a call to a syscall or not.
:param int or str stmt_idx: Statement ID of this call.
:param int or None ins_addr: Instruction address of this call.
:return: True if the edge is added. False if any exception occurred.
:rtype: bool
"""
try:
if src_node is None:
self.kb.functions._add_node(function_addr, addr, syscall=syscall)
else:
src_node = self._to_snippet(src_node)
try:
ret_node = self._to_snippet(self._nodes[ret_addr])
except KeyError:
ret_node = ret_addr
self.kb.functions._add_call_to(function_addr, src_node, addr, ret_node, syscall=syscall,
stmt_idx=stmt_idx, ins_addr=ins_addr,
)
return True
except (SimMemoryError, SimEngineError):
return False
def _function_add_fakeret_edge(self, addr, src_node, function_addr, confirmed=None):
"""
Generate CodeNodes for the target and source; if there is no source node,
add the target node to the function, otherwise record a fake return edge
in the function manager
:param int addr: target address
:param angr.analyses.CFGNode src_node: source node
:param int function_addr: address of function
:param confirmed: used as attribute on eventual digraph
:return: None
"""
try:
target = self._to_snippet(self._nodes[addr])
except KeyError:
target = addr
if src_node is None:
self.kb.functions._add_node(function_addr, target)
else:
src_node = self._to_snippet(src_node)
self.kb.functions._add_fakeret_to(function_addr, src_node, target, confirmed=confirmed)
def _function_add_return_site(self, addr, function_addr):
"""
Generate a CodeNode for the target address and register it with the
function manager as a return site of the function
:param int addr: target address
:param int function_addr: address of function
:return: None
"""
try:
target = self._to_snippet(self._nodes[addr])
except KeyError:
target = addr
self.kb.functions._add_return_from(function_addr, target)
def _function_add_return_edge(self, return_from_addr, return_to_addr, function_addr):
"""
Generate a CodeNode for return_to_addr, and record in the function manager
that the function returns from return_from_addr to return_to_addr
:param int return_from_addr: address the return comes from
:param int return_to_addr: address the return goes to
:param int function_addr: address of function
:return: None
"""
try:
return_to_ = self._to_snippet(self._nodes[return_to_addr])
except KeyError:
return_to_ = return_to_addr
self.kb.functions._add_return_from_call(function_addr, return_from_addr, return_to_)
#
# Architecture-specific methods
#
def _arm_track_lr_on_stack(self, addr, irsb, function):
"""
At the beginning of the basic block, we check if the first instruction stores the LR register onto the stack.
If it does, we calculate the offset of that store, and record the offset in function.info.
For instance, here is the disassembly of a THUMB mode function:
000007E4 STR.W LR, [SP,#var_4]!
000007E8 MOV R2, R1
000007EA SUB SP, SP, #0xC
000007EC MOVS R1, #0
...
00000800 ADD SP, SP, #0xC
00000802 LDR.W PC, [SP+4+var_4],#4
The very last basic block has a jumpkind of Ijk_Boring, which is because VEX cannot do such complicated analysis
to determine the real jumpkind.
As we can see, instruction 7e4h stores LR at [SP-4], and at the end of this function, instruction 802h loads the
saved LR from [SP] into PC, then increments SP by 4. We execute the first instruction, and track the following things:
- if the value from register LR is stored onto the stack.
- the difference between the offset of the LR store on stack, and the SP after the store.
If at the end of the function, the LR is read out from the stack at the exact same stack offset, we will change
the jumpkind of the final IRSB to Ijk_Ret.
This method can be enabled by setting "ret_jumpkind_heuristics", which is an architecture-specific option on
ARM, to True.
:param int addr: Address of the basic block.
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
"""
if 'lr_saved_on_stack' in function.info:
return
#
# if it does, we log it down to the Function object.
lr_offset = self.project.arch.registers['lr'][0]
sp_offset = self.project.arch.sp_offset
initial_sp = 0x7fff0000
initial_lr = 0xabcdef
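# Sentinel values: we "execute" the first instruction with made-up SP/LR and
# simply watch whether, and at which stack offset, the LR value is stored.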
tmps = {}
# pylint:disable=too-many-nested-blocks
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
if stmt.addr + stmt.delta != addr:
break
elif isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get):
if data.offset == sp_offset:
tmps[stmt.tmp] = initial_sp
elif data.offset == lr_offset:
tmps[stmt.tmp] = initial_lr
elif isinstance(data, pyvex.IRExpr.Binop):
if data.op == 'Iop_Sub32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] - arg1.con.value
elif isinstance(stmt, (pyvex.IRStmt.Store, pyvex.IRStmt.StoreG)):
data = stmt.data
storing_lr = False
if isinstance(data, pyvex.IRExpr.RdTmp):
if data.tmp in tmps:
val = tmps[data.tmp]
if val == initial_lr:
# we are storing LR to somewhere
storing_lr = True
if storing_lr:
if isinstance(stmt.addr, pyvex.IRExpr.RdTmp):
if stmt.addr.tmp in tmps:
storing_addr = tmps[stmt.addr.tmp]
function.info['lr_saved_on_stack'] = True
function.info['lr_on_stack_offset'] = storing_addr - initial_sp
break
if 'lr_saved_on_stack' not in function.info:
function.info['lr_saved_on_stack'] = False
def _arm_track_read_lr_from_stack(self, addr, irsb, function): # pylint:disable=unused-argument
"""
At the end of a basic block, simulate the very last instruction to see if the return address is read from the
stack and written in PC. If so, the jumpkind of this IRSB will be set to Ijk_Ret. For detailed explanations,
please see the documentation of _arm_track_lr_on_stack().
:param int addr: The address of the basic block.
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
"""
if 'lr_saved_on_stack' not in function.info or not function.info['lr_saved_on_stack']:
return
sp_offset = self.project.arch.sp_offset
initial_sp = 0x7fff0000
last_sp = None
tmps = {}
last_imark = next((stmt for stmt in reversed(irsb.statements)
if isinstance(stmt, pyvex.IRStmt.IMark)
), 0
)
tmp_irsb = self.project.factory.block(last_imark.addr + last_imark.delta).vex
# pylint:disable=too-many-nested-blocks
for stmt in tmp_irsb.statements:
if isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get) and data.offset == sp_offset:
# t0 = GET:I32(sp)
tmps[stmt.tmp] = initial_sp
elif isinstance(data, pyvex.IRExpr.Binop):
# only support Add
if data.op == 'Iop_Add32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] + arg1.con.value
elif isinstance(data, pyvex.IRExpr.Load):
if isinstance(data.addr, pyvex.IRExpr.RdTmp):
if data.addr.tmp in tmps:
tmps[stmt.tmp] = ('load', tmps[data.addr.tmp])
elif isinstance(stmt, pyvex.IRStmt.Put):
if stmt.offset == sp_offset and isinstance(stmt.data, pyvex.IRExpr.RdTmp):
if stmt.data.tmp in tmps:
# loading things into sp
last_sp = tmps[stmt.data.tmp]
if last_sp is not None and isinstance(tmp_irsb.next, pyvex.IRExpr.RdTmp):
val = tmps[tmp_irsb.next.tmp]
if isinstance(val, tuple) and val[0] == 'load':
# the value comes from memory
memory_addr = val[1]
lr_on_stack_offset = memory_addr - last_sp
if lr_on_stack_offset == function.info['lr_on_stack_offset']:
# the jumpkind should be Ret instead of boring
irsb.jumpkind = 'Ijk_Ret'
#
# Other methods
#
def _generate_cfgnode(self, addr, current_function_addr):
"""
Generate a CFGNode that starts at `addr`.
Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes.
If the current architecture is ARM, this method will try to lift the block in the mode specified by the address
(determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try
the other mode. If the basic block is successfully decoded in the other mode (different from the initial one),
`addr` and `current_function_addr` are updated.
:param int addr: Address of the basic block.
:param int current_function_addr: Address of the current function.
:return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object)
:rtype: tuple
"""
try:
if addr in self._nodes:
cfg_node = self._nodes[addr]
irsb = cfg_node.irsb
if cfg_node.function_address != current_function_addr:
cfg_node.function_address = current_function_addr
else:
# if possible, check the distance between `addr` and the end of this section
distance = None
obj = self.project.loader.addr_belongs_to_object(addr)
if obj:
# is there a section?
has_executable_section = any(sec.is_executable for sec in obj.sections)
section = self._addr_belongs_to_section(addr)
if has_executable_section and section is None:
# the basic block should not exist here...
return None, None, None, None
if section is not None:
if not section.is_executable:
# the section is not executable...
return None, None, None, None
distance = obj.rebase_addr + section.vaddr + section.memsize - addr
distance = min(distance, VEX_IRSB_MAX_SIZE)
# TODO: handle segment information as well
# Let's try to create the pyvex IRSB directly, since it's much faster
nodecode = False
irsb = None
try:
irsb = self.project.factory.block(addr, size=distance).vex
except SimTranslationError:
nodecode = True
if (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode') and \
self.project.arch.name in ('ARMHF', 'ARMEL'):
# maybe the current mode is wrong?
nodecode = False
if addr % 2 == 0:
addr_0 = addr + 1
else:
addr_0 = addr - 1
if addr_0 in self._nodes:
# it has been analyzed before
cfg_node = self._nodes[addr_0]
irsb = cfg_node.irsb
return addr_0, cfg_node.function_address, cfg_node, irsb
try:
irsb = self.project.factory.block(addr_0, size=distance).vex
except SimTranslationError:
nodecode = True
if not (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode'):
# it is decodeable
if current_function_addr == addr:
current_function_addr = addr_0
addr = addr_0
if nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode':
# decoding error
# we still occupy that location since it cannot be decoded anyways
self._seg_list.occupy(addr, 1, 'nodecode')
return None, None, None, None
# Occupy the block in segment list
if irsb.size > 0:
if self.project.arch.name in ('ARMHF', 'ARMEL') and addr % 2 == 1:
# thumb mode
real_addr = addr - 1
else:
real_addr = addr
self._seg_list.occupy(real_addr, irsb.size, "code")
# Create a CFG node, and add it to the graph
cfg_node = CFGNode(addr, irsb.size, self, function_address=current_function_addr, block_id=addr,
irsb=irsb
)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
return addr, current_function_addr, cfg_node, irsb
except (SimMemoryError, SimEngineError):
return None, None, None, None
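# Note on the ARM/THUMB retry in _generate_cfgnode(): angr encodes the
# decoding mode in the address parity (even = ARM, odd = THUMB), so flipping
# the lowest bit re-lifts the same bytes in the other mode. The returned
# `addr` keeps that mode encoding, while self._seg_list is always keyed on
# the real (even) address.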
def _process_block_arch_specific(self, addr, irsb, func_addr): # pylint: disable=unused-argument
"""
Applies architecture-specific fixes for ['ARMEL', 'ARMHF', 'MIPS32'].
For ARM, determines whether the link register is saved on the stack in the
function prologue (see _arm_track_lr_on_stack()) and, for later blocks,
whether it is read back into the PC (see _arm_track_read_lr_from_stack()).
For MIPS32, simulates execution from the current address with the global
pointer set to 0xffffffff; if stepping past the last instruction that
writes $gp leaves a different, concrete value, that value is recorded in
the function's info dict so a concrete global pointer can be used later.
:param int addr: Address of the IRSB.
:param pyvex.IRSB irsb: The IRSB object.
:param func_addr: Address of the function this IRSB belongs to.
:return: None
"""
if self.project.arch.name in ('ARMEL', 'ARMHF'):
if self._arch_options.ret_jumpkind_heuristics:
if addr == func_addr:
self._arm_track_lr_on_stack(addr, irsb, self.functions[func_addr])
elif 'lr_saved_on_stack' in self.functions[func_addr].info and \
self.functions[func_addr].info['lr_saved_on_stack'] and \
irsb.jumpkind == 'Ijk_Boring' and \
irsb.next is not None and \
isinstance(irsb.next, pyvex.IRExpr.RdTmp):
# do a bunch of checks to avoid unnecessary simulation from happening
self._arm_track_read_lr_from_stack(addr, irsb, self.functions[func_addr])
elif self.project.arch.name == "MIPS32":
function = self.kb.functions.function(func_addr)
if addr >= func_addr and addr - func_addr < 15 * 4 and 'gp' not in function.info:
# check if gp is being written to
last_gp_setting_insn_id = None
insn_ctr = 0
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
insn_ctr += 1
if insn_ctr >= 10:
break
elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == self.project.arch.registers['gp'][0]:
last_gp_setting_insn_id = insn_ctr
break
if last_gp_setting_insn_id is None:
return
# Prudently search for $gp values
state = self.project.factory.blank_state(addr=addr, mode="fastpath",
remove_options={simuvex.options.OPTIMIZE_IR}
)
state.regs.t9 = func_addr
state.regs.gp = 0xffffffff
p = self.project.factory.path(state)
p.step(num_inst=last_gp_setting_insn_id + 1)
if not p.successors:
return
state = p.successors[0].state
if not state.regs.gp.symbolic and state.se.is_false(state.regs.gp == 0xffffffff):
function.info['gp'] = state.regs.gp._model_concrete.value
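# For reference, the MIPS32 $gp tracking above targets a prologue such as
# (schematic disassembly, hypothetical constants):
#   lui   $gp, 0xfc0          ; upper half of the _gp value
#   addiu $gp, $gp, 0x7180    ; lower half
#   addu  $gp, $gp, $t9       ; per the MIPS ABI, $t9 holds the callee address
# Stepping past the last write to $gp with t9 = func_addr therefore yields a
# concrete $gp, which is cached in function.info['gp'].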
#
# Public methods
#
def copy(self):
n = CFGFast.__new__(CFGFast)
super(CFGFast, self).make_copy(n)
n._binary = self._binary
n._start = self._start
n._end = self._end
n._pickle_intermediate_results = self._pickle_intermediate_results
n._indirect_jump_target_limit = self._indirect_jump_target_limit
n._collect_data_ref = self._collect_data_ref
n._use_symbols = self._use_symbols
n._use_function_prologues = self._use_function_prologues
n._resolve_indirect_jumps = self._resolve_indirect_jumps
n._force_segment = self._force_segment
n._force_complete_scan = self._force_complete_scan
n._progress_callback = self._progress_callback
n._show_progressbar = self._show_progressbar
n._exec_mem_regions = self._exec_mem_regions[::]
n._exec_mem_region_size = self._exec_mem_region_size
n._memory_data = self._memory_data.copy()
n._seg_list = self._seg_list.copy()
n._function_addresses_from_symbols = self._function_addresses_from_symbols.copy()
n._graph = self._graph
return n
def output(self):
s = "%s" % self._graph.edges(data=True)
return s
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size))
lst = sorted(lst, key=lambda x: x[0])
return lst
register_analysis(CFGFast, 'CFGFast')
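# A minimal usage sketch (hypothetical binary path; assumes a standard angr
# setup of this era where importing this module runs the registration above):
#
#   import angr
#   proj = angr.Project('/bin/true', load_options={'auto_load_libs': False})
#   cfg = proj.analyses.CFGFast()
#   print(cfg.generate_code_cover())   # [(addr, size), ...] sorted by address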
# -*- coding: utf-8 -*-
################################################################################
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import sys as _sys
from .. import LOCAL_ENCODING
from .console import Fore as fg
DISABLE_PROMPT = None
'''When a prompt occurs and this value is not ``None``, it may be ``exit``
to call sys.exit (see EXIT_STATUS) or ``raise`` to throw a PromptExit
(a RuntimeError subclass), which can be caught if desired.'''
EXIT_STATUS = 2
BOOL_TRUE_RESPONSES = ("yes", "y", "true")
class PromptExit(RuntimeError):
'''Raised when ``DISABLE_PROMPT`` is 'raise' and ``prompt`` is called.'''
pass
def parseIntList(resp):
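'''Parse a comma- and/or whitespace-separated string of integers into a
de-duplicated list, e.g. "1,2 3,3" -> [1, 2, 3] (set-based, so the result
order is not guaranteed).'''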
ints = set()
resp = resp.replace(',', ' ')
for c in resp.split():
i = int(c)
ints.add(i)
return list(ints)
def prompt(msg, default=None, required=True, type_=unicode,
validate=None, choices=None):
'''Prompt the user for input; the request message is in ``msg``. If
``default`` is not ``None`` it will be displayed as the default and
returned if no input is entered. The value ``None`` is only returned when
``required`` is ``False``. The response is passed to ``type_`` for
conversion (default is unicode) before being returned. An optional list of
valid responses can be provided in ``choices``.'''
yes_no_prompt = default is True or default is False
if yes_no_prompt:
default_str = "Yn" if default is True else "yN"
else:
default_str = str(default) if default is not None else None # handle falsy defaults such as 0
if default is not None:
msg = "%s [%s]" % (msg, default_str)
msg += ": " if not yes_no_prompt else "? "
if DISABLE_PROMPT:
if DISABLE_PROMPT == "exit":
print(msg + "\nPrompting is disabled, exiting.")
_sys.exit(EXIT_STATUS)
else:
raise PromptExit(msg)
resp = None
while resp is None:
try:
resp = raw_input(msg).decode(LOCAL_ENCODING)
except EOFError:
# Converting EOF into PromptExit lets callers catch an aborted prompt
# without catching unrelated EOFErrors
raise PromptExit()
if not resp and default not in (None, ""):
resp = str(default)
if resp:
if yes_no_prompt:
resp = resp.lower() in BOOL_TRUE_RESPONSES
else:
resp = resp.strip()
try:
resp = type_(resp)
except Exception as ex:
print(fg.red(str(ex)))
resp = None
elif not required:
return None
else:
resp = None
# skip the choice/validate checks when conversion already failed and reported
if resp is not None and ((choices and resp not in choices) or
(validate and not validate(resp))):
if choices:
print(fg.red("Invalid response, choose from: ") + str(choices))
else:
print(fg.red("Invalid response"))
resp = None
return resp
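# A minimal usage sketch (hypothetical prompts and validators; Python 2,
# matching this module's raw_input/unicode usage):
#
#   count = prompt("How many tracks", default=1, type_=int,
#                  validate=lambda n: n > 0)
#   overwrite = prompt("Overwrite existing tags", default=False) # yes/no mode
#   picks = prompt("Track numbers to edit", type_=parseIntList)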