id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
12802956 | <reponame>AidanGlickman/sportsreference
# pyquery/CSS selectors for scraping a team's season-level stats from the
# pro-football-reference season summary page.  Keys are the attribute names
# exposed to callers; values select the matching stat cell by its data-stat
# attribute (":first" limits the match to the first occurrence).
PARSING_SCHEME = {
    'name': 'a',
    'games_played': 'td[data-stat="g"]:first',
    'wins': 'td[data-stat="wins"]:first',
    'losses': 'td[data-stat="losses"]:first',
    'win_percentage': 'td[data-stat="win_loss_perc"]:first',
    'points_for': 'td[data-stat="points"]:first',
    'points_against': 'td[data-stat="points_opp"]:first',
    'points_difference': 'td[data-stat="points_diff"]:first',
    'margin_of_victory': 'td[data-stat="mov"]:first',
    'strength_of_schedule': 'td[data-stat="sos_total"]:first',
    'simple_rating_system': 'td[data-stat="srs_total"]:first',
    'offensive_simple_rating_system': 'td[data-stat="srs_offense"]:first',
    'defensive_simple_rating_system': 'td[data-stat="srs_defense"]:first',
    'yards': 'td[data-stat="total_yards"]:first',
    'plays': 'td[data-stat="plays_offense"]:first',
    'yards_per_play': 'td[data-stat="yds_per_play_offense"]:first',
    'turnovers': 'td[data-stat="turnovers"]:first',
    'fumbles': 'td[data-stat="fumbles_lost"]:first',
    'first_downs': 'td[data-stat="first_down"]:first',
    'pass_completions': 'td[data-stat="pass_cmp"]:first',
    'pass_attempts': 'td[data-stat="pass_att"]:first',
    'pass_yards': 'td[data-stat="pass_yds"]:first',
    'pass_touchdowns': 'td[data-stat="pass_td"]:first',
    'interceptions': 'td[data-stat="pass_int"]:first',
    'pass_net_yards_per_attempt': 'td[data-stat="pass_net_yds_per_att"]:first',
    'pass_first_downs': 'td[data-stat="pass_fd"]:first',
    'rush_attempts': 'td[data-stat="rush_att"]:first',
    'rush_yards': 'td[data-stat="rush_yds"]:first',
    'rush_touchdowns': 'td[data-stat="rush_td"]:first',
    'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]:first',
    'rush_first_downs': 'td[data-stat="rush_fd"]:first',
    'penalties': 'td[data-stat="penalties"]:first',
    'yards_from_penalties': 'td[data-stat="penalties_yds"]:first',
    'first_downs_from_penalties': 'td[data-stat="pen_fd"]:first',
    'percent_drives_with_points': 'td[data-stat="score_pct"]:first',
    'percent_drives_with_turnovers': 'td[data-stat="turnover_pct"]:first',
    'points_contributed_by_offense': 'td[data-stat="exp_pts_tot"]:first'
}
# Selectors for parsing a single game row of a team's gamelog/schedule page.
# Same data-stat convention as PARSING_SCHEME above.
SCHEDULE_SCHEME = {
    'week': 'th[data-stat="week_num"]:first',
    'day': 'td[data-stat="game_day_of_week"]:first',
    'date': 'td[data-stat="game_date"]:first',
    'result': 'td[data-stat="game_outcome"]:first',
    'overtime': 'td[data-stat="overtime"]:first',
    'location': 'td[data-stat="game_location"]:first',
    'opponent_name': 'td[data-stat="opp"]:first',
    'points_scored': 'td[data-stat="pts_off"]:first',
    'points_allowed': 'td[data-stat="pts_def"]:first',
    'pass_completions': 'td[data-stat="pass_cmp"]:first',
    'pass_attempts': 'td[data-stat="pass_att"]:first',
    'pass_yards': 'td[data-stat="pass_yds"]:first',
    'pass_touchdowns': 'td[data-stat="pass_td"]:first',
    'interceptions': 'td[data-stat="pass_int"]:first',
    'times_sacked': 'td[data-stat="pass_sacked"]:first',
    'yards_lost_from_sacks': 'td[data-stat="pass_sacked_yds"]:first',
    'pass_yards_per_attempt': 'td[data-stat="pass_yds_per_att"]:first',
    'pass_completion_rate': 'td[data-stat="pass_cmp_perc"]:first',
    'quarterback_rating': 'td[data-stat="pass_rating"]:first',
    'rush_attempts': 'td[data-stat="rush_att"]:first',
    'rush_yards': 'td[data-stat="rush_yds"]:first',
    'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]:first',
    'rush_touchdowns': 'td[data-stat="rush_td"]:first',
    'field_goals_made': 'td[data-stat="fgm"]:first',
    'field_goals_attempted': 'td[data-stat="fga"]:first',
    'extra_points_made': 'td[data-stat="xpm"]:first',
    'extra_points_attempted': 'td[data-stat="xpa"]:first',
    'punts': 'td[data-stat="punt"]:first',
    'punt_yards': 'td[data-stat="punt_yds"]:first',
    'third_down_conversions': 'td[data-stat="third_down_success"]:first',
    'third_down_attempts': 'td[data-stat="third_down_att"]:first',
    'fourth_down_conversions': 'td[data-stat="fourth_down_success"]:first',
    'fourth_down_attempts': 'td[data-stat="fourth_down_att"]:first',
    'time_of_possession': 'td[data-stat="time_of_poss"]:first'
}
# Selectors for parsing a single boxscore page.  All visitor stats share one
# "vis_stat" selector and all home stats share one "home_stat" selector; the
# specific cell (and sub-value) for each field is picked out afterwards via
# BOXSCORE_ELEMENT_INDEX and BOXSCORE_ELEMENT_SUB_INDEX defined in this file.
BOXSCORE_SCHEME = {
    'game_info': 'div[class="scorebox_meta"]:first',
    'home_name': 'a[itemprop="name"]:first',
    'summary': 'table[class="linescore nohover stats_table no_freeze"]:first',
    'away_name': 'a[itemprop="name"]:last',
    'away_points': 'div[class="scorebox"] div[class="score"]',
    'away_first_downs': 'td[data-stat="vis_stat"]',
    'away_rush_attempts': 'td[data-stat="vis_stat"]',
    'away_rush_yards': 'td[data-stat="vis_stat"]',
    'away_rush_touchdowns': 'td[data-stat="vis_stat"]',
    'away_pass_completions': 'td[data-stat="vis_stat"]',
    'away_pass_attempts': 'td[data-stat="vis_stat"]',
    'away_pass_yards': 'td[data-stat="vis_stat"]',
    'away_pass_touchdowns': 'td[data-stat="vis_stat"]',
    'away_interceptions': 'td[data-stat="vis_stat"]',
    'away_times_sacked': 'td[data-stat="vis_stat"]',
    'away_yards_lost_from_sacks': 'td[data-stat="vis_stat"]',
    'away_net_pass_yards': 'td[data-stat="vis_stat"]',
    'away_total_yards': 'td[data-stat="vis_stat"]',
    'away_fumbles': 'td[data-stat="vis_stat"]',
    'away_fumbles_lost': 'td[data-stat="vis_stat"]',
    'away_turnovers': 'td[data-stat="vis_stat"]',
    'away_penalties': 'td[data-stat="vis_stat"]',
    'away_yards_from_penalties': 'td[data-stat="vis_stat"]',
    'away_third_down_conversions': 'td[data-stat="vis_stat"]',
    'away_third_down_attempts': 'td[data-stat="vis_stat"]',
    'away_fourth_down_conversions': 'td[data-stat="vis_stat"]',
    'away_fourth_down_attempts': 'td[data-stat="vis_stat"]',
    'away_time_of_possession': 'td[data-stat="vis_stat"]',
    'home_points': 'div[class="scorebox"] div[class="score"]',
    'home_first_downs': 'td[data-stat="home_stat"]',
    'home_rush_attempts': 'td[data-stat="home_stat"]',
    'home_rush_yards': 'td[data-stat="home_stat"]',
    'home_rush_touchdowns': 'td[data-stat="home_stat"]',
    'home_pass_completions': 'td[data-stat="home_stat"]',
    'home_pass_attempts': 'td[data-stat="home_stat"]',
    'home_pass_yards': 'td[data-stat="home_stat"]',
    'home_pass_touchdowns': 'td[data-stat="home_stat"]',
    'home_interceptions': 'td[data-stat="home_stat"]',
    'home_times_sacked': 'td[data-stat="home_stat"]',
    'home_yards_lost_from_sacks': 'td[data-stat="home_stat"]',
    'home_net_pass_yards': 'td[data-stat="home_stat"]',
    'home_total_yards': 'td[data-stat="home_stat"]',
    'home_fumbles': 'td[data-stat="home_stat"]',
    'home_fumbles_lost': 'td[data-stat="home_stat"]',
    'home_turnovers': 'td[data-stat="home_stat"]',
    'home_penalties': 'td[data-stat="home_stat"]',
    'home_yards_from_penalties': 'td[data-stat="home_stat"]',
    'home_third_down_conversions': 'td[data-stat="home_stat"]',
    'home_third_down_attempts': 'td[data-stat="home_stat"]',
    'home_fourth_down_conversions': 'td[data-stat="home_stat"]',
    'home_fourth_down_attempts': 'td[data-stat="home_stat"]',
    'home_time_of_possession': 'td[data-stat="home_stat"]'
}
# For each boxscore field, the index of the matched element in the list
# returned by the (deliberately generic) BOXSCORE_SCHEME selector above.
# Several fields share one index because the site packs multiple values into
# a single table cell (e.g. "Rush-Yds-TDs"); BOXSCORE_ELEMENT_SUB_INDEX
# picks the value within the cell.
BOXSCORE_ELEMENT_INDEX = {
    'date': 0,
    'time': 1,
    'stadium': 2,
    'attendance': 3,
    'duration': 4,
    'away_points': 1,
    'away_first_downs': 0,
    'away_rush_attempts': 1,
    'away_rush_yards': 1,
    'away_rush_touchdowns': 1,
    'away_pass_completions': 2,
    'away_pass_attempts': 2,
    'away_pass_yards': 2,
    'away_pass_touchdowns': 2,
    'away_interceptions': 2,
    'away_times_sacked': 3,
    'away_yards_lost_from_sacks': 3,
    'away_net_pass_yards': 4,
    'away_total_yards': 5,
    'away_fumbles': 6,
    'away_fumbles_lost': 6,
    'away_turnovers': 7,
    'away_penalties': 8,
    'away_yards_from_penalties': 8,
    'away_third_down_conversions': 9,
    'away_third_down_attempts': 9,
    'away_fourth_down_conversions': 10,
    'away_fourth_down_attempts': 10,
    'away_time_of_possession': 11,
    'home_points': 0,
    'home_first_downs': 0,
    'home_rush_attempts': 1,
    'home_rush_yards': 1,
    'home_rush_touchdowns': 1,
    'home_pass_completions': 2,
    'home_pass_attempts': 2,
    'home_pass_yards': 2,
    'home_pass_touchdowns': 2,
    'home_interceptions': 2,
    'home_times_sacked': 3,
    'home_yards_lost_from_sacks': 3,
    'home_net_pass_yards': 4,
    'home_total_yards': 5,
    'home_fumbles': 6,
    'home_fumbles_lost': 6,
    'home_turnovers': 7,
    'home_penalties': 8,
    'home_yards_from_penalties': 8,
    'home_third_down_conversions': 9,
    'home_third_down_attempts': 9,
    'home_fourth_down_conversions': 10,
    'home_fourth_down_attempts': 10,
    'home_time_of_possession': 11
}
# Designates the index of the item within the requested tag.  Used together
# with BOXSCORE_ELEMENT_INDEX: once the right cell is found, this picks the
# position of the value inside the cell's packed text (e.g. in "Rush-Yds-TDs"
# the attempts are sub-index 0, yards 1, touchdowns 2).
BOXSCORE_ELEMENT_SUB_INDEX = {
    'away_rush_attempts': 0,
    'away_rush_yards': 1,
    'away_rush_touchdowns': 2,
    'away_pass_completions': 0,
    'away_pass_attempts': 1,
    'away_pass_yards': 2,
    'away_pass_touchdowns': 3,
    'away_interceptions': 4,
    'away_times_sacked': 0,
    'away_yards_lost_from_sacks': 1,
    'away_fumbles': 0,
    'away_fumbles_lost': 1,
    'away_penalties': 0,
    'away_yards_from_penalties': 1,
    'away_third_down_conversions': 0,
    'away_third_down_attempts': 1,
    'away_fourth_down_conversions': 0,
    'away_fourth_down_attempts': 1,
    'home_rush_attempts': 0,
    'home_rush_yards': 1,
    'home_rush_touchdowns': 2,
    'home_pass_completions': 0,
    'home_pass_attempts': 1,
    'home_pass_yards': 2,
    'home_pass_touchdowns': 3,
    'home_interceptions': 4,
    'home_times_sacked': 0,
    'home_yards_lost_from_sacks': 1,
    'home_fumbles': 0,
    'home_fumbles_lost': 1,
    'home_penalties': 0,
    'home_yards_from_penalties': 1,
    'home_third_down_conversions': 0,
    'home_third_down_attempts': 1,
    'home_fourth_down_conversions': 0,
    'home_fourth_down_attempts': 1,
}
# Selectors for scraping a single player's career/season stat rows.
# Keys are attribute names exposed to callers; values select the stat cell by
# its data-stat attribute on pro-football-reference player pages.
PLAYER_SCHEME = {
    'season': 'th[data-stat="year_id"]',
    'name': 'h1[itemprop="name"]',
    'team_abbreviation': 'td[data-stat="team"]',
    'position': 'td[data-stat="pos"]',
    'height': 'span[itemprop="height"]',
    'weight': 'span[itemprop="weight"]',
    # NOTE(review): empty data-stat selectors below — presumably these two
    # fields are parsed specially elsewhere; confirm before relying on them.
    'birth_date': 'td[data-stat=""]',
    'contract': 'td[data-stat=""]',
    'games': 'td[data-stat="g"]',
    'games_started': 'td[data-stat="gs"]',
    'approximate_value': 'td[data-stat="av"]',
    'qb_record': 'td[data-stat="qb_rec"]',
    'completed_passes': 'td[data-stat="pass_cmp"]',
    'attempted_passes': 'td[data-stat="pass_att"]',
    'passing_completion': 'td[data-stat="pass_cmp_perc"]',
    'passing_yards': 'td[data-stat="pass_yds"]',
    'passing_touchdowns': 'td[data-stat="pass_td"]',
    'passing_touchdown_percentage': 'td[data-stat="pass_td_perc"]',
    'interceptions_thrown': 'td[data-stat="pass_int"]',
    'interception_percentage': 'td[data-stat="pass_int_perc"]',
    'longest_pass': 'td[data-stat="pass_long"]',
    'passing_yards_per_attempt': 'td[data-stat="pass_yds_per_att"]',
    'adjusted_yards_per_attempt': 'td[data-stat="pass_adj_yds_per_att"]',
    'yards_per_completed_pass': 'td[data-stat="pass_yds_per_cmp"]',
    'yards_per_game_played': 'td[data-stat="pass_yds_per_g"]',
    'quarterback_rating': 'td[data-stat="pass_rating"]',
    'espn_qbr': 'td[data-stat="qbr"]',
    'times_sacked': 'td[data-stat="pass_sacked"]',
    'yards_lost_to_sacks': 'td[data-stat="pass_sacked_yds"]',
    'net_yards_per_pass_attempt': 'td[data-stat="pass_net_yds_per_att"]',
    'adjusted_net_yards_per_pass_attempt':
        'td[data-stat="pass_adj_net_yds_per_att"]',
    # NOTE(review): "pass_sacked_per" may be a typo for "pass_sacked_perc"
    # (compare sack_percentage_index below) — verify against the site markup.
    'sack_percentage': 'td[data-stat="pass_sacked_per"]',
    'fourth_quarter_comebacks': 'td[data-stat="comebacks"]',
    'game_winning_drives': 'td[data-stat="gwd"]',
    'yards_per_attempt_index': 'td[data-stat="pass_yds_per_att_index"]',
    'net_yards_per_attempt_index':
        'td[data-stat="pass_net_yds_per_att_index"]',
    'adjusted_yards_per_attempt_index':
        'td[data-stat="pass_adj_yds_per_att_index"]',
    'adjusted_net_yards_per_attempt_index':
        'td[data-stat="pass_adj_net_yds_per_att_index"]',
    'completion_percentage_index': 'td[data-stat="pass_cmp_perc_index"]',
    'touchdown_percentage_index': 'td[data-stat="pass_td_perc_index"]',
    'interception_percentage_index': 'td[data-stat="pass_int_perc_index"]',
    'sack_percentage_index': 'td[data-stat="pass_sacked_perc_index"]',
    'passer_rating_index': 'td[data-stat="pass_rating_index"]',
    'rush_attempts': 'td[data-stat="rush_att"]',
    'rush_yards': 'td[data-stat="rush_yds"]',
    'rush_touchdowns': 'td[data-stat="rush_td"]',
    'longest_rush': 'td[data-stat="rush_long"]',
    'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]',
    'rush_yards_per_game': 'td[data-stat="rush_yds_per_g"]',
    'rush_attempts_per_game': 'td[data-stat="rush_att_per_g"]',
    'times_pass_target': 'td[data-stat="targets"]',
    'receptions': 'td[data-stat="rec"]',
    'receiving_yards': 'td[data-stat="rec_yds"]',
    'receiving_yards_per_reception': 'td[data-stat="rec_yds_per_rec"]',
    'receiving_touchdowns': 'td[data-stat="rec_td"]',
    'longest_reception': 'td[data-stat="rec_long"]',
    'receptions_per_game': 'td[data-stat="rec_per_g"]',
    'receiving_yards_per_game': 'td[data-stat="rec_yds_per_g"]',
    'catch_percentage': 'td[data-stat="catch_pct"]',
    'touches': 'td[data-stat="touches"]',
    'yards_per_touch': 'td[data-stat="yds_per_touch"]',
    'yards_from_scrimmage': 'td[data-stat="yds_from_scrimmage"]',
    'rushing_and_receiving_touchdowns': 'td[data-stat="rush_receive_td"]',
    'fumbles': 'td[data-stat="fumbles"]',
    'punt_returns': 'td[data-stat="punt_ret"]',
    'punt_return_yards': 'td[data-stat="punt_ret_yds"]',
    'punt_return_touchdown': 'td[data-stat="punt_ret_td"]',
    'longest_punt_return': 'td[data-stat="punt_ret_long"]',
    'yards_per_punt_return': 'td[data-stat="punt_ret_yds_per_ret"]',
    'kickoff_returns': 'td[data-stat="kick_ret"]',
    'kickoff_return_yards': 'td[data-stat="kick_ret_yds"]',
    'kickoff_return_touchdown': 'td[data-stat="kick_ret_td"]',
    'longest_kickoff_return': 'td[data-stat="kick_ret_long"]',
    'yards_per_kickoff_return': 'td[data-stat="kick_ret_yds_per_ret"]',
    'all_purpose_yards': 'td[data-stat="all_purpose_yds"]',
    'less_than_nineteen_yards_field_goal_attempts': 'td[data-stat="fga1"]',
    'less_than_nineteen_yards_field_goals_made': 'td[data-stat="fgm1"]',
    'twenty_to_twenty_nine_yard_field_goal_attempts': 'td[data-stat="fga2"]',
    'twenty_to_twenty_nine_yard_field_goals_made': 'td[data-stat="fgm2"]',
    'thirty_to_thirty_nine_yard_field_goal_attempts': 'td[data-stat="fga3"]',
    'thirty_to_thirty_nine_yard_field_goals_made': 'td[data-stat="fgm3"]',
    'fourty_to_fourty_nine_yard_field_goal_attempts': 'td[data-stat="fga4"]',
    'fourty_to_fourty_nine_yard_field_goals_made': 'td[data-stat="fgm4"]',
    'fifty_plus_yard_field_goal_attempts': 'td[data-stat="fga5"]',
    'fifty_plus_yard_field_goals_made': 'td[data-stat="fgm5"]',
    'field_goals_attempted': 'td[data-stat="fga"]',
    'field_goals_made': 'td[data-stat="fgm"]',
    'longest_field_goal_made': 'td[data-stat="fg_long"]',
    'field_goal_percentage': 'td[data-stat="fg_perc"]',
    'extra_points_attempted': 'td[data-stat="xpa"]',
    'extra_points_made': 'td[data-stat="xpm"]',
    'extra_point_percentage': 'td[data-stat="xp_perc"]',
    'punts': 'td[data-stat="punt"]',
    'total_punt_yards': 'td[data-stat="punt_yds"]',
    'longest_punt': 'td[data-stat="punt_long"]',
    'blocked_punts': 'td[data-stat="punt_blocked"]',
    'yards_per_punt': 'td[data-stat="punt_yds_per_punt"]',
    'interceptions': 'td[data-stat="def_int"]',
    'yards_returned_from_interception': 'td[data-stat="def_int_yds"]',
    'interceptions_returned_for_touchdown': 'td[data-stat="def_int_td"]',
    'longest_interception_return': 'td[data-stat="def_int_long"]',
    'passes_defended': 'td[data-stat="pass_defended"]',
    'fumbles_forced': 'td[data-stat="fumbles_forced"]',
    'fumbles_recovered': 'td[data-stat="fumbles_rec"]',
    'yards_recovered_from_fumble': 'td[data-stat="fumbles_rec_yds"]',
    # Fixed: previously duplicated the "fumbles_rec_yds" selector of the key
    # above, which would have returned return yardage instead of touchdowns.
    'fumbles_recovered_for_touchdown': 'td[data-stat="fumbles_rec_td"]',
    'sacks': 'td[data-stat="sacks"]',
    'tackles': 'td[data-stat="tackles_solo"]',
    'assists_on_tackles': 'td[data-stat="tackles_assists"]',
    'safeties': 'td[data-stat="safety_md"]',
    'yards_lost_from_sacks': 'td[data-stat="pass_sacked_yds"]',
    'fumbles_lost': 'td[data-stat="fumbles_lost"]',
    'combined_tackles': 'td[data-stat="tackles_combined"]',
    'solo_tackles': 'td[data-stat="tackles_solo"]',
    'tackles_for_loss': 'td[data-stat="tackles_loss"]',
    'quarterback_hits': 'td[data-stat="qb_hits"]',
    'average_kickoff_return_yards': 'td[data-stat="kick_ret_yds_per_ret"]',
    'kickoff_return_touchdowns': 'td[data-stat="kick_ret_td"]',
    'average_punt_return_yards': 'td[data-stat="punt_ret_yds_per_ret"]',
    'punt_return_touchdowns': 'td[data-stat="punt_ret_td"]'
}
# URL templates for pro-football-reference.com pages; the '%s' placeholders
# are filled in by the calling code (year, team abbreviation, boxscore id, ...).
SEASON_PAGE_URL = 'http://www.pro-football-reference.com/years/%s/'
SCHEDULE_URL = 'https://www.pro-football-reference.com/teams/%s/%s/gamelog/'
BOXSCORE_URL = 'https://www.pro-football-reference.com/boxscores/%s.htm'
BOXSCORES_URL = 'https://www.pro-football-reference.com/years/%s/week_%s.htm'
PLAYER_URL = 'https://www.pro-football-reference.com/players/%s/%s.htm'
ROSTER_URL = 'https://www.pro-football-reference.com/teams/%s/%s_roster.htm'
# Numeric identifiers for the playoff rounds.
WILD_CARD = 100
DIVISION = 101
CONF_CHAMPIONSHIP = 102
SUPER_BOWL = 103
# Playoff result strings as they appear on the site.
LOST_WILD_CARD = 'Lost WC'
LOST_DIVISIONAL = 'Lost Divisional'
LOST_CONF_CHAMPS = 'Lost Conference Championship'
LOST_SUPER_BOWL = 'Lost Super Bowl'
WON_SUPER_BOWL = 'Won Super Bowl'
| StarcoderdataPython |
155555 | <filename>chainer_chemistry/models/mpnn.py
from functools import partial
from typing import Optional # NOQA
import chainer
from chainer import cuda, functions # NOQA
from chainer_chemistry.config import MAX_ATOMIC_NUM
from chainer_chemistry.links import EmbedAtomID
from chainer_chemistry.links.readout.ggnn_readout import GGNNReadout
from chainer_chemistry.links.readout.mpnn_readout import MPNNReadout
from chainer_chemistry.links.update.ggnn_update import GGNNUpdate
from chainer_chemistry.links.update.mpnn_update import MPNNUpdate
class MPNN(chainer.Chain):
    """Message Passing Neural Networks (MPNN).

    Args:
        out_dim (int): dimension of output feature vector
        hidden_channels (int): dimension of feature vector for each node
        n_update_layers (int): number of update layers
        n_atom_types (int): number of types of atoms
        concat_hidden (bool): If set to True, readout is executed in
            each layer and the result is concatenated
        weight_tying (bool): enable weight_tying or not
        n_edge_types (int): number of edge type.
            Defaults to 4 for single, double, triple and aromatic bond.
        nn (~chainer.Link): Neural Networks for expanding edge vector
            dimension
        message_func (str): message function. 'edgenet' and 'ggnn' are
            supported.
        readout_func (str): readout function. 'set2set' and 'ggnn' are
            supported.
    """

    def __init__(
            self,
            out_dim,  # type: int
            hidden_channels=16,  # type: int
            n_update_layers=4,  # type: int
            n_atom_types=MAX_ATOMIC_NUM,  # type: int
            concat_hidden=False,  # type: bool
            weight_tying=True,  # type: bool
            n_edge_types=4,  # type: int
            nn=None,  # type: Optional[chainer.Link]
            message_func='edgenet',  # type: str
            readout_func='set2set',  # type: str
    ):
        # type: (...) -> None
        super(MPNN, self).__init__()
        # Validate the requested message/readout functions up front so a typo
        # fails fast instead of silently selecting the fallback branch.
        if message_func not in ('edgenet', 'ggnn'):
            raise ValueError(
                'Invalid message function: {}'.format(message_func))
        if readout_func not in ('set2set', 'ggnn'):
            raise ValueError(
                'Invalid readout function: {}'.format(readout_func))
        # With concat_hidden, every update layer gets its own readout;
        # otherwise a single readout is applied after the last update.
        n_readout_layer = n_update_layers if concat_hidden else 1
        # With weight tying, one shared message-passing layer is reused for
        # every update step.
        n_message_layer = 1 if weight_tying else n_update_layers
        with self.init_scope():
            # Update
            self.embed = EmbedAtomID(out_size=hidden_channels,
                                     in_size=n_atom_types)
            if message_func == 'ggnn':
                self.update_layers = chainer.ChainList(*[
                    GGNNUpdate(
                        hidden_channels=hidden_channels,
                        n_edge_types=n_edge_types)
                    for _ in range(n_message_layer)
                ])
            else:
                self.update_layers = chainer.ChainList(*[
                    MPNNUpdate(hidden_channels=hidden_channels, nn=nn)
                    for _ in range(n_message_layer)
                ])
            # Readout
            if readout_func == 'ggnn':
                self.readout_layers = chainer.ChainList(*[
                    GGNNReadout(out_dim=out_dim,
                                in_channels=hidden_channels * 2)
                    for _ in range(n_readout_layer)
                ])
            else:
                self.readout_layers = chainer.ChainList(*[
                    MPNNReadout(
                        out_dim=out_dim, in_channels=hidden_channels,
                        n_layers=1)
                    for _ in range(n_readout_layer)
                ])
        self.out_dim = out_dim
        self.hidden_channels = hidden_channels
        self.n_update_layers = n_update_layers
        self.n_edge_types = n_edge_types
        self.concat_hidden = concat_hidden
        self.weight_tying = weight_tying
        self.message_func = message_func
        self.readout_func = readout_func

    def __call__(self, atom_array, adj):
        # type: (numpy.ndarray, numpy.ndarray) -> chainer.Variable
        """Forward propagation.

        Args:
            atom_array (numpy.ndarray): minibatch of molecular which is
                represented with atom IDs (representing C, O, S, ...)
                `atom_array[mol_index, atom_index]` represents `mol_index`-th
                molecule's `atom_index`-th atomic number
            adj (numpy.ndarray): minibatch of adjancency matrix with edge-type
                information

        Returns:
            ~chainer.Variable: minibatch of fingerprint
        """
        # reset state
        self.reset_state()
        # Integer input is interpreted as atom IDs and embedded; any other
        # dtype is assumed to already be a feature matrix.
        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)
        else:
            h = atom_array
        if self.readout_func == 'ggnn':
            # GGNN readout additionally needs the initial node features h0,
            # copied onto the same device as h.
            h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
            readout_layers = [
                partial(readout_layer, h0=h0)
                for readout_layer in self.readout_layers
            ]
        else:
            readout_layers = self.readout_layers
        g_list = []
        for step in range(self.n_update_layers):
            # When weights are tied there is only one message layer (index 0).
            message_layer_index = 0 if self.weight_tying else step
            h = self.update_layers[message_layer_index](h, adj)
            if self.concat_hidden:
                g = readout_layers[step](h)
                g_list.append(g)
        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = readout_layers[0](h)
            return g

    def reset_state(self):
        # type: () -> None
        """Reset the hidden state of every update layer."""
        # Plain loop instead of the previous side-effect-only list
        # comprehension, which built and discarded a list of Nones.
        for update_layer in self.update_layers:
            update_layer.reset_state()
| StarcoderdataPython |
1855183 | <gh_stars>0
import pygame
from pygame.locals import *
from pygame.font import *
import time
import random
# import raw assets (paths use Windows-style separators)
raw_background_img = pygame.image.load("assets\\background.png")
raw_upperPillar_img = pygame.image.load("assets\\upper_pillar.png")
raw_lowerPillar_img = pygame.image.load("assets\\lower_pillar.png")
raw_ground_img = pygame.image.load("assets\\ground.png")
gameIcon = pygame.image.load("assets\\icon.png")
pygame.init()
# game properties
# basic RGB colors
red = (255,0,0)
green = (0,150,0)
white = (255,255,255)
black = (0,0,0)
running = True
is_game_over = False
is_game_paused = False
# store the reason of death (shown on the game-over screen)
reason_of_death = ""
# score board: number of pillars passed so far
pillar_passed = 0
# actual drawing height is (window height - ground height)
ground_height = 110
# window properties
window_height = 500
window_width = 300
# box properties (the player character is a small rectangle)
gravity = 0.07
box_f_speed = 0
box_v_speed = 0
box_h = 10
box_w = 10
box_x = 100
box_y = 200
box_jump_speed = 2.2
# random pillar properties
pillar_hole_size = 100
pillar_width = 30
pillar_moving_speed = 1.0
pillar_group = []
# time in ms between pillar spawns
pillar_generate_duration = 200
# start very large so the first pillar spawns immediately
time_past_since_last_pillar = 100000
# ==================================[ code below this line are providing class supports for game process ]===================================
class Pillar(object):
    """A single pillar obstacle that scrolls in from the right edge."""

    def __init__(self):
        # Not yet counted towards the score.
        self.isPassed = False
        # Vertical center of the gap the box must fly through.
        self.pillar_hole_height = random.randint(150, 250)
        # Spawn just beyond the right edge so it scrolls into view.
        self.x = window_width + 20
# =================================[ code below this line are providing function supports for game process ]==================================
def pillar_move(pillar_group):
    """Scroll every pillar leftwards by the current speed; return the group."""
    for p in pillar_group:
        p.x = p.x - pillar_moving_speed
    return pillar_group
def push_pillar(pillar_group):
    """Spawn a fresh pillar and add it to the group (mutates in place)."""
    new_pillar = Pillar()
    pillar_group.append(new_pillar)
def pop_pillar(pillar_group):
    """Return a copy of the pillar group with the oldest (front) pillar dropped."""
    remaining = pillar_group[1:]
    return remaining
def remove_used_pillar(pillar_group):
    """Drop pillars that have scrolled off the left edge of the screen.

    Pillars move right-to-left in spawn order, so only the front (oldest)
    entries can ever be off-screen; pop from the front while that holds.
    Replaces the old index-based loop whose bare ``except: pass`` silently
    swallowed every error.
    """
    while pillar_group and pillar_group[0].x < 0:
        pillar_group = pillar_group[1:]
    return pillar_group
def is_box_in_hole(pillar, box_x, box_y, box_h, box_w):
    """Return True while the box clears *pillar*, False on a collision.

    Vertical position is only checked once the pillar horizontally overlaps
    the box; a failed check records the cause in the global reason_of_death.
    Fixed: the horizontal test now uses the box_x parameter instead of a
    hard-coded 100 (the box's actual x in this game), so the function works
    for any box position.
    """
    global reason_of_death
    # No horizontal overlap between pillar [pillar.x, pillar.x + pillar_width]
    # and box [box_x, box_x + box_w] -> no collision possible.
    if pillar.x >= (box_x + box_w) or pillar.x <= (box_x - pillar_width):
        return True
    hole_top = pillar.pillar_hole_height - (pillar_hole_size / 2)
    hole_bottom = pillar.pillar_hole_height + (pillar_hole_size / 2)
    # box_y is the box's top edge; larger y is lower on screen.
    if box_y <= hole_top:
        reason_of_death = "you are too high!"
        return False
    if box_y >= hole_bottom - box_h:
        reason_of_death = "you are too low!"
        return False
    return True
def collide_detect():
    """End the game if the box hits any pillar or the ground."""
    global reason_of_death
    # Pillar collisions: any pillar the box fails to clear ends the game.
    for p in pillar_group:
        if not is_box_in_hole(p, box_x, box_y, box_h, box_w):
            game_over()
    # Ground collision: the playable area stops above the ground strip.
    if box_y > (window_height - ground_height):
        reason_of_death = "you hit the ground!"
        game_over()
def box_fall(box_v_speed, box_y):
    """Apply one tick of gravity; return the new (speed, y) pair.

    The position advances by the already-updated speed, matching simple
    Euler integration.
    """
    new_speed = box_v_speed + gravity
    return (new_speed, box_y + new_speed)
def score_increase(pillar_group, pillar_passed, pillar_moving_speed):
    """Count newly passed pillars; each one bumps the score and scroll speed.

    A pillar counts once: its isPassed flag is raised the first time its
    right edge moves past the box's x position.
    """
    for p in pillar_group:
        fully_behind_box = (p.x + pillar_width) < box_x
        if fully_behind_box and not p.isPassed:
            p.isPassed = True
            pillar_passed += 1
            pillar_moving_speed += 0.1
    return (pillar_group, pillar_passed, pillar_moving_speed)
def draw_text(surface, text, color=white, pos=(0,0), font_size=30):
    """Render *text* in bold Consolas at *pos* on *surface*."""
    font = pygame.font.SysFont("Consolas", font_size)
    font.set_bold(True)
    rendered = font.render(text, 1, color)
    surface.blit(rendered, pos)
def game_over():
    """Flag the game as over (character dead) and unpause it.

    Removed the unused ``global running`` declaration: ``running`` was never
    assigned in this function.
    """
    global is_game_over
    global is_game_paused
    global box_f_speed
    is_game_over = True
    is_game_paused = False
    # Give the dead box the pillars' scroll speed so it drifts with the scene.
    box_f_speed = pillar_moving_speed
# ========================================[ code below this line are resposible for game process ]==========================================
# game initialization
# modify the raw assets into assets that can be used in the game
first_background_img = pygame.transform.scale(raw_background_img, (window_width, window_height))
first_background_img_x = 0
second_background_img = pygame.transform.scale(raw_background_img, (window_width, window_height))
second_background_img_x = window_width
pillar_image_width = pillar_width * 2
pillar_img_height = pillar_width * 8
upper_pillar_img = pygame.transform.scale(raw_upperPillar_img, (pillar_image_width, pillar_img_height))
lower_pillar_img = pygame.transform.scale(raw_lowerPillar_img, (pillar_image_width, pillar_img_height))
first_ground_img = pygame.transform.scale(raw_ground_img, (window_width, window_height))
first_ground_img_x = 0
second_ground_img = pygame.transform.scale(raw_ground_img, (window_width, window_height))
second_ground_img_x = window_width
# create window
(width, height) = (window_width, window_height)
# initialize screen
screen = pygame.display.set_mode((width, height))
# set title
pygame.display.set_caption("flappy box")
# set icon
pygame.display.set_icon(gameIcon)
# update the current "screen" to the actual screen
pygame.display.flip()
while running:
    # basic event control
    for event in pygame.event.get():
        # window exit
        if event.type == pygame.QUIT:
            running = False
        # key monitor (keys are ignored once the game is over)
        if event.type == pygame.KEYDOWN and not is_game_over:
            # box jump control
            if event.key == pygame.K_SPACE and not is_game_paused:
                box_v_speed = -box_jump_speed
            # toggle pause
            if event.key == pygame.K_ESCAPE:
                is_game_paused = not is_game_paused
    # do the physics if the game is not over nor paused
    if not is_game_over and not is_game_paused:
        # physics
        time_past_since_last_pillar += 1
        # box character falling physics calculation
        # pillar physics
        if time_past_since_last_pillar >= pillar_generate_duration:
            # add a new pillar if time duration is reached
            push_pillar(pillar_group)
            time_past_since_last_pillar = 0
        pillar_group = remove_used_pillar(pillar_group)
        pillar_group = pillar_move(pillar_group)
        collide_detect()
        pillar_group, pillar_passed, pillar_moving_speed = score_increase(pillar_group, pillar_passed, pillar_moving_speed)
        # modify the pillar generate duration in order to fit the increasing pillar moving speed
        pillar_generate_duration = 200 / pillar_moving_speed
    # allow the box to keep falling off the screen after game is over
    if not is_game_paused:
        # box physics
        if box_y > 0:
            box_v_speed, box_y = box_fall(box_v_speed, box_y)
        box_x += box_f_speed
    # graphics
    # clear screen by redraw the background image
    screen.blit(first_background_img,[first_background_img_x,0])
    screen.blit(second_background_img,[second_background_img_x,0])
    # the two background images are connected together to leave no space between
    if not is_game_over and not is_game_paused:
        if first_background_img_x < -window_width:
            # respawn the image at initial point if the image is completely gone from the screen
            first_background_img_x = window_width
        else:
            # make the background image to move 1 pixel in 1 loop cycle
            first_background_img_x -= 1
        if second_background_img_x < -window_width:
            # respawn the image at initial point if the image is completely gone from the screen
            second_background_img_x = window_width
        else:
            # make the background image to move 1 pixel in 1 loop cycle
            second_background_img_x -= 1
    # draw pillars
    for pillar in pillar_group:
        # top pillar
        screen.blit(upper_pillar_img, [pillar.x - 15, pillar.pillar_hole_height - (pillar_hole_size / 2) - pillar_img_height + 22])
        # pygame.draw.rect(screen, green, (pillar.x, 0, pillar_width, pillar.pillar_hole_height - (pillar_hole_size / 2)))
        # bottom pillar
        # pygame.draw.rect(screen, green, (pillar.x, pillar.pillar_hole_height + (pillar_hole_size / 2), pillar_width, window_height - pillar.pillar_hole_height - (pillar_hole_size / 2) - ground_height + 20))
        screen.blit(lower_pillar_img, [pillar.x - 15, pillar.pillar_hole_height + (pillar_hole_size / 2) - 7])
    # draw box character
    pygame.draw.rect(screen, red, (box_x, box_y, box_w, box_h))
    # draw ground (in order to cover the bottom of the lower pillar, the ground is illustrated after the pillars)
    screen.blit(first_ground_img, [first_ground_img_x, 0])
    # NOTE(review): blits first_ground_img at the second ground's x; likely
    # meant second_ground_img — harmless since both are the same scaled image.
    screen.blit(first_ground_img, [second_ground_img_x, 0])
    # the two ground images are connected together to leave no space between
    if not is_game_over and not is_game_paused:
        if first_ground_img_x < -window_width:
            # respawn the image at initial point if the image is completely gone from the screen
            first_ground_img_x = window_width
        else:
            # the ground scrolls at pillar speed (faster than the background)
            first_ground_img_x -= pillar_moving_speed
        if second_ground_img_x < -window_width:
            # respawn the image at initial point if the image is completely gone from the screen
            second_ground_img_x = window_width
        else:
            # the ground scrolls at pillar speed (faster than the background)
            second_ground_img_x -= pillar_moving_speed
    # display game paused if game is paused
    if is_game_paused:
        # display game paused tag
        draw_text(screen, "GAME PAUSED", white, (10, 200), font_size=40)
    # display game over, the score and the reason of death if the game is over
    if is_game_over:
        # display game over tag
        draw_text(screen, "GAME OVER", white, (25, 50), font_size=43)
        # display score
        if pillar_passed > 1:
            # score above 1: use the plural form (pillars)
            draw_text(screen, "you passed " + str(pillar_passed) + " pillars", white, (25, 200), font_size=20)
        else:
            # score of 0 or 1: use the singular form (pillar)
            draw_text(screen, "you passed " + str(pillar_passed) + " pillar", white, (25, 200), font_size=20)
        # display the reason of death
        draw_text(screen, reason_of_death, white, (25, 240), font_size=20)
    else:
        # draw score board
        draw_text(screen, str(pillar_passed), white, (window_width / 2, 50))
    # update display
    pygame.display.update()
time.sleep(0.01) | StarcoderdataPython |
139318 | <filename>backend/party/views.py
from core.models import Party
from rest_framework import generics
from .serializers import PartySerializer
class PartyListView(generics.ListAPIView):
    """
    Read-only endpoint returning the list of all parties.
    """
    serializer_class = PartySerializer
    # permission_classes = (permissions.IsAuthenticated,)
    queryset = Party.objects.all()
class PartyDetailView(generics.RetrieveAPIView):
    """
    Read-only endpoint returning a single party, looked up by its id.
    """
    lookup_field = 'id'
    serializer_class = PartySerializer
    # permission_classes = (permissions.IsAuthenticated,)
    queryset = Party.objects.all()
| StarcoderdataPython |
6446588 | <reponame>RoastVeg/cports<filename>main/xlsatoms/template.py
# cports build template metadata for the xlsatoms X.Org utility.
pkgname = "xlsatoms"
pkgver = "1.1.3"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = ["pkgconf"]
makedepends = ["libxcb-devel"]
pkgdesc = "List interned atoms defined on the X server"
maintainer = "q66 <<EMAIL>>"
license = "MIT"
url = "https://xorg.freedesktop.org"
# $(XORG_SITE) is expanded by the build system; pkgname/pkgver fill the tarball name
source = f"$(XORG_SITE)/app/{pkgname}-{pkgver}.tar.bz2"
sha256 = "57868f958c263976727881f3078e55b86b4a109dc578d2b92f5c6d690850a382"
def post_install(self):
    # MIT requires shipping the license text with the package.
    self.install_license("COPYING")
| StarcoderdataPython |
8153073 | """Export best trajectory GULP .gin input endpoint files for calculation on another computer.
"""
import os
import sys
import shutil
from PyLib.TinyParser import TinyParser
class HostGuestAtomDistances:
    """Minimum host-guest atom distances at the window, and on the left and right window-sides, for a particular host-guest.
    """
    def __init__(self,parser,sDirPath):
        """Initialise object from line in summary file 'trajectory-NEB-results.txt'.
        [parser]: TinyParser, loaded with "trajectory-NEB-results.txt" from FindTrajectory run.
        [sDirPath]: string, full directory path where located.
        """
        try:
            self.sDirPath= sDirPath
            # pre-set so the except-handler below can still print something
            # when parser.EatWord() itself fails
            self.sHostGuestStep= None
            self.sHostGuestStep= parser.EatWord()
            # the record name ends with "_<stepSize>"; strip that suffix to
            # recover the plain HOST_GUEST name
            iScoreIndex= self.sHostGuestStep.rfind("_")
            self.sHostGuest= self.sHostGuestStep[:iScoreIndex]
            parser.EatSpace()
            self.fLeftWinDistance= float(parser.EatWord())
            parser.EatSpace()
            self.sLeftWinApproach= parser.EatWord()
            parser.EatSpace()
            self.fWinDistance= float(parser.EatWord())
            parser.EatSpace()
            self.fRightWinDistance= float(parser.EatWord())
            parser.EatSpace()
            self.sRightWinApproach= parser.EatWord()
            parser.EatSpace()
        except:
            # report which file/record failed, then re-raise for the caller
            print(f"Error on processing {sDirPath}, {self.sHostGuestStep}")
            raise
    def Print(self):
        """Print this record as one tab-separated report line."""
        print(f"{self.sHostGuestStep}\t{self.fLeftWinDistance}\t{self.sLeftWinApproach}\t{self.fWinDistance}\t{self.fRightWinDistance}\t{self.sRightWinApproach}")
    def Primary(self,other):
        """Is this the primary trajectory for this host and guest, i.e. that with maximum host-guest atom distance away from the window?
        [other]: HostGuestAtomDistances, another trajectory (different step size) in comparison for this host and guest.
        <retval>: boolean, is primary trajectory.
        """
        # compare the worse (smaller) of the two window-side distances
        return min(self.fLeftWinDistance,self.fRightWinDistance)>min(other.fLeftWinDistance,other.fRightWinDistance)
def DoSlash(sDirName):
    """Return *sDirName* guaranteed to end with a single trailing slash.

    [sDirName]: string, directory name or path (may be empty).
    <retval>: string, the input with "/" appended if it was missing.
    """
    # str.endswith is safe for the empty string, unlike sDirName[-1]
    # which raised IndexError on "".
    return sDirName if sDirName.endswith("/") else sDirName + "/"
def FindMetricsForBestHostGuests(dtTrajMaxHostGuestAtomMinDist,dtExcludeHostGuest,lsFinalDirPaths):
    """Parse NEB results from given directories to find metrics for the best host-guests.
    Updates dtTrajMaxHostGuestAtomMinDist in place.
    [dtTrajMaxHostGuestAtomMinDist]: dict, key: formal host_guest name; value: HostGuestAtomDistances object for trajectory with maximum host-guest atom minimum distance.
    [dtExcludeHostGuest]: dict, key: formal host_guest name for excluded guests; value: 1. May be None (nothing excluded).
    [lsFinalDirPaths]: list, directories with 'trajectory-NEB-results.txt' files to parse for metrics.
    """
    for sDirPath in lsFinalDirPaths:
        sFilePath= sDirPath+"trajectory-NEB-results.txt"
        sWorkDirPath= sDirPath+"out/"
        if not os.access(sFilePath,os.F_OK): continue # no results yet
        parser= TinyParser(None,sFilePath)
        while not parser.Finished():
            trajInfo= HostGuestAtomDistances(parser,sDirPath)
            # a hidden CIF ('__HOST_GUEST.cif' rather than 'HOST_GUEST.cif') indicates a duplicate run for this MOF; the designated folder is located elsewhere.
            bHiddenCifFile= not os.path.isfile(sWorkDirPath+trajInfo.sHostGuest+".cif")
            # precedence is (excluded AND listed) OR hidden: skip excluded guests and duplicate runs
            if dtExcludeHostGuest is not None and trajInfo.sHostGuest in dtExcludeHostGuest or bHiddenCifFile: continue # skip if hidden CIF or HOST_GUEST excluded
            # keep this host-guest if new, or if better than the existing
            if trajInfo.sHostGuest not in dtTrajMaxHostGuestAtomMinDist \
                    or not dtTrajMaxHostGuestAtomMinDist[trajInfo.sHostGuest].Primary(trajInfo):
                dtTrajMaxHostGuestAtomMinDist[trajInfo.sHostGuest]= trajInfo
def FindTrajectoriesWithMaxHostGuestAtomMinDistance(lsScanDirs,lsParentDirs,sExcludeFileName):
    """parse results from all directories
    [lsScanDirs]: list, directory names; subdirectories of each <dir> are scanned for data.
    [lsParentDirs]: list, directory names assumed to be a sub-directory of the parent dir.
    [sExcludeFileName]: string or None, name of file listing formal host-guest names to be excluded.
    <retval>: dict, key: formal host_guest name; value: HostGuestAtomDistances object for trajectory with maximum host-guest atom.
    """
    # build the exclusion set (as a dict used for membership tests only)
    dtExcludeHostGuest= None
    if sExcludeFileName is not None:
        parser= TinyParser(None,sExcludeFileName)
        parser.EatSpace()
        dtExcludeHostGuest= {}
        while not parser.Finished():
            sHostGuestName= parser.EatWord()
            dtExcludeHostGuest[sHostGuestName]=1
            parser.EatSpace()
    dtTrajMaxHostGuestAtomMinDist={}
    # pass 1: every immediate subdirectory of each scan directory
    lsFinalDirPaths=[]
    for sDirPath in lsScanDirs:
        for sSubDirName in os.listdir(sDirPath):
            lsFinalDirPaths.append(DoSlash(sDirPath)+sSubDirName+"/")
    FindMetricsForBestHostGuests(dtTrajMaxHostGuestAtomMinDist,dtExcludeHostGuest,lsFinalDirPaths)
    # pass 2: sibling directories under the parent directory
    lsFinalDirPaths=[]
    for sDirPath in lsParentDirs:
        lsFinalDirPaths.append("../"+DoSlash(sDirPath))
    FindMetricsForBestHostGuests(dtTrajMaxHostGuestAtomMinDist,dtExcludeHostGuest,lsFinalDirPaths)
    return dtTrajMaxHostGuestAtomMinDist
def CopyTrajectoryEndpoints(dtTrajMaxHostGuestAtomMinDist,sCopyToDirName):
    """Copy relevant GULP .gin files to an output directory.
    [dtTrajMaxHostGuestAtomMinDist]: dict, key: formal host_guest name; value: HostGuestAtomDistances object for trajectory with maximum host-guest atom minimum distance.
    [sCopyToDirName]: string, directory name to which files are copied (also the per-trajectory subfolder name under each source dir).
    """
    # create the destination directory on first use
    if not os.access(sCopyToDirName,os.F_OK) or not os.path.isdir(sCopyToDirName): os.mkdir(sCopyToDirName)
    for traj in dtTrajMaxHostGuestAtomMinDist.values():
        # source layout: <trajDir>/<sCopyToDirName><HOST_GUEST_step>/GULP/
        sSrcDir= traj.sDirPath+sCopyToDirName+traj.sHostGuestStep+"/GULP/"
        if not os.access(sSrcDir,os.F_OK): continue # just ignore missing dirs - we'll fill such gaps later
        for sFileName in os.listdir(sSrcDir):
            # copy endpoint .gin inputs only, skipping the *_NEB.gin band files
            if sFileName[-4:]==".gin" and sFileName[-8:-4]!="_NEB":
                shutil.copy(sSrcDir+sFileName,sCopyToDirName)
def TrajectoryReport(dtTrajMaxHostGuestAtomMinDist):
    """Print a report for the selected host-guest and step size.
    The report includes the relevant host-guest atom distances and the trajectory discovery method.
    [dtTrajMaxHostGuestAtomMinDist]: dict, key: formal host_guest name; value: HostGuestAtomDistances object for trajectory with maximum host-guest atom minimum distance.
    """
    # column header for the tab-separated lines emitted by Print()
    print("# host MOF/guest molecule trajectory endpoint systems for GULP relaxation. Columns: host/guest/stepSize, hostGuestDistLeftWindow, methodLeftWindow, hostGuestDistAtWindow, hostGuestDistRightWindow, methodRightWindow.")
    for traj in dtTrajMaxHostGuestAtomMinDist.values():
        traj.Print()
    print(f"{len(dtTrajMaxHostGuestAtomMinDist)} host-guest trajectories ready for endpoint relaxation.")
def GetCmdLineArguments():
    """Read command line arguments (see syntax help in Main).
    <retval>: tuple (bool, list directory for each REFCODE; string or None, name of file listing HOST_GUEST names to exclude; list, directories to scan for subdirs; list, subdir directories of parent dir).
    """
    iNextArg = 1
    # optional "-l": only list the REFCODE directories instead of copying .gin files
    if iNextArg < len(sys.argv) and sys.argv[iNextArg].lower() == "-l":
        bListRefcodeDirs = True
        iNextArg += 1
    else:
        # bug fix: was True in both branches, which made -l a no-op and
        # Main() always list instead of copying
        bListRefcodeDirs = False
    # optional "-x <file>": host-guests named in <file> are excluded
    if iNextArg < len(sys.argv) and sys.argv[iNextArg].lower() == "-x":
        sExcludeFileName = sys.argv[iNextArg + 1]
        iNextArg += 2
    else:
        sExcludeFileName = None
    # optional "-scan <dir> ...": consume directories until "-par" or end of args
    lsScanDirs = []
    if iNextArg < len(sys.argv) and sys.argv[iNextArg].lower() == "-scan":
        iNextArg += 1
        while iNextArg < len(sys.argv) and sys.argv[iNextArg] != "-par":
            lsScanDirs.append(sys.argv[iNextArg])
            iNextArg += 1
    # optional "-par <dir> ...": consume all remaining arguments
    lsParentDirs = []
    if iNextArg < len(sys.argv) and sys.argv[iNextArg] == "-par":
        # bug fix: skip the "-par" flag itself (it used to be appended to
        # lsParentDirs as if it were a directory name)
        iNextArg += 1
        while iNextArg < len(sys.argv):
            lsParentDirs.append(sys.argv[iNextArg])
            iNextArg += 1
    return bListRefcodeDirs, sExcludeFileName, lsScanDirs, lsParentDirs
def ReportRefcodeDirectories(dtTrajMaxHostGuestAtomMinDist):
    """Print the WORKDIR/out/HOST_GUEST_StepSize/ directory for every selected trajectory.
    [dtTrajMaxHostGuestAtomMinDist]: dict, key: formal host_guest name; value: HostGuestAtomDistances object for trajectory with maximum host-guest atom minimum distance.
    """
    for trajectory in dtTrajMaxHostGuestAtomMinDist.values():
        # one tab-separated line per host-guest: name, then its out/ subdirectory
        line = f"{trajectory.sHostGuestStep}\t{trajectory.sDirPath}out/{trajectory.sHostGuestStep}/"
        print(line)
def Main():
    """Entry point: show syntax help when called without arguments; otherwise
    select the best trajectory per host-guest and either list its directory
    (-l) or copy the endpoint .gin files into out/ and print a summary.
    """
    if len(sys.argv)==1:
        print("Syntax: python ExportBestTrajectoryEndpointGins.py [-l] [-x <exclude-host-guest-file>] [-scan <dir1> [<dir2> ...]] [-par <dir3> [<dir4> ...]]\n"
            "if -l is specified, a list of the directory corresponding to each REFCODE will be output (no .gin files will be copied).\n"
            "if -x is specified, any host-guest whose formal name appears in file <exclude-host-guest-file> will be excluded.\n"
            "if [-scan] is present, all following subdirectories of each <dir> are scanned for data;\n"
            "if [-par] is present, each following <dir> is assumed to be a sub-directory of the parent dir.")
        sys.exit(1)
    bListRefcodeDirs,sExcludeFileName,lsScanDirs,lsParentDirs= GetCmdLineArguments()
    dtTrajMaxHostGuestAtomMinDist= FindTrajectoriesWithMaxHostGuestAtomMinDistance(lsScanDirs,lsParentDirs,sExcludeFileName)
    if bListRefcodeDirs:
        # report-only mode: one directory line per selected host-guest
        ReportRefcodeDirectories(dtTrajMaxHostGuestAtomMinDist)
    else:
        # copy the endpoint .gin files, then print the summary report
        CopyTrajectoryEndpoints(dtTrajMaxHostGuestAtomMinDist,"out/")
        TrajectoryReport(dtTrajMaxHostGuestAtomMinDist)
if __name__=="__main__": Main() | StarcoderdataPython |
8186837 | <reponame>simo955/RecSys_2018<filename>Utils/data/IncrementalSparseMatrix.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 09/09/2018
"""
import scipy.sparse as sps
class IncrementalSparseMatrix(object):
    """Incrementally collects (row, col, value) triplets and builds a CSR matrix.

    When auto_create_row_mapper / auto_create_col_mapper is True, arbitrary
    hashable row/column IDs are mapped to consecutive integer indices as they
    are first seen; otherwise IDs are assumed to already be integer indices.
    """

    def __init__(self, auto_create_col_mapper = False, auto_create_row_mapper = False, n_rows = None, n_cols = None):
        super(IncrementalSparseMatrix, self).__init__()
        self._row_list = []
        self._col_list = []
        self._data_list = []
        # Final shape; any dimension left as None is inferred in get_SparseMatrix().
        self._n_rows = n_rows
        self._n_cols = n_cols
        self._auto_create_col_mapper = auto_create_col_mapper
        self._auto_create_row_mapper = auto_create_row_mapper
        if self._auto_create_col_mapper:
            self._column_original_ID_to_index = {}
        if self._auto_create_row_mapper:
            self._row_original_ID_to_index = {}

    def add_data_lists(self, row_list_to_add, col_list_to_add, data_list_to_add):
        """Append parallel lists of row IDs, column IDs and cell values."""
        # Fixed assert message: the three lists must all have the SAME length
        # (the original message said "different length").
        assert len(row_list_to_add) == len(col_list_to_add) and len(row_list_to_add) == len(data_list_to_add),\
            "IncrementalSparseMatrix: element lists must have the same length"
        col_list_index = [self._get_column_index(column_id) for column_id in col_list_to_add]
        row_list_index = [self._get_row_index(row_id) for row_id in row_list_to_add]
        self._row_list.extend(row_list_index)
        self._col_list.extend(col_list_index)
        self._data_list.extend(data_list_to_add)

    def add_single_row(self, row_id, col_list, data = 1.0):
        """Append one row: every column in *col_list* receives the same *data* value."""
        n_elements = len(col_list)
        col_list_index = [self._get_column_index(column_id) for column_id in col_list]
        row_index = self._get_row_index(row_id)
        self._row_list.extend([row_index] * n_elements)
        self._col_list.extend(col_list_index)
        self._data_list.extend([data] * n_elements)

    def get_column_token_to_id_mapper(self):
        """Return a copy of the columnID->index map (identity map if no auto mapper)."""
        if self._auto_create_col_mapper:
            return self._column_original_ID_to_index.copy()
        # without an auto mapper the column IDs are already indices
        return {col: col for col in range(self._n_cols)}

    def get_row_token_to_id_mapper(self):
        """Return a copy of the rowID->index map (identity map if no auto mapper)."""
        if self._auto_create_row_mapper:
            return self._row_original_ID_to_index.copy()
        return {row: row for row in range(self._n_rows)}

    def _get_column_index(self, column_id):
        """Map *column_id* to its integer index, registering it if new."""
        if not self._auto_create_col_mapper:
            return column_id
        if column_id not in self._column_original_ID_to_index:
            self._column_original_ID_to_index[column_id] = len(self._column_original_ID_to_index)
        return self._column_original_ID_to_index[column_id]

    def _get_row_index(self, row_id):
        """Map *row_id* to its integer index, registering it if new."""
        if not self._auto_create_row_mapper:
            return row_id
        if row_id not in self._row_original_ID_to_index:
            self._row_original_ID_to_index[row_id] = len(self._row_original_ID_to_index)
        return self._row_original_ID_to_index[row_id]

    def get_SparseMatrix(self):
        """Build and return the scipy.sparse CSR matrix collected so far."""
        if self._n_rows is None:
            self._n_rows = max(self._row_list) + 1
        if self._n_cols is None:
            self._n_cols = max(self._col_list) + 1
        shape = (self._n_rows, self._n_cols)
        # duplicate (row, col) pairs are summed by csr_matrix
        sparseMatrix = sps.csr_matrix((self._data_list, (self._row_list, self._col_list)), shape=shape)
        sparseMatrix.eliminate_zeros()
        return sparseMatrix


import numpy as np


class IncrementalSparseMatrix_LowRAM(IncrementalSparseMatrix):
    """Variant storing the triplets in block-grown numpy arrays to lower RAM overhead."""

    def __init__(self, auto_create_col_mapper = False, auto_create_row_mapper = False, n_rows = None, n_cols = None):
        super(IncrementalSparseMatrix_LowRAM, self).__init__(auto_create_col_mapper = auto_create_col_mapper,
                                                             auto_create_row_mapper = auto_create_row_mapper,
                                                             n_rows = n_rows,
                                                             n_cols = n_cols)
        # Arrays grow in fixed blocks of this many cells.
        self._dataBlock = 10000000
        self._next_cell_pointer = 0
        self._row_array = np.zeros(self._dataBlock, dtype=np.int32)
        self._col_array = np.zeros(self._dataBlock, dtype=np.int32)
        self._data_array = np.zeros(self._dataBlock, dtype=np.float64)

    def add_data_lists(self, row_list_to_add, col_list_to_add, data_list_to_add):
        """Append parallel lists of row IDs, column IDs and cell values."""
        assert len(row_list_to_add) == len(col_list_to_add) and len(row_list_to_add) == len(data_list_to_add),\
            "IncrementalSparseMatrix: element lists must have the same length"
        for data_point_index in range(len(row_list_to_add)):
            # grow all three arrays by one block when full
            if self._next_cell_pointer == len(self._row_array):
                self._row_array = np.concatenate((self._row_array, np.zeros(self._dataBlock, dtype=np.int32)))
                self._col_array = np.concatenate((self._col_array, np.zeros(self._dataBlock, dtype=np.int32)))
                self._data_array = np.concatenate((self._data_array, np.zeros(self._dataBlock, dtype=np.float64)))
            # Consistency fix: rows now go through _get_row_index() like in the
            # parent class, so auto_create_row_mapper works here as well
            # (previously raw row IDs were stored directly).
            self._row_array[self._next_cell_pointer] = self._get_row_index(row_list_to_add[data_point_index])
            self._col_array[self._next_cell_pointer] = self._get_column_index(col_list_to_add[data_point_index])
            self._data_array[self._next_cell_pointer] = data_list_to_add[data_point_index]
            self._next_cell_pointer += 1

    def add_single_row(self, row_index, col_list, data = 1.0):
        """Append one row: every column in *col_list* receives the same *data* value."""
        n_elements = len(col_list)
        self.add_data_lists([row_index] * n_elements,
                            col_list,
                            [data] * n_elements)

    def get_SparseMatrix(self):
        """Build and return the scipy.sparse CSR matrix collected so far."""
        if self._n_rows is None:
            self._n_rows = self._row_array.max() + 1
        if self._n_cols is None:
            self._n_cols = self._col_array.max() + 1
        shape = (self._n_rows, self._n_cols)
        # only the filled prefix of each array is used
        sparseMatrix = sps.csr_matrix((self._data_array[:self._next_cell_pointer],
                                       (self._row_array[:self._next_cell_pointer], self._col_array[:self._next_cell_pointer])),
                                      shape=shape)
        sparseMatrix.eliminate_zeros()
        return sparseMatrix
| StarcoderdataPython |
3542081 | #!/usr/bin/env python
"""modbusTask.py: PowerPilot python LoRa"""
__version__="0.6.0"
__author__="<NAME>"
__copyright__="ElectroNet Ltd 2018"
from modbus import initModbus, readPilot , getLatestMBError, MB_SUCCESS
from logging import Logger
import logging
import _thread
from globs import *
from helpers import *
import time
def modbusTask():
    """Poll the PowerPilot meter over Modbus until mb_stop_flag is locked.

    Reads the CT ratio and device type from configuration once, then calls
    readPilot() in a 1-second loop; failures are counted and logged.
    """
    global mb_wdt_lock, mb_stop_flag, MB_SUCCESS
    mb_error_count =0
    logger = Logger(name = 'MODBUS ' + __version__,level=logging.DEBUG,filename=None)
    logger.debug('** MODBUS Task started **')
    initModbus()
    CT=int(getConfigurationVariable(NVS_CT_RATIO))
    deviceType=int(getConfigurationVariable(NVS_DEVICE_TYPE))
    # default is 22 if not defined in the config file
    DA=22
    try:
        # optional per-device override shipped in deviceid.py
        from deviceid import modbusAddress
        DA=modbusAddress
    except :
        pass
    # poll until another task locks mb_stop_flag
    while not mb_stop_flag.locked():
        res = readPilot(DA,deviceType,CT)
        if res != MB_SUCCESS:
            mb_error_count = mb_error_count + 1
            logger.error(str(mb_error_count) + " Read(s) failed - " + getLatestMBError() + " " )
        # NOTE(review): releasing mb_wdt_lock here feeds the watchdog task every
        # iteration -- confirm against the watchdog's expectations.
        if mb_wdt_lock.locked():
            mb_wdt_lock.release()
        time.sleep(1)
    logger.error('** MODBUS Task ended **')
1744122 | import os
import pyfits
import scipy
from scipy import ndimage,optimize
# Function poststamp - cuts out a postage stamp from a larger image
#
# Inputs:
# data - full image data array
# cx - x value of central pixel
# cy - y value of central pixel
# csize - length of one side of the postage stamp
# Output:
# cutout - postage stamp data array
def poststamp(data, cx, cy, csize):
    """Cut a square postage stamp out of a larger image.

    data: full image data array
    cx, cy: x/y value of the central pixel
    csize: requested side length of the stamp (odd sizes round up)
    Returns a copy of the cutout (y coordinates come first in the array).
    """
    # even sizes use csize/2, odd sizes (csize+1)/2 -- same rounding rule as before
    halfsize = int(csize / 2.0) if csize % 2 == 0 else int((csize + 1) / 2.0)
    return data[cy - halfsize:cy + halfsize, cx - halfsize:cx + halfsize].copy()
def eval_psf_match(p, data1, data2):
    """Residual between an amplitude-scaled, shifted template and a target image.

    [p]: sequence (amp, x, y) - scale factor and sub-pixel shift applied to data1.
    [data1]: template image array.
    [data2]: target image array, same shape as data1.
    <retval>: flattened residual array (shifted_template - target), suitable
              for scipy.optimize.leastsq.
    """
    # scipy.indices / scipy.float64 were NumPy alias re-exports that modern
    # SciPy/NumPy releases have removed; use numpy directly.
    import numpy as np
    amp, x, y = p
    coords = np.indices(data1.shape).astype(np.float64)
    # coords[0] indexes rows (y), coords[1] columns (x)
    coords[0] += y
    coords[1] += x
    data = amp * data1
    shift = ndimage.map_coordinates(data, coords, output=np.float64)
    return (shift - data2).flatten()
def find_shift(template,target):
    """Solve for the amplitude and (x, y) shift mapping *template* onto *target*.

    <retval>: leastsq solution array [amp, xshift, yshift].
    """
    #
    # Initialize guess for amplitude and shift
    #
    # NOTE(review): the amplitude guess looks inverted (template/target rather
    # than target/template); leastsq still converges from it -- confirm.
    p = [(template.max()/target.max()),0.,0.]
    #
    # Solve for the shift between the template and target
    #
    pfinal,ier = optimize.leastsq(eval_psf_match,p,(template,target))
    # Python 2 print statement; ier is the leastsq convergence flag (1-4 = success)
    print pfinal,ier
    return pfinal
def shift_template(template, tempcore, target, targcore):
    """Scale and shift *template* so its PSF centring matches *target*.

    The amplitude and shift are solved on the small core cutouts only, then
    applied to the full template array.
    <retval>: amplitude-scaled, shifted copy of *template*.
    """
    # scipy.indices / scipy.float64 were NumPy alias re-exports that modern
    # SciPy/NumPy releases have removed; use numpy directly.
    import numpy as np
    # Get coordinates of each pixel in the full template array
    coords = np.indices(template.shape).astype(np.float64)
    # Find the shift between template and target, using just the central
    # core region of each PSF
    pshift = find_shift(tempcore, targcore)
    #
    # Shift the template star to match the centering of the target and return
    # the result (pshift = [amp, xshift, yshift]; axis 1 is x, axis 0 is y)
    #
    coords[1] += pshift[1]
    coords[0] += pshift[2]
    shiftstar = ndimage.map_coordinates(template, coords, output=np.float64)
    return pshift[0] * shiftstar
def putpost(data, cx, cy, cutout):
    """Paste a square postage stamp back into a larger image, centred at (cx, cy).

    Inverse of poststamp(): *data* is modified in place and also returned.
    Stamps produced by poststamp() always have an even side, so they fit the
    computed slice exactly.
    """
    # bug fix: shape is an attribute, not a method -- ``cutout.shape()`` always
    # raised TypeError, and the original body re-cut from data instead of pasting
    csize = cutout.shape[0]
    oddeventest = csize / 2.0 - int(csize / 2.0)
    if oddeventest == 0:
        halfsize = int(csize / 2.0)
    else:
        halfsize = int((csize + 1) / 2.0)
    # Insert the stamp. Remember y coordinates come first
    data[cy - halfsize:cy + halfsize, cx - halfsize:cx + halfsize] = cutout
    return data
#***********************************************************************
#
# Main program
#
# Read in data file
#
# Load the NIRC2 AO K-band frame; work in float64 throughout.
# NOTE(review): scipy.float64 is a removed NumPy alias on modern SciPy --
# switch to numpy.float64 when upgrading.
data = pyfits.open("1520_nirc2_ao_K.fits")[0].data.astype(scipy.float64)
# cutout sizes: small core used for the fit, large box used for subtraction
cutsize = 20
fullsize = 100
# pixel coordinates of the PSF reference star and the two quasar images
s1x = 349
s1y = 420
ax = 691
ay = 700
bx = 548
by = 634
#
# Define the boxes for star 1
#
# *** NB: x coordinate given first here ***
star1_core = poststamp(data,s1x,s1y,cutsize)
star1 = poststamp(data,s1x,s1y,fullsize)
#
# Define the boxes for quasar image A
#
qa_core = poststamp(data,ax,ay,cutsize)
qa = poststamp(data,ax,ay,fullsize)
#
# Define the boxes for quasar image B
#
qb_core = poststamp(data,bx,by,cutsize)
qb = poststamp(data,bx,by,fullsize)
#
# Create a version of star 1, shifted to match centering of quasar A
#
shift1_2_a = shift_template(star1,star1_core,qa,qa_core)
#
# Create a version of star 1, shifted to match centering of quasar B
#
shift1_2_b = shift_template(star1,star1_core,qb,qb_core)
# NOTE(review): shifta_2_b is computed but never used below -- confirm intent
shifta_2_b = shift_template(qa,qa_core,qb,qb_core)
#
# Make a copy of the original data (just in case we screw up)
#
newdata = data.copy()
#
# Subtract the shifted PSF from the original data and write output to
# fits file
#
# same half-size rounding rule as poststamp(), so the boxes line up
oddeventest = fullsize / 2.0 - int(fullsize/2.0)
if oddeventest==0:
    halfsize = int(fullsize/2.0)
else:
    halfsize = int((fullsize+1)/2.0)
newdata[ay-halfsize:ay+halfsize,ax-halfsize:ax+halfsize] -= shift1_2_a
newdata[by-halfsize:by+halfsize,bx-halfsize:bx+halfsize] -= shift1_2_b
#newdata = qb - shift1_2_b
pyfits.PrimaryHDU(newdata).writeto("test.fits")
3506101 | # -*- coding: utf-8 -*-
import random
import logging
from collections.abc import MutableMapping
from weakref import WeakSet
from typing import Dict
logger = logging.getLogger('scuttlebutt') # type: logging.Logger
class RandomlyOrderedDictItem(object):
    """Node of a circular doubly-linked list holding one key/value pair.

    ``iterators`` weakly tracks the iterators currently pointing at this node
    so they can be repointed when the node is inserted or removed.
    """
    def __init__(self, key, value = None, previous_item: 'RandomlyOrderedDictItem' = None, next_item: 'RandomlyOrderedDictItem' = None):
        self.key = key
        self.previous_item = previous_item
        self.next_item = next_item
        # weak references only: a node must not keep its iterators alive
        self.iterators = WeakSet() # type: WeakSet[RandomlyOrderedDictIterator]
        self.value = value


class RandomlyOrderedDictIterator(object):
    """Cursor over the node ring; nodes keep it consistent across mutation."""

    nextItem = None # type: RandomlyOrderedDictItem

    def __init__(self, nextItem: RandomlyOrderedDictItem):
        self.set_next_item(nextItem)

    def set_next_item(self, item: RandomlyOrderedDictItem):
        """Repoint this iterator to *item*, keeping the nodes' weak registries in sync."""
        if self.nextItem != None:
            self.nextItem.iterators.remove(self)
        self.nextItem = item
        if item != None:
            self.nextItem.iterators.add(self)

    def get_next_item(self) -> RandomlyOrderedDictItem:
        """Return the current node and advance to its successor."""
        item = self.nextItem
        if item != None:
            self.set_next_item(item.next_item)
        return item


class RandomlyOrderedDict(MutableMapping):
    """A mapping whose iteration order is randomized at insertion time.

    New keys are spliced into a circular doubly-linked list in front of a
    randomly chosen existing node; live iterators are repointed so insertion
    and deletion remain safe during iteration.
    """
    def __init__(self):
        self._root = None # type: RandomlyOrderedDictItem
        self._map = {} # type: Dict[str,RandomlyOrderedDictItem]

    def __setitem__(self, key, value):
        """Insert *key* at a random ring position, or update its value if present."""
        # Bug fix: the original tested ``value not in self._map`` (comparing the
        # VALUE against the keys), so re-assigning an existing key spliced in a
        # duplicate node and corrupted the ring. Update the node in place instead.
        if key in self._map:
            self._map[key].value = value
            return
        if self._root == None:
            # first element: a one-node ring pointing at itself
            self._root = RandomlyOrderedDictItem(key, value=value)
            self._map[key] = self._root.previous_item = self._root.next_item = self._root
        else:
            insert_before_key = random.choice(list(self._map.keys()))
            previous_item = self._map[insert_before_key].previous_item # type: RandomlyOrderedDictItem
            next_item = previous_item.next_item
            item = RandomlyOrderedDictItem(key, value=value, previous_item=previous_item, next_item=next_item)
            self._map[key] = previous_item.next_item = next_item.previous_item = item
            # iterators parked on the displaced node now see the new node first
            for iterator in next_item.iterators.copy(): # type: RandomlyOrderedDictIterator
                if iterator != None:
                    iterator.set_next_item(item)
            if insert_before_key == self._root.key:
                self._root = item

    def __delitem__(self, key):
        item = self._map[key] # type: RandomlyOrderedDictItem
        if item is item.next_item:
            # sole element: the ring becomes empty
            next_item = None
        else:
            previous_item = item.previous_item
            next_item = item.next_item
            previous_item.next_item = next_item
            next_item.previous_item = previous_item
        if item is self._root:
            self._root = next_item
        # repoint any iterator parked on the removed node
        for iterator in item.iterators.copy(): # type: RandomlyOrderedDictIterator
            if iterator != None:
                iterator.set_next_item(next_item)
        del self._map[key]

    def __getitem__(self, key):
        return self._map[key].value

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        """Yield the keys in ring order, starting from the current root node."""
        if len(self) == 0:
            return
        iterator = RandomlyOrderedDictIterator(self._root)
        item = iterator.get_next_item()
        while True:
            yield item.key
            item = iterator.get_next_item()
            # stop once the cursor wraps back around to the root
            if item is self._root:
                return
| StarcoderdataPython |
4966617 | from .suggestions.searchsuggestion import SearchSuggestion
from .suggestions.moviesuggestion import MovieSuggestion
from .suggestions.tvshowsuggestion import TVShowSuggestion
from .suggestions.peoplesuggestion import PeopleSuggestion
from .suggestions.mediasuggestion import MediaSuggestion
from .suggestions.textsuggestion import TextSuggestion
# Media types accepted by the API, mapped to their display category names.
SUPPORTED_MEDIA_TYPES = {'movie': 'Movies', 'tv': 'TV', 'people': 'People', 'person': 'People'}

# Factory table: media type -> suggestion class used to build that result kind.
SUGGESTIONS = {
    'movie': MovieSuggestion,
    'tv': TVShowSuggestion,
    'people': PeopleSuggestion,
    'person': PeopleSuggestion
}

# URL templates for outbound links ({} is the YouTube video id).
URL_YOUTUBE_WATCH = 'https://youtube.com/watch?v={}'
URL_MOVIEDB_BASE = 'https://www.themoviedb.org'
| StarcoderdataPython |
3341601 | <reponame>google-cloud-sdk-unofficial/google-cloud-sdk<filename>lib/googlecloudsdk/command_lib/storage/optimize_parameters_util.py
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util for dynamically setting the best performing app configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import multiprocessing
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
# Size of each component used for sliced object downloads.
COMPONENT_SIZE = '5Mi'

# Tuning profile for workloads with many files; "LOW"/"HIGH" CPU refers to the
# core-count buckets used in detect_and_set_best_config (< 4 vs >= 4 cores).
MULTI_FILE_LOW_CPU_PROCESS_COUNT = 2
MULTI_FILE_HIGH_CPU_PROCESS_COUNT = 16
MULTI_FILE_LOW_CPU_THREAD_COUNT = 10
MULTI_FILE_HIGH_CPU_THREAD_COUNT = 4
MULTI_FILE_LOW_CPU_SLICED_OBJECT_DOWNLOAD_THRESHOLD = '50Mi'
MULTI_FILE_HIGH_CPU_SLICED_OBJECT_DOWNLOAD_THRESHOLD = '10Mi'
MULTI_FILE_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS = 10

# Tuning profile for single-file workloads (< 8 vs >= 8 cores).
SINGLE_FILE_LOW_CPU_PROCESS_COUNT = 4
SINGLE_FILE_HIGH_CPU_PROCESS_COUNT = 8
SINGLE_FILE_THREAD_COUNT = 2
SINGLE_FILE_SLICED_OBJECT_DOWNLOAD_THRESHOLD = '50Mi'
SINGLE_FILE_LOW_CPU_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS = 8
SINGLE_FILE_HIGH_CPU_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS = 16
def _set_if_not_user_set(property_name, value):
  """Sets property to optimized value if user did not set custom one.

  Args:
    property_name (str): Attribute name under properties.VALUES.storage.
    value: Tuned value to apply when the property is currently unset.
  """
  storage_property = getattr(properties.VALUES.storage, property_name)
  # None means the user supplied no explicit value; never override user choices.
  if storage_property.Get() is None:
    storage_property.Set(value)
def detect_and_set_best_config(is_estimated_multi_file_workload):
  """Determines best app config based on system and workload.

  Args:
    is_estimated_multi_file_workload (bool): True when the transfer is expected
      to involve many files (vs. one large object); selects the tuning profile.
  """
  if is_estimated_multi_file_workload:
    # Many files: fixed component settings; process/thread split depends on cores.
    _set_if_not_user_set('sliced_object_download_component_size',
                         COMPONENT_SIZE)
    _set_if_not_user_set('sliced_object_download_max_components',
                         MULTI_FILE_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
    if multiprocessing.cpu_count() < 4:
      log.info('Using low CPU count, multi-file workload config.')
      _set_if_not_user_set('process_count', MULTI_FILE_LOW_CPU_PROCESS_COUNT)
      _set_if_not_user_set('thread_count', MULTI_FILE_LOW_CPU_THREAD_COUNT)
      _set_if_not_user_set('sliced_object_download_threshold',
                           MULTI_FILE_LOW_CPU_SLICED_OBJECT_DOWNLOAD_THRESHOLD)
    else:
      log.info('Using high CPU count, multi-file workload config.')
      _set_if_not_user_set('process_count', MULTI_FILE_HIGH_CPU_PROCESS_COUNT)
      _set_if_not_user_set('thread_count', MULTI_FILE_HIGH_CPU_THREAD_COUNT)
      _set_if_not_user_set(
          'sliced_object_download_threshold',
          MULTI_FILE_HIGH_CPU_SLICED_OBJECT_DOWNLOAD_THRESHOLD)
  else:
    # Single large file: fixed threshold and component size; the degree of
    # parallelism depends on the core count.
    _set_if_not_user_set('sliced_object_download_threshold',
                         SINGLE_FILE_SLICED_OBJECT_DOWNLOAD_THRESHOLD)
    _set_if_not_user_set('sliced_object_download_component_size',
                         COMPONENT_SIZE)
    if multiprocessing.cpu_count() < 8:
      log.info('Using low CPU count, single-file workload config.')
      _set_if_not_user_set('process_count', SINGLE_FILE_LOW_CPU_PROCESS_COUNT)
      _set_if_not_user_set('thread_count', SINGLE_FILE_THREAD_COUNT)
      _set_if_not_user_set(
          'sliced_object_download_max_components',
          SINGLE_FILE_LOW_CPU_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
    else:
      log.info('Using high CPU count, single-file workload config.')
      _set_if_not_user_set('process_count', SINGLE_FILE_HIGH_CPU_PROCESS_COUNT)
      _set_if_not_user_set('thread_count', SINGLE_FILE_THREAD_COUNT)
      _set_if_not_user_set(
          'sliced_object_download_max_components',
          SINGLE_FILE_HIGH_CPU_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
| StarcoderdataPython |
3266648 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
]
| StarcoderdataPython |
6698789 | <reponame>DiceNameIsMy/fastapi-registration
from typing import Optional
from pydantic import BaseModel, root_validator
class _UserBase(BaseModel):
    """Shared user contact fields; both are optional at this level."""
    phone: Optional[str] = None
    email: Optional[str] = None

    class Config:
        # Allow building schema instances directly from ORM model objects.
        orm_mode = True
class UserRepr(_UserBase):
    """Public representation of a user (contact fields only)."""
    pass
class UserProfile(_UserBase):
    """User profile payload: contact fields plus the database id when known."""
    id: Optional[int]
class User(_UserBase):
    """Internal user schema including credentials and account state."""
    id: Optional[int]
    password: str
    is_active: bool

    def update(self, upd_user: UserRepr) -> None:
        """Copy every non-None field of *upd_user* onto this instance."""
        data = upd_user.dict(exclude_none=True)
        for key in data:
            setattr(self, key, data[key])
class CreateUser(_UserBase):
    """Registration payload: matching passwords plus email and/or phone."""
    password1: str
    password2: str
    is_active: bool = True

    @property
    def password(self) -> str:
        # The validator guarantees password1 == password2, so either field is
        # the password. (The body was corrupted to invalid syntax upstream.)
        return self.password1

    @root_validator(pre=True)
    def check_passwords_match(cls, values):
        """Reject mismatched passwords and require at least one contact field."""
        if values["password1"] != values["password2"]:
            raise ValueError("Passwords do not match")
        if not any((values.get("email", None), values.get("phone", None))):
            raise ValueError("Email or Phone is required")
        return values

    def dict_to_create(self) -> dict:
        """Flatten the payload into kwargs suitable for the user-creation query."""
        return self.dict(include={"email", "phone", "is_active"}) | {
            "password": self.password,
        }
class PaginatedUsers(BaseModel):
    """One page of user representations plus the total match count."""
    items: list[UserRepr]
    total: int
| StarcoderdataPython |
4867374 | """
Various utils functions for output analysis
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import json, re
from pathlib import Path
from typing import Tuple
from pathlib import Path
from data_loader import DataModule
from basecaller import Basecaller
import utils
def create_train_history_figure(info_path: str, save_path: str = None, loss_lim: Tuple[float, float] = None, accuracy_lim: Tuple[float, float] = (0, 1), figsize: Tuple[int,int] = (6,4)):
    """Create (and display/save) figure with loss, val_loss and val_accuracy from training.

    Args:
        info_path (str): Path to info file with training history
        save_path (str, optional): Where to save figure - if None, it is displayed. Defaults to None.
        loss_lim (tuple, optional): y-limits for the loss axis. Defaults to None (auto).
        accuracy_lim (tuple, optional): y-limits for the accuracy axis. Defaults to (0, 1).
        figsize (tuple, optional): matplotlib figure size. Defaults to (6, 4).
    """
    with open(info_path, 'r') as f:
        info = json.load(f)
    history = info['train_history']
    # extract title from info path file
    # NOTE(review): raises AttributeError when the path does not match
    # 'info.<name>.json' -- confirm callers guarantee that naming scheme.
    res = re.match(r'.*info\.(.*)\.json', info_path)
    title = res.group(1)
    fig, ax1 = plt.subplots(figsize=figsize)
    ax1.set_xlabel('epoch')
    ax1.set_ylabel('loss')
    lns1 = ax1.plot(history['batch_loss'], label='loss', color='red')
    lns2 = ax1.plot(history['val_batch_loss'], label='val_loss', color='blue')
    # vertical marker at the epoch with the lowest validation loss
    min_val_loss_epoch = history['val_batch_loss'].index(min(history['val_batch_loss']))
    plt.axvline(x=min_val_loss_epoch, c='grey')
    # accuracy shares the x axis on a secondary y axis
    ax2 = ax1.twinx()
    ax2.set_ylabel('accuracy')
    lns3 = ax2.plot(history['val_accuracy'], label='val_accuracy', color='green')
    # merge the legends of both axes into one box
    lns = lns1+lns2+lns3
    labs = [l.get_label() for l in lns]
    ax2.legend(lns, labs, loc='best')
    ax1.grid(axis='x')
    ax2.grid(axis='y')
    # lims
    ax1.set_xlim((0, len(history['batch_loss'])))
    if loss_lim is not None:
        ax1.set_ylim(loss_lim)
    if accuracy_lim is not None:
        ax2.set_ylim(accuracy_lim)
    ax1.set_title(title)
    # saving / display
    if save_path is not None:
        plt.savefig(save_path, bbox_inches='tight', dpi=300)
    else:
        plt.show()
def prettify_info_files(dir: str, indent: int = 2):
    """Rewrite every .json file in *dir* pretty-printed with the given indent."""
    for path in Path(dir).iterdir():
        # only touch JSON files; everything else is left untouched
        if path.suffix != '.json':
            continue
        with open(path, 'r') as fh:
            parsed = json.load(fh)
        with open(path, 'w') as fh:
            json.dump(parsed, fh, indent=indent)
def save_train_history_figures_info_dir(dir: str):
    """Render a training-history figure next to every .json info file in *dir*.

    Each 'info.<name>.json' gets a sibling '<same name>.png'.
    """
    dir = Path(dir)
    for info_path in [p for p in dir.iterdir() if p.suffix == '.json']:
        # save the figure alongside the info file, swapping the extension
        save_path = info_path.with_suffix('.png')
        create_train_history_figure(
            info_path=str(info_path),
            save_path=str(save_path)
        )
def get_params_from_name(filename: str):
    """Recover training hyper-parameters encoded in a model/info file name.

    The name is expected to contain dot-separated tokens such as
    ``raw.rawmax200.u128.b64.ep100.pat10.tf0.ed1.gru.bahdanau``.

    Raises AttributeError when a mandatory token is missing (regex match
    returns None), matching the original behavior.
    """
    params = {}
    # data-type token: raw / event / joint (renamed loop var: don't shadow builtin `type`)
    for data_type in ['raw', 'event', 'joint']:
        if f'{data_type}.' in filename:
            params['DATA_TYPE'] = data_type
    if params['DATA_TYPE'] in ['raw', 'joint']:
        res = re.match(r'.*\.rawmax(\d+)\..*', filename)
        params['RAW_MAX_LEN'] = int(res.group(1))
    if params['DATA_TYPE'] in ['event', 'joint']:
        res = re.match(r'.*\.evmax(\d+)\..*', filename)
        params['EVENT_MAX_LEN'] = int(res.group(1))
    res = re.match(r'.*\.u(\d+)\..*', filename)
    params['UNITS'] = int(res.group(1))
    res = re.match(r'.*\.b(\d+)\..*', filename)
    params['BATCH_SIZE'] = int(res.group(1))
    res = re.match(r'.*\.ep(\d+)\..*', filename)
    params['EPOCHS'] = int(res.group(1))
    res = re.match(r'.*\.pat(\d+)\..*', filename)
    params['PATIENCE'] = int(res.group(1))
    res = re.match(r'.*\.tf(\d)\..*', filename)
    # Deliberately forced off in the original (the parsed tf digit is ignored);
    # kept for backward compatibility -- TODO confirm teacher forcing is unused.
    params['TEACHER_FORCING'] = False
    res = re.match(r'.*\.ed(\d)\..*', filename)
    # Bug fix: bool("0") is True, so 'ed0' used to parse as detection enabled;
    # convert the digit to int first.
    params['EVENT_DETECTION'] = bool(int(res.group(1)))
    res = re.match(r'.*\.emb(\d)\..*', filename)
    if res:
        params['EMBEDDING_DIM'] = int(res.group(1))
    else:
        params['EMBEDDING_DIM'] = 1
    # later matches win, so 'bigru'/'bilstm' correctly override the plain
    # 'gru.'/'lstm.' substring hits
    for rnn_type in ['gru', 'lstm', 'bigru', 'bilstm']:
        if f'{rnn_type}.' in filename:
            params['RNN_TYPE'] = rnn_type
    for att_type in ['bahdanau', 'luong']:
        if f'{att_type}.' in filename:
            params['ATTENTION_TYPE'] = att_type
    if 'ATTENTION_TYPE' not in params:
        params['ATTENTION_TYPE'] = 'bahdanau'
    return params
def plot_attention_weights_for_prediction(model_path, input_data, save_path: str = None, seq_id: int = 0, output_max_length=50, figsize=(6, 2)):
    """Rebuild a basecaller from its checkpoint and plot the attention matrix
    for one sequence of `input_data`.

    model_path: checkpoint path whose file name encodes the hyper-parameters
        (decoded via get_params_from_name).
    input_data: batch fed to tf_basecall_batch_to_tokens.
        NOTE(review): expected shape/type is determined by the Basecaller
        class, not visible here — confirm against its definition.
    save_path: if None the figure is shown interactively, otherwise saved.
    seq_id: index of the sequence within the batch to visualise.
    """
    params = get_params_from_name(model_path)
    print(params)
    # DataModule is built only to obtain output_text_processor and
    # input_padding_value; the zero lengths/batch size are presumably
    # placeholders since no data is loaded here — TODO confirm.
    dm = DataModule(
        dir='data/simulator/random_200k_perfect',
        max_raw_length=0,
        max_event_length=0,
        bases_offset=0,
        batch_size=0,
        load_source=0,
        random_seed=0,
        verbose=True
    )
    basecaller = Basecaller(
        units=params['UNITS'],
        output_text_processor=dm.output_text_processor,
        input_data_type=params['DATA_TYPE'],
        input_padding_value=dm.input_padding_value,
        rnn_type=params['RNN_TYPE'],
        teacher_forcing=params['TEACHER_FORCING'],
        attention_type=params['ATTENTION_TYPE'],
        embedding_dim=params['EMBEDDING_DIM']
    )
    # Configure the loss and optimizer
    # (compile is required before load_weights/inference can run).
    basecaller.compile(
        optimizer=tf.optimizers.Adam(),
        loss=utils.MaskedLoss(basecaller.output_padding_token),
    )
    basecaller.load_weights(model_path)
    basecall_tokens_res = basecaller.tf_basecall_batch_to_tokens(input_data, output_max_length=output_max_length, early_break=True)
    # Attention weights for the selected sequence: rows = output bases,
    # columns = encoder output steps (per the axis labels below).
    att = basecall_tokens_res['attention'][seq_id]
    fig, ax = plt.subplots(figsize=figsize)
    ax.matshow(att, cmap='viridis', vmin=0.0)
    ax.set_xlabel('Encoder outputs id')
    ax.set_ylabel('Output bases id')
    # model_details is currently unused: the suptitle below is commented out.
    model_details = model_path.replace('models/', '').replace('/model_chp', '')
    # plt.suptitle(f'Attention weights\n{model_details}')
    if save_path is None:
        plt.show()
    else:
        plt.savefig(save_path, bbox_inches='tight', dpi=300)
| StarcoderdataPython |
9709736 | """
Global variables for the library
"""
import os

# Base API endpoint; overridable via the environment for testing/staging.
BASE_URL = os.environ.get('BASE_URL', 'https://api.repositpower.com')
# BUGFIX: .format(BASE_URL) must apply only to the built-in default.  It was
# previously applied to the env-provided value as well, which breaks any
# AUTH_PATH containing literal '{' or '}' characters.
AUTH_PATH = os.environ.get('AUTH_PATH', '{}/v2/auth/login/'.format(BASE_URL))
| StarcoderdataPython |
6651579 | <reponame>JennaVergeynst/COVID19-Model<filename>src/covid19model/optimization/run_optimization.py
import random
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import scipy
from scipy.integrate import odeint
import matplotlib.dates as mdates
import matplotlib
import scipy.stats as st
import math
import xarray as xr
import emcee
import json
import corner
from covid19model.optimization import objective_fcns
from covid19model.optimization import MCMC
from covid19model.models import models
from covid19model.data import google
from covid19model.data import sciensano
from covid19model.data import polymod
from covid19model.data import model_parameters
from covid19model.visualization.optimization import traceplot
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = polymod.get_interaction_matrices()
def full_calibration(model, timeseries, spatial_unit, start_date, end_beta, end_ramp,
                     fig_path, samples_path,
                     maxiter=50, popsize=50, steps_mcmc=10000):
    """
    Two-stage model calibration: (1) PSO + MCMC on beta/extraTime, then
    (2) PSO + MCMC on the compliance-ramp parameters (l, tau, prevention),
    followed by an R0 computation from the beta posterior.

    model : object
        initialized model
    timeseries : Series
        data to fit with date in index
    spatial_unit : string
        name of the spatial_unit, e.g. Gent, Antwerp, Belgium
    start_date, end_beta, end_ramp : string, format YYYY-MM-DD
        date of first data point, last date for fitting beta and last date
        for fitting the compliance ramp
    fig_path : string
        path to folder where to save figures
    samples_path : string
        path to folder where to save samples
    maxiter: int (default 50)
        maximum number of pso iterations
    popsize: int (default 50)
        population size of particle swarm
        increasing this variable lowers the chance of finding local minima but
        slows down calculations
    steps_mcmc : int (default 10000)
        number of steps in MCMC calibration

    Returns the samples dict (also written to
    ``<samples_path><spatial_unit>_<today>.json``).
    """
    plt.ioff()  # figures are written to disk, not shown interactively
    # define dataset
    data=[timeseries[start_date:end_beta]]
    states = [["H_in"]]
    #############################################
    ####### CALIBRATING BETA AND LAG_TIME #######
    #############################################
    # set optimisation settings
    parNames_pso = ['sigma_data','extraTime','beta'] # must be a list!
    bounds_pso=((1,100),(30,60),(0.02,0.06)) # must be a list!
    # run pso optimisation
    theta = MCMC.fit_pso(model,data,parNames_pso,states,bounds_pso,maxiter=maxiter,popsize=popsize)
    lag_time = int(round(theta[1]))
    # Assign 'extraTime' or lag_time as a model attribute --> is needed to perform the optimalization
    model.extraTime = int(round(theta[1]))
    model.parameters.update({'beta': theta[2]})
    parNames_mcmc = ['sigma_data','beta'] # must be a list!
    bounds_mcmc=((1,200),(0.01,0.10))
    # run MCMC calibration: 4 walkers scattered around the PSO optimum
    # (sigma_data, beta)
    pos = [theta[0],theta[2]] + [1, 1e-2 ]* np.random.randn(4, 2)
    nwalkers, ndim = pos.shape
    sampler = emcee.EnsembleSampler(nwalkers, ndim, objective_fcns.log_probability,
                                    args=(model, bounds_mcmc, data, states, parNames_mcmc))
    sampler.run_mcmc(pos, steps_mcmc, progress=True)
    samples_beta = sampler.get_chain(discard=100,flat=False)
    flat_samples_beta = sampler.get_chain(discard=100,flat=True)
    try:
        sampler.get_autocorr_time()
    # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit still propagate
    except Exception:
        print('Calibrating beta. Warning: The chain is shorter than 50 times the integrated autocorrelation time for 4 parameter(s). Use this estimate with caution and run a longer chain!')
    traceplot(samples_beta,labels=[r'$\sigma_{data}$',r'$\beta$'],plt_kwargs={'linewidth':2,'color': 'red','alpha': 0.15})
    plt.savefig(fig_path+'traceplots/beta_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
                dpi=600, bbox_inches='tight')
    fig = corner.corner(flat_samples_beta,labels=[r'$\sigma_{data}$',r'$\beta$'])
    fig.set_size_inches(8, 8)
    plt.savefig(fig_path+'cornerplots/beta_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
                dpi=600, bbox_inches='tight')
    #############################################
    ####### CALIBRATING COMPLIANCE PARAMS #######
    #############################################
    # keep only the flattened beta samples (column 1; column 0 is sigma_data)
    samples_beta = {'beta': flat_samples_beta[:,1].tolist()}
    # Create checkpoints dictionary
    chk_beta_pso = {
        'time': [lag_time],
        'Nc': [0.2*Nc_home + 0.3*Nc_work + 0.2*Nc_transport],
    }
    # define dataset
    data=[timeseries[start_date:end_ramp]]
    # set optimisation settings
    parNames_pso2 = ['sigma_data','l','tau','prevention'] # must be a list!
    bounds_pso2=((1,100),(0.1,20),(0,20),(0,1)) # must be a list!
    # run optimisation
    theta = MCMC.fit_pso(model, data, parNames_pso2, states, bounds_pso2,
                         checkpoints=chk_beta_pso, samples=samples_beta, maxiter=maxiter,popsize=popsize)
    model.parameters.update({'l': theta[1], 'tau': theta[2]})
    # BUGFIX: theta is ordered [sigma_data, l, tau, prevention]; the original
    # read theta[2] here, which assigned tau's value to prevention.
    prevention = theta[3]
    # Create checkpoints dictionary
    chk_beta_MCMC = {
        'time': [lag_time],
        'Nc': [prevention*(1.0*Nc_home + 0.4*Nc_work + 0.3*Nc_transport + 0.7*Nc_others + 0.2*Nc_leisure)]}
    bounds_mcmc2=((1,100),(0.001,20),(0,20),(0,1)) # must be a list!
    # 8 walkers scattered around the 4-dimensional PSO optimum
    pos = theta + [1, 0.1, 0.1, 0.1 ]* np.random.randn(8, 4)
    nwalkers, ndim = pos.shape
    sampler = emcee.EnsembleSampler(nwalkers, ndim, objective_fcns.log_probability,
                                    args=(model,bounds_mcmc2,data,states,parNames_pso2,chk_beta_MCMC,samples_beta))
    sampler.run_mcmc(pos, steps_mcmc, progress=True)
    try:
        sampler.get_autocorr_time()
    except Exception:
        print('Calibrating compliance ramp. Warning: The chain is shorter than 50 times the integrated autocorrelation time for 4 parameter(s). Use this estimate with caution and run a longer chain!')
    samples_ramp = sampler.get_chain(discard=200,flat=False)
    flat_samples_ramp = sampler.get_chain(discard=200,flat=True)
    traceplot(samples_ramp, labels=[r"$\sigma_{data}$","l",r"$\tau$","prevention"],
              plt_kwargs={'linewidth':2,'color': 'red','alpha': 0.15})
    plt.savefig(fig_path+'traceplots/ramp_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
                dpi=600, bbox_inches='tight')
    fig = corner.corner(flat_samples_ramp, labels=[r"$\sigma_{data}$","l",r"$\tau$",r"$\Omega$"])
    fig.set_size_inches(9, 9)
    plt.savefig(fig_path+'cornerplots/ramp_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
                dpi=600, bbox_inches='tight')
    #############################################
    ####### CALCULATING R0 ######################
    #############################################
    # Population-weighted R0 per posterior beta sample, plus an
    # age-stratified version (one column per sample).
    R0 =[]
    for i in range(len(samples_beta['beta'])):
        R0.append(sum((model.parameters['a']*model.parameters['da']+model.parameters['omega'])*samples_beta['beta'][i]*model.parameters['s']*np.sum(Nc_total,axis=1)*(initN/sum(initN))))
    R0_stratified = np.zeros([initN.size,len(samples_beta['beta'])])
    for i in range(len(samples_beta['beta'])):
        R0_stratified[:,i]= (model.parameters['a']*model.parameters['da']+model.parameters['omega'])*samples_beta['beta'][i]*model.parameters['s']*np.sum(Nc_total,axis=1)
    R0_stratified_dict = pd.DataFrame(R0_stratified).T.to_dict(orient='list')
    samples_dict={'calibration_data':states[0][0], 'start_date':start_date,
                  'end_beta':end_beta, 'end_ramp':end_ramp,
                  'maxiter': maxiter, 'popsize':popsize, 'steps_mcmc':steps_mcmc,
                  'R0':R0, 'R0_stratified_dict':R0_stratified_dict,
                  'lag_time': lag_time, 'beta': samples_beta['beta'],
                  'l': flat_samples_ramp[:,1].tolist(),'tau':flat_samples_ramp[:,2].tolist(),
                  'prevention':flat_samples_ramp[:,3].tolist()}
    with open(samples_path+str(spatial_unit)+'_'+str(datetime.date.today())+'.json', 'w') as fp:
        json.dump(samples_dict, fp)
    plt.ion()
    return samples_dict
| StarcoderdataPython |
6507125 | <filename>log_stats.py
# simple module for reading crawled profile information and logging the stats
import json
import datetime
import csv
import argparse
from util.settings import Settings
def log_stats(username):
    """Read the crawled profile JSON for *username* and append one summary
    row (timestamp, counts, total likes/comments) to stats.csv."""
    path = Settings.profile_location + '/' + username + '.json'
    with open(path, 'r') as fh:
        profile = json.load(fh)
    print('Reading crawled profile info of {}'.format(username))
    print(profile)
    # sum up likes and comments across all posts
    total_likes = sum(post['likes'] for post in profile['posts'])
    total_comments = sum(post['comments']['count'] for post in profile['posts'])
    row = [
        profile['scrapped'],
        profile['username'],
        profile['followers'],
        profile['following'],
        profile['num_of_posts'],
        total_likes,
        total_comments,
    ]
    # append collected stats to stats.csv
    with open('stats.csv', 'a', newline='') as out:
        csv.writer(out).writerow(row)
    print('Added stats to stats.csv')
def parse_args():
    """Command-line interface: -u/--user (required) names the profile to read."""
    parser = argparse.ArgumentParser(
        description="Read and log collected stats from crawled profiles")
    parser.add_argument("-u", "--user", dest="user", required=True,
                        default=None, help="Username")
    return parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: read the crawled profile named by -u/--user and
    # append its summary row to stats.csv.
    args = parse_args()
    log_stats(args.user)
| StarcoderdataPython |
1850306 | <gh_stars>0
import os
from gevent import socket
from gevent.pywsgi import WSGIServer
import app
# Unix-domain socket path inside the OpenShift runtime directory.
sock_path = "{0}run/appserver.sock".format(os.environ["OPENSHIFT_ADVANCED_PYTHON_DIR"])
# Remove a stale socket file left behind by a previous (crashed) run;
# bind() raises "Address already in use" if the path still exists.
if os.path.exists(sock_path):
    os.remove(sock_path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(sock_path)
# Allow up to 256 pending connections.
sock.listen(256)
WSGIServer(sock, app.application).serve_forever() | StarcoderdataPython |
1665309 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import datetime, time
# Inspired by article: http://www.seehuhn.de/blog/52
class Timezone(datetime.tzinfo):
    """Fixed-offset timezone parsed from an nginx log offset string.

    `name` is e.g. "+0200" or "-0800": a sign, two hour digits, two
    minute digits.
    """
    def __init__(self, name="+0000"):
        self.name = name
        # BUGFIX: the original computed int(name[:-2])*3600 + int(name[-2:])*60,
        # which for "-0530" gives -5h + 30m instead of -(5h30m).  Apply the
        # sign to the whole offset.
        sign = -1 if name.startswith('-') else 1
        hours = int(name[-4:-2])
        minutes = int(name[-2:])
        self.offset = sign * datetime.timedelta(hours=hours, minutes=minutes)
    def utcoffset(self, dt):
        return self.offset
    def dst(self, dt):
        # BUGFIX: was `timedelta(0)` — a NameError, since only the `datetime`
        # module (not the class) is imported at the top of this file.
        return datetime.timedelta(0)
    def tzname(self, dt):
        return self.name
# Decorator for cleaning dict from parsed line
def clean_parsed_log(fn):
def wrapped(*args):
try:
result_dict = fn(*args)
except TypeError:
pass
# Convert date string to datetime object
if "datetime" in result_dict:
tt = time.strptime(result_dict["datetime"][:-6], "%d/%b/%Y:%H:%M:%S")
tt = list(tt[:6]) + [ 0, Timezone(result_dict["datetime"][-5:]) ]
result_dict["datetime"] = datetime.datetime(*tt).isoformat()
if 'status' in result_dict:
result_dict['status'] = int(result_dict['status'])
if result_dict["size"] in [0, "0"]:
result_dict['size'] = None
else:
result_dict['size'] = int(result_dict["size"])
for i in result_dict:
if result_dict[i] == "-":
result_dict[i] = None
return result_dict
return wrapped
# Field patterns for the combined log format.  Compiled once at import time
# instead of on every parse() call (the pattern never changes).
_LINE_PARTS = [
    r'(?P<host>\S+)',  # host %h
    r'\S+',  # indent %l (unused)
    r'(?P<user>\S+)',  # user %u
    r'\[(?P<datetime>.+)\]',  # datetime %t
    r'"(?P<request>.+)"',  # request "%r"
    r'(?P<status>[0-9]+)',  # status %>s
    r'(?P<size>\S+)',  # size %b (careful, can be '-')
    r'"(?P<referer>.*)"',  # referer "%{Referer}i"
    r'"(?P<agent>.*)"',  # user agent "%{User-agent}i"
]
_LINE_PATTERN = re.compile(r'\s+'.join(_LINE_PARTS) + r'\s*\Z')


@clean_parsed_log
def parse(line):
    """Parse a single 'access.log' line into a dict of named fields.

    Raises AttributeError (match is None) for a line that does not conform
    to the combined log format, matching the original behaviour.
    """
    match = _LINE_PATTERN.match(line)
    return match.groupdict()
| StarcoderdataPython |
1602238 | <reponame>yasuraok/icassp2010<filename>prg/stft.py
# -*- coding: utf-8 -*-
"""
$Date:: $
$Rev:: $
$Author:: $
至って普通のスペクトログラム算出
"""
from numpy import empty, linspace, r_, zeros, real, conj, hstack, vstack
from scipy.fftpack import fft, ifft
def sec2frm(sec, fftlength, fs, overlap):
    """Convert a duration in seconds to a (possibly fractional) frame index
    for the given FFT length, sample rate and overlap."""
    step = fftlength - overlap
    return (sec * fs - fftlength) / step
def sgn2frm(sgnLen, fftlength, overlap):
    """Number of analysis frames covering a signal of `sgnLen` samples
    (at least 1, even for signals shorter than one FFT window)."""
    remainder = sgnLen - fftlength
    if remainder < 0:
        return 1
    step = fftlength - overlap
    return remainder / step + 1
def frm2sgn(frmLen, fftlength, overlap):
    """Inverse of sgn2frm: number of samples spanned by `frmLen` frames."""
    step = fftlength - overlap
    return step * (frmLen - 1) + fftlength
#def specgram(i_x, fftlength, fs, window, overlap):
# # X is assumed to be complex spectrogram obtained by function "specgram"
# if i_x.ndim == 1:
# x = i_x.reshape(len(i_x), 1)
# elif i_x.ndim == 2:
# x = i_x
# else:
# assert False
#
# hop = fftlength - overlap
# tmp = x.shape[0] - fftlength
# #frmLen = ((tmp/hop + 1) + int(bool(tmp%hop)) if tmp >= 0 else 1)
# frmLen = ((tmp/hop + 1) if tmp >= 0 else 1)
# sgnLen = hop * (frmLen-1) + fftlength
# binLen = fftlength / 2.0 + 1
#
# #x = hstack([x, tile(0.0, (sgnLen - x.size))])
# Y = empty((binLen, frmLen), dtype=complex)
# for f in range(0, frmLen):
# offset = f*hop
# Y[:,f] = fft(x[offset:offset+fftlength,:].T * window)[0,0:binLen]
#
# F = linspace(0, fs/2, binLen)
# T = r_[0 : float(tmp)/fs : float(hop)/fs]
# return Y, F, T
def specgram(i_x, fftlength, fs, window, overlap):
    """Short-time Fourier transform of a mono signal.

    Returns (Y, F, T): Y[frame, bin] complex spectrum keeping only the
    non-negative frequency bins, F the bin centre frequencies (Hz), and T
    the frame start times (s).

    NOTE(review): this relies on Python 2 integer division (`tmp / hop`,
    `fftlength / 2`); under Python 3 these yield floats and break the
    array shapes.  The module uses `xrange` elsewhere, so py2 is presumed.
    """
    # X is assumed to be complex spectrogram obtained by function "specgram"
    # Accept 1-D input by promoting it to a column vector.
    if i_x.ndim == 1:
        x = i_x.reshape(len(i_x), 1)
    elif i_x.ndim == 2:
        x = i_x
    else:
        assert False
    hop = fftlength - overlap
    tmp = x.shape[0] - fftlength
    #frmLen = ((tmp/hop + 1) + int(bool(tmp%hop)) if tmp >= 0 else 1)
    frmLen = ((tmp / hop + 1) if tmp >= 0 else 1)
    # sgnLen is computed but unused in this variant (no zero-padding).
    sgnLen = hop * (frmLen - 1) + fftlength
    binLen = fftlength / 2 + 1
    #x = hstack([x, tile(0.0, (sgnLen - x.size))])
    Y = empty((frmLen, binLen), dtype=complex)
    for f in range(0, frmLen):
        offset = f * hop
        # Window the frame, FFT it, keep only the non-negative bins.
        Y[f, :] = fft(x[offset:offset + fftlength, :].T * window)[0, 0:binLen]
    F = linspace(0, fs / 2, binLen)
    T = r_[0 : float(tmp) / fs : float(hop) / fs]
    return Y, F, T
def overlapdecomp(i_x, fftlength, fs, window, overlap):
    """Slice a signal into windowed, overlapping time-domain frames
    (no FFT): returns Y[frame, sample] = x[frame*hop : ...+fftlength] * window.

    NOTE(review): only the first channel (column 0) is used; also relies on
    Python 2 integer division for frmLen (see specgram above).
    """
    # Accept 1-D input by promoting it to a column vector.
    if i_x.ndim == 1:
        x = i_x.reshape(len(i_x), 1)
    elif i_x.ndim == 2:
        x = i_x
    else:
        assert False
    hop = fftlength - overlap
    tmp = x.shape[0] - fftlength
    frmLen = ((tmp / hop + 1) if tmp >= 0 else 1)
    #sgnLen = hop * (frmLen-1) + fftlength
    #x = vstack([x, zeros((sgnLen-x.shape[0], x.shape[1]), dtype=x.dtype)])
    Y = empty((frmLen, fftlength), dtype=x.dtype)
    for f in range(0, frmLen):
        offset = f * hop
        #print offset+fftlength, x.shape[0]
        Y[f, :] = x[offset:offset + fftlength, 0] * window
    return Y
def overlapsynth(X, fftlength, fs, window, overlap):
    """Overlap-add resynthesis of time-domain frames produced by
    overlapdecomp: windows each frame again and normalizes by the summed
    squared window.  Returns a column vector of shape (sgnLen, 1).

    NOTE(review): Python 2 code (`xrange`); the endpoints of w2 are forced
    to 1 to avoid division by (near-)zero where windows don't fully overlap.
    """
    hop = fftlength - overlap
    frmLen = X.shape[0]
    sgnLen = hop * (frmLen - 1) + fftlength
    y = zeros((sgnLen), dtype=float)
    w2 = zeros((sgnLen), dtype=float)
    window2 = window ** 2
    # Accumulate the squared-window envelope for normalization.
    for f in xrange(0, frmLen):
        offset = f * hop
        w2[offset:offset + fftlength] += window2
    (w2[0], w2[-1]) = (1, 1)
    # Overlap-add the (re-)windowed frames.
    for f in range(0, frmLen):
        offset = f * hop
        y[offset:offset + fftlength] += X[f, :] * window
    # print "1", y.shape
    y = y / w2
    # print "2", y.shape
    # print "3", y.reshape(sgnLen, 1).shape
    return y.reshape(sgnLen, 1)
def ispecgram(X, fftlength, fs, window, overlap):
    """Inverse STFT with overlap-add and squared-window normalization.

    The negative-frequency bins are reconstructed by Hermitian symmetry
    (conj of the reversed positive bins) before the inverse FFT.
    NOTE(review): Python 2 code (`xrange`).
    """
    # X is assumed to be complex spectrogram obtained by function "specgram"
    # X[frmLen, binLen]
    hop = fftlength - overlap
    frmLen = X.shape[0]
    sgnLen = hop * (frmLen - 1) + fftlength
    [y, w2] = [zeros((sgnLen, 1)) for i in xrange(2)]
    window2 = window ** 2
    # Accumulate the squared-window envelope for normalization.
    for f in xrange(0, frmLen):
        offset = f * hop
        w2[offset:offset + fftlength, 0] += window2
    # Clamp the boundary regions to 1 to avoid dividing by tiny values.
    (w2[0:fftlength], w2[-fftlength:-1]) = (1, 1)
    for f in range(0, frmLen):
        Xf = X[f, :]
        offset = f * hop
        # Rebuild the full spectrum via Hermitian symmetry, inverse-FFT,
        # window, and overlap-add.
        y[offset:offset + fftlength, 0] += real(ifft(hstack([Xf, conj(Xf[-2:0:-1])]))) * window
    #for n in xrange(2000):
    #    print n, y[n], (y / w2)[n]
    y = y / w2
    #y = y * (hop / sum(window))
    return y
def ispecgram_simple(X, fftlength, fs, window, overlap):
    """Inverse STFT variant: no re-windowing of the synthesized frames and
    normalization by the summed window (not its square).

    NOTE(review): unlike ispecgram, frames are indexed as X[:, f] here —
    the spectrogram is expected transposed ([bin, frame]).  Python 2 code.
    """
    # X is assumed to be complex spectrogram obtained by function "specgram"
    hop = fftlength - overlap
    frmLen = X.shape[1]
    sgnLen = hop * (frmLen - 1) + fftlength
    [y, w] = [zeros((sgnLen, 1)) for i in xrange(2)]
    # Accumulate the (plain) window envelope for normalization.
    for f in xrange(0, frmLen):
        offset = f * hop
        w[offset:offset + fftlength, 0] += window
    # Clamp the boundary regions to 1 to avoid dividing by tiny values.
    (w[0:fftlength], w[-fftlength:-1]) = (1, 1)
    for f in range(0, frmLen):
        Xf = X[:, f]
        offset = f * hop
        # Rebuild the full spectrum via Hermitian symmetry and overlap-add.
        y[offset:offset + fftlength, 0] += real(ifft(hstack([Xf, conj(Xf[-2:0:-1])])))
    y = y / w
    #y = y * (hop / sum(window))
    return y
| StarcoderdataPython |
6547433 | <filename>lintcode/660.py
"""
660. Read N Characters Given Read4 II - Call multiple times
https://www.lintcode.com/problem/read-n-characters-given-read4-ii-call-multiple-times
The read4 API is already defined for you.
@param buf a list of characters
@return an integer
you can call Reader.read4(buf)
"""
class Solution:
    """Buffered reader on top of the external Reader.read4 API
    (read-N-characters, callable multiple times).

    State persists across read() calls: `buf` holds the most recent read4
    chunk, `next_w` is how many valid chars it contains, and `next_r` is
    the next unread position (next_r == next_w means the chunk is spent).
    """
    def __init__(self):
        # 4-char scratch buffer filled in place by Reader.read4.
        self.buf = [None] * 4
        self.next_w = 0
        self.next_r = 0
    # @param {char[]} buf destination buffer
    # @param {int} n maximum number of characters to read
    # @return {int} the number of characters read
    def read(self, buf, n):
        # Write your code here
        i = 0
        while i < n:
            if self.next_r == self.next_w:
                # Scratch buffer exhausted: refill it from the source.
                self.next_r, self.next_w = 0, Reader.read4(self.buf)
                if self.next_w == self.next_r:
                    # read4 returned 0 characters: end of file.
                    break
            # Copy one char out and advance both cursors.
            buf[i], i, self.next_r = self.buf[self.next_r], i + 1, self.next_r + 1
        return i
5064900 | # -*- encoding: utf-8 -*-
from YamJam import yamjam
from configurations import Configuration
import os
from os.path import join, expanduser
# Secrets and machine-local configuration come from the user's YamJam store
# under the 'RecomendadorUD' key (SECRET_KEY, API keys, DB credentials).
CFG = yamjam()['RecomendadorUD']
# Repository root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Deployment layout: ~/www/Django/RecomendadorUD/{prod,dev}/media
HOME_DIR = expanduser("~")+"/www/Django/RecomendadorUD"
MEDIA_DIR_PROD = join(HOME_DIR+"/prod/", 'media')
MEDIA_DIR_DEV = join(HOME_DIR+"/dev/", 'media')
class Base(Configuration):
    """Shared django-configurations settings for RecomendadorUD.

    Dev and Prod below inherit from this class and override the
    environment-specific pieces (database, search index, e-mail backend,
    static/media roots).
    """
    SECRET_KEY = CFG['SECRET_KEY']#os.environ.get("SECRET_KEY", '')
    DEBUG=True
    ALLOWED_HOSTS = []
    TEMPLATE_CONTEXT_PROCESSORS = (
        "django.core.context_processors.request",
        'django.core.context_processors.debug',
        'django.core.context_processors.i18n',
        'django.core.context_processors.media',
        'django.core.context_processors.static',
        'django.core.context_processors.request',
        'django.contrib.messages.context_processors.messages',
        'django.contrib.auth.context_processors.auth',
        "apps.establishment_system.context_processors.notificaciones",
        "allauth.account.context_processors.account",
        "allauth.socialaccount.context_processors.socialaccount",
    )
    AUTHENTICATION_BACKENDS = (
        "django.contrib.auth.backends.ModelBackend",
        "allauth.account.auth_backends.AuthenticationBackend",
    )
    MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
        #'drealtime.middleware.iShoutCookieMiddleware',
        #'simple_history.middleware.HistoryRequestMiddleware',
    )
    INSTALLED_APPS = (
        #'grappelli', #http://django-grappelli.readthedocs.org/en/latest/customization.html
        #'admin_tools.theming',
        #'admin_tools.menu',
        #'admin_tools.dashboard',
        #'yawdadmin',
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.messages',
        'django.contrib.staticfiles',
        'django.contrib.sites',
        'django.contrib.humanize',
        'haystack',
        'allauth',
        'allauth.account',
        'allauth.socialaccount',
        'allauth.socialaccount.providers.google',
        'allauth.socialaccount.providers.twitter',
        'allauth.socialaccount.providers.facebook',
        'djrill',
        'parsley',
        #'simple_history' ,
        'apps.account_system',
        'apps.establishment_system', #https://github.com/dcramer/django-ratings
        'apps.externals.djangoratings',
        #'south', #https://github.com/agiliq/Django-parsley #http://parsleyjs.org/
        #'drealtime', #https://bitbucket.org/inzane/django-realtime
        #'dajaxice', #http://django-dajaxice.readthedocs.org/en/latest/
        #'dajax',
        'notifications',#https://github.com/django-notifications/django-notifications
        #'dajax', #http://django-dajax.readthedocs.org/en/latest/
        'configurations',
        #'geoposition', #http://django-geoposition.readthedocs.org/
        #'ajax_select',
        #'apps.djadmin_ext',
        'imagekit',
        #'fluent_comments',
        'crispy_forms',
        #'django_comments_xtd',
        'rest_framework',
        #'selectable', #http://django-selectable.readthedocs.org/en/latest/admin.html
        'autocomplete_light',
        #'queued_search', #https://github.com/toastdriven/queued_search
        'bootstrap3', #https://github.com/dyve/django-bootstrap3
        'mathfilters',#https://github.com/dbrgn/django-mathfilters
        'django.contrib.gis',
        'apps.externals.recommends',
        #'recommends.storages.redis',
        'apps.recommender_system',
        'avatar', #http://django-avatar.readthedocs.org/en/latest/
    )
    """
    Configuración imagenes
    """
    MAX_UPLOAD_SIZE = 10485760 #10 MB
    MAX_UPLOAD_PER_USER=3
    MAX_IMAGES_PER_PLACE=8
    ITEMS_PAGINATE=20
    """
    Configuración comentarios
    """
    MAX_COMMENTS_PER_PAGE=10
    COMMENT_MAX_LENGTH=500
    """
    Comfiguración mensajes
    """
    MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
    """
    Configuración NewRelic
    """
    NEW_RELIC_CONFIG_FILE="newrelic.ini"
    """
    Configuración Avatars
    """
    AVATAR_DEFAULT_URL="/img/default_profile.png"
    AVATAR_GRAVATAR_BACKUP=False
    """
    Configuración HAYSTACK
    """
    #HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.BaseSignalProcessor'
    #HAYSTACK_SIGNAL_PROCESSOR = 'apps.establishment_system.signals.QueuedSignalProcessor'
    HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
    # HAYSTACK_CONNECTIONS = {
    #     'default': {
    #         'ENGINE': 'xapian_backend.XapianEngine',
    #         'PATH': os.path.join(os.path.dirname(__file__), 'xapian_index')
    #     },
    # }
    # HAYSTACK_CONNECTIONS = {
    #     'default': {
    #         'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    #     },
    # }
    """
    Configuración recomendaciones
    """
    RECOMMENDS_STORAGE_BACKEND='recommends.storages.redis.storage.RedisStorage'
    RECOMMENDS_STORAGE_REDIS_DATABASE ={
        'HOST': 'localhost',
        'PORT': 6379,
        'NAME': 0,
        'ATOMIC_REQUESTS': True
    }
    """
    Archivos estaticos
    """
    STATIC_URL = '/static/'
    # STATIC_URL =join(BASE_DIR,'/static/')
    ROOT_URLCONF = 'RecomendadorUD.urls'
    STATICFILES_FINDERS = (
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
        'django.contrib.staticfiles.finders.FileSystemFinder',
        #'dajaxice.finders.DajaxiceFinder',
    )
    WSGI_APPLICATION = 'RecomendadorUD.wsgi.application'
    """
    Configuración Media
    """
    MEDIA_URL = '/media/'
    """
    Templates
    """
    TEMPLATE_DIRS = (
        join(BASE_DIR, 'templates'),
    )
    TEMPLATE_LOADERS = (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
        'django.template.loaders.eggs.Loader',
    )
    """
    Internacioalización
    """
    LANGUAGE_CODE = 'es-CO'
    #LANGUAGE_CODE = 'us-en'
    TIME_ZONE = 'America/Bogota'
    USE_I18N = True
    USE_L10N = False
    USE_TZ = True
    """
    Custom model users
    """
    AUTH_USER_MODEL = 'account_system.User'
    """
    Configuración Mandrill Mail
    """
    MANDRILL_API_KEY = CFG['MANDRILL_API_KEY']#os.environ.get("MANDRILL_API_KEY", '')
    #EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"
    DEFAULT_FROM_EMAIL='<EMAIL>'
    """
    Configuración Allauth
    """
    SITE_ID = 2
    ACCOUNT_ADAPTER =('allauth.account.adapter.DefaultAccountAdapter')
    #Specifies the adapter class to use, allowing you to alter certain default behaviour.
    ACCOUNT_AUTHENTICATION_METHOD ='username'#('username' | 'email' | 'username_email')
    #Specifies the login method to use – whether the user logs in by entering his username, e-mail address, or either one of both.
    ACCOUNT_CONFIRM_EMAIL_ON_GET =True
    #Determines whether or not an e-mail address is automatically confirmed by a mere GET request.
    ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL ='/accounts/new_user/'
    #The URL to redirect to after a successful e-mail confirmation, in case no user is logged in.
    ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL ='/accounts/new_user/'
    EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL ='/accounts/new_user/'
    #The URL to redirect to after a successful e-mail confirmation, in case of an authenticated user.
    # Set to None to use settings.LOGIN_REDIRECT_URL.
    ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS =3
    #Determines the expiration date of email confirmation mails (# of days).
    ACCOUNT_EMAIL_REQUIRED =True
    #The user is required to hand over an e-mail address when signing up.
    ACCOUNT_EMAIL_VERIFICATION ='mandatory'
    #Determines the e-mail verification method during signup – choose one of 'mandatory', 'optional', or 'none'. When set to '
    #mandatory' the user is blocked from logging in until the email address is verified.
    #Choose 'optional' or 'none' to allow logins with an unverified e-mail address.
    #In case of 'optional', the e-mail verification mail is still sent, whereas in case of 'none' no e-mail verification mails are sent.
    ACCOUNT_EMAIL_SUBJECT_PREFIX ='[RecomendadorUD] '
    #Subject-line prefix to use for email messages sent. By default, the name of the current Site (django.contrib.sites) is used.
    ACCOUNT_DEFAULT_HTTP_PROTOCOL = ('http')
    #The default protocol used for when generating URLs, e.g. for the password forgotten procedure.
    #Note that this is a default only – see the section on HTTPS for more information.
    ACCOUNT_LOGOUT_ON_GET =False
    #Determines whether or not the user is automatically logged out by a mere GET request. See documentation for the LogoutView for details.
    ACCOUNT_LOGOUT_REDIRECT_URL ='/'
    #The URL (or URL name) to return to after the user logs out. This is the counterpart to Django’s LOGIN_REDIRECT_URL.
    ACCOUNT_SIGNUP_FORM_CLASS ='apps.account_system.forms.SignupExtendForm'
    #A string pointing to a custom form class (e.g. ‘myapp.forms.SignupForm’) that is used during signup to ask the user
    #for additional input (e.g. newsletter signup, birth date). This class should implement a def signup(self, request, user)
    #method, where user represents the newly signed up user.
    ACCOUNT_SIGNUP_PASSWORD_VERIFICATION =True
    #When signing up, let the user type in his password twice to avoid typo’s.
    ACCOUNT_UNIQUE_EMAIL =True
    #Enforce uniqueness of e-mail addresses.
    ACCOUNT_USER_MODEL_USERNAME_FIELD ='username'
    #The name of the field containing the username, if any. See custom user models.
    ACCOUNT_USER_MODEL_EMAIL_FIELD ='email'
    #The name of the field containing the email, if any. See custom user models.
    #ACCOUNT_USER_DISPLAY ="user.username"
    #A callable (or string of the form ‘some.module.callable_name’) that takes a user as its only argument and returns the
    #display name of the user. The default implementation returns user.username.
    ACCOUNT_USERNAME_MIN_LENGTH =4
    #An integer specifying the minimum allowed length of a username.
    ACCOUNT_USERNAME_BLACKLIST =[]
    #A list of usernames that can’t be used by user.
    ACCOUNT_USERNAME_REQUIRED =True
    #The user is required to enter a username when signing up. Note that the user will be asked to do so even if
    #ACCOUNT_AUTHENTICATION_METHOD is set to email. Set to False when you do not wish to prompt the user to enter a username.
    ACCOUNT_PASSWORD_INPUT_RENDER_VALUE =False
    #render_value parameter as passed to PasswordInput fields.
    ACCOUNT_PASSWORD_MIN_LENGTH =4
    #An integer specifying the minimum password length.
    ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION =True
    #The default behaviour is to automatically log the user in once he confirms his email address. By changing this setting to False he will not be logged in, but redirected to the ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
    ACCOUNT_SESSION_REMEMBER =None
    #Controls the life time of the session. Set to None to ask the user ('Remember me?'), False to not remember, and True to always remember.
    ACCOUNT_SESSION_COOKIE_AGE =1814400
    #How long before the session cookie expires in seconds. Defaults to 1814400 seconds, or 3 weeks.
    SOCIALACCOUNT_ADAPTER ='allauth.socialaccount.adapter.DefaultSocialAccountAdapter'
    #SOCIALACCOUNT_ADAPTER ='apps.account_system.views.AccountAdapter'
    #Specifies the adapter class to use, allowing you to alter certain default behaviour.
    SOCIALACCOUNT_QUERY_EMAIL =ACCOUNT_EMAIL_REQUIRED
    #Request e-mail address from 3rd party account provider? E.g. using OpenID AX, or the Facebook 'email' permission.
    SOCIALACCOUNT_AUTO_SIGNUP =False
    #Attempt to bypass the signup form by using fields (e.g. username, email) retrieved from the social account provider.
    #If a conflict arises due to a duplicate e-mail address the signup form will still kick in.
    SOCIALACCOUNT_EMAIL_REQUIRED =ACCOUNT_EMAIL_REQUIRED
    #The user is required to hand over an e-mail address when signing up using a social account.
    SOCIALACCOUNT_EMAIL_VERIFICATION =ACCOUNT_EMAIL_VERIFICATION
    #As ACCOUNT_EMAIL_VERIFICATION, but for social accounts.
    #SOCIALACCOUNT_PROVIDERS (= dict)
    #Dictionary containing provider specific settings.
    LOGIN_REDIRECT_URLNAME='/home/'
    LOGOUT_REDIRECT_URL='/home/'
    SOCIALACCOUNT_PROVIDERS = {
        'google':{
            'SCOPE': ['https://www.googleapis.com/auth/userinfo.profile','email'],
            'AUTH_PARAMS': { 'access_type': 'online' },
            'VERIFIED_EMAIL': False
        },
        'facebook':{
            'SCOPE': ['email', 'publish_stream'],
            #'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
            'METHOD': 'oauth2',
            'LOCALE_FUNC': lambda request: 'en_us',
            #'VERIFIED_EMAIL': False
        }
    }
    """
    Fixtures
    """
    FIXTURE_DIRS = (
        join(BASE_DIR, '/account_system/fixtures/'),
        join(BASE_DIR, '/establishment_system/fixtures/'),
    )
    """
    Configuración Rest Framework
    """
    REST_FRAMEWORK = {
        #'DEFAULT_MODEL_SERIALIZER_CLASS':'rest_framework.serializers.HyperlinkedModelSerializer',
        # Use Django's standard `django.contrib.auth` permissions,
        # or allow read-only access for unauthenticated users.
        # 'DEFAULT_PERMISSION_CLASSES': [
        #     'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
        # ],
        'DEFAULT_AUTHENTICATION_CLASSES': (
            'rest_framework.authentication.BasicAuthentication',
            'rest_framework.authentication.SessionAuthentication',
        )
    }
    """
    Configuración Grapelli
    """
    GRAPPELLI_ADMIN_TITLE= "RecomendadorUD"
    RATINGS_VOTES_PER_IP = 1000
class Dev(Base):
    """Development settings: SpatiaLite database, Whoosh index under
    ~/.../dev, console e-mail backend and django-debug-toolbar enabled."""
    DEBUG = True
    TEMPLATE_DEBUG = True
    DEBUG_TOOLBAR_PATCH_SETTINGS = False
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    SOCIALACCOUNT_EMAIL_VERIFICATION =None
    ACCOUNT_EMAIL_VERIFICATION =None
    INSTALLED_APPS = Base.INSTALLED_APPS + (
        'django_extensions',
        'debug_toolbar',
    )
    STATICFILES_DIRS = (
        join(BASE_DIR, 'static'),
    )
    INTERNAL_IPS = ('127.0.0.1',)
    DEBUG_TOOLBAR_PANELS = [
        'debug_toolbar.panels.versions.VersionsPanel',
        'debug_toolbar.panels.timer.TimerPanel',
        'debug_toolbar.panels.settings.SettingsPanel',
        'debug_toolbar.panels.headers.HeadersPanel',
        'debug_toolbar.panels.request.RequestPanel',
        'debug_toolbar.panels.sql.SQLPanel',
        'debug_toolbar.panels.staticfiles.StaticFilesPanel',
        'debug_toolbar.panels.templates.TemplatesPanel',
        'debug_toolbar.panels.cache.CachePanel',
        'debug_toolbar.panels.signals.SignalsPanel',
        'debug_toolbar.panels.logging.LoggingPanel',
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ]
    MIDDLEWARE_CLASSES = Base.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
    #('debug_toolbar.middleware.DebugToolbarMiddleware',) + Base.MIDDLEWARE_CLASSES
    DATABASES = {
        'default': {
            'NAME': os.path.join(HOME_DIR, 'dev/spatial_db.db'),
            'ENGINE': 'django.contrib.gis.db.backends.spatialite'
        }
    }
    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
            'PATH': os.path.join(HOME_DIR, 'dev/indexs/whoosh_index'),
        },
    }
    """
    Configuración Media
    """
    MEDIA_ROOT = MEDIA_DIR_DEV
class Prod(Base):
    """Production settings (extends Base): PostGIS database, Djrill e-mail,
    collected static files and the production Whoosh index."""
    # NOTE(review): this was previously True. Production must never run with
    # DEBUG enabled -- it leaks settings and stack traces to end users.
    DEBUG = False
    # SECURITY: '*' accepts any Host header; restrict to the real domain(s).
    ALLOWED_HOSTS = ['*']
    EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"
    # Database credentials come from the external CFG mapping, never the repo.
    POSTGRESDATABASE_USER = CFG['POSTGRESDATABASE_USER']
    POSTGRESDATABASE_PASSWORD = CFG['POSTGRESDATABASE_PASSWORD']
    DATABASES = {
        'default': {
            'ENGINE': 'django.contrib.gis.db.backends.postgis',
            'NAME': "recomendadorUD_database",
            'USER': POSTGRESDATABASE_USER,
            # Was an unparseable redacted placeholder; use the credential
            # loaded from CFG just above.
            'PASSWORD': POSTGRESDATABASE_PASSWORD,
            'HOST': 'localhost',
            'PORT': '5432',
        }
    }
    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
            'PATH': os.path.join(HOME_DIR, 'prod/indexs/whoosh_index'),
        },
    }
    STATIC_ROOT = join(BASE_DIR, 'static')
    """
    Configuración Media
    """
    MEDIA_ROOT = MEDIA_DIR_PROD
    INSTALLED_APPS = (
        'grappelli',
    ) + Base.INSTALLED_APPS
| StarcoderdataPython |
34003 | """ Database models
"""
from typing import Tuple
import attr
import sqlalchemy as sa
from .settings import DATCORE_STR, SIMCORE_S3_ID, SIMCORE_S3_STR
#FIXME: W0611:Unused UUID imported from sqlalchemy.dialects.postgresql
#from sqlalchemy.dialects.postgresql import UUID
#FIXME: R0902: Too many instance attributes (11/7) (too-many-instance-attributes)
#pylint: disable=R0902
# Single shared SQLAlchemy metadata registry for this module's tables.
metadata = sa.MetaData()
# File meta data
# One row per stored file. `file_uuid` doubles as the primary key; it mirrors
# the FileMetaData attrs class defined below.
file_meta_data = sa.Table(
    "file_meta_data", metadata,
    sa.Column("file_uuid", sa.String, primary_key=True),
    sa.Column("location_id", sa.String),
    sa.Column("location", sa.String),
    sa.Column("bucket_name", sa.String),
    sa.Column("object_name", sa.String),
    sa.Column("project_id", sa.String),
    sa.Column("project_name", sa.String),
    sa.Column("node_id", sa.String),
    sa.Column("node_name", sa.String),
    sa.Column("file_name", sa.String),
    sa.Column("user_id", sa.String),
    sa.Column("user_name", sa.String)
    # sa.Column("state", sa.String())
)
def _parse_datcore(file_uuid: str) -> Tuple[str, str]:
# we should have 12/123123123/111.txt
object_name = "invalid"
dataset_name = "invalid"
parts = file_uuid.split("/")
if len(parts) > 1:
dataset_name = parts[0]
object_name = "/".join(parts[1:])
return dataset_name, object_name
def _locations():
    """Return the list of known storage locations as {name, id} dicts."""
    # TODO: so far this is hardcoded
    return [
        {"name": SIMCORE_S3_STR, "id": 0},
        {"name": DATCORE_STR, "id": 1},
    ]
def _location_from_id(location_id: str) -> str:
    """Map a storage location id ("0"/"1") to its display string."""
    # TODO create a map to sync _location_from_id and _location_from_str
    return {"0": SIMCORE_S3_STR, "1": DATCORE_STR}.get(location_id, "undefined")
def _location_from_str(location: str) -> str:
    """Inverse of _location_from_id: map a display string back to its id."""
    return {SIMCORE_S3_STR: "0", DATCORE_STR: "1"}.get(location, "undefined")
@attr.s(auto_attribs=True)
class FileMetaData:
    """ This is a proposal, probably not everything is needed.
    It is actually an overkill

    file_name : display name for a file
    location_id : storage location
    location_name : storage location display name
    project_id : project_id
    project_name : project display name
    node_id : node id
    node_name : display_name
    bucket_name : name of the bucket
    object_name : s3 object name = folder/folder/filename.ending
    user_id : user_id
    user_name : user_name

    file_uuid : unique identifier for a file:
        bucket_name/project_id/node_id/file_name = /bucket_name/object_name
    state: one of OK, UPLOADING, DELETED
    """
    file_uuid: str = ""
    location_id: str = ""
    location: str = ""
    bucket_name: str = ""
    object_name: str = ""
    project_id: str = ""
    project_name: str = ""
    node_id: str = ""
    node_name: str = ""
    file_name: str = ""
    user_id: str = ""
    user_name: str = ""

    def simcore_from_uuid(self, file_uuid: str, bucket_name: str):
        """Populate the simcore-S3 fields from a 3-part uuid
        ``project_id/node_id/file_name`` and the given bucket name.

        NOTE(review): the assert vanishes under ``python -O``; the ``if``
        below then silently skips malformed uuids instead of raising.
        """
        parts = file_uuid.split("/")
        assert len(parts) == 3
        if len(parts) == 3:
            self.location = SIMCORE_S3_STR
            self.location_id = SIMCORE_S3_ID
            self.bucket_name = bucket_name
            self.object_name = "/".join(parts[:])
            self.file_name = parts[2]
            self.project_id = parts[0]
            self.node_id = parts[1]
            self.file_uuid = file_uuid
| StarcoderdataPython |
6631804 | <reponame>amitsaha/playground<gh_stars>1-10
"""
Find the common elements among two sorted sets
Desired time complexity: O(m+n)
"""
# Uses a hash table (hence uses O(min(m,n)) extra storage
# space
# This doesn't need the arrays to be sorted
def find_common(hash_t, arr):
    """Print every element of `arr` that is a key of `hash_t`, in arr order."""
    for item in arr:
        # dict.has_key() only exists on Python 2; the `in` operator is
        # equivalent and works on both Python 2 and 3.
        if item in hash_t:
            # Parenthesised print is valid on Python 2 and 3 alike.
            print(item)
def find_sorted_hash(arr1, arr2):
    """Print the common elements of two arrays via a hash table.

    The table is built from the shorter array (O(min(m, n)) extra space)
    and the longer array is scanned against it.
    """
    shorter, longer = (arr1, arr2) if len(arr1) < len(arr2) else (arr2, arr1)
    find_common({k: 1 for k in shorter}, longer)
# No extra storage space
# The array must be sorted
# O(m+n)
def find_common_traverse(arr1, arr2):
    """Print elements common to two *sorted* arrays using a two-pointer walk.

    Runs in O(m + n) with no extra storage.
    """
    i, j = 0, 0
    while i < len(arr1) and j < len(arr2):
        if arr1[i] == arr2[j]:
            # Parenthesised print is valid on Python 2 and 3 alike.
            print(arr1[i])
            i += 1
            j += 1
        elif arr1[i] > arr2[j]:
            # arr2's candidate is too small; advance it.
            j += 1
        else:
            i += 1
# Demo input: both arrays sorted ascending; expected common output is 1, 10, 30.
arr1 = [1,10,20,25,30]
arr2 = [1,10,30]
#find_sorted_hash(arr1, arr2)
find_common_traverse(arr1, arr2)
| StarcoderdataPython |
5149254 | """Avoid colliding predator polygons.
This task serves to showcase collisions. The predators have a variety of
polygonal shapes and bounce off each other and off the walls with Newtonian
collisions. The subject controls a green agent circle. The subject gets negative
reward if contacted by a predators and positive reward periodically.
"""
import collections
import numpy as np
from moog import action_spaces
from moog import physics as physics_lib
from moog import observers
from moog import sprite
from moog import tasks
from moog import shapes
from moog.state_initialization import distributions as distribs
from moog.state_initialization import sprite_generators
def get_config(_):
    """Get environment config.

    Args:
        _: Unused (argument required by the config-loading interface).

    Returns:
        dict with keys 'state_initializer', 'physics', 'task',
        'action_space' and 'observers', as expected by the moog environment.
    """
    ############################################################################
    # Sprite initialization
    ############################################################################
    # Agent
    # Green circle (hsv c0/c1/c2), spawned uniformly inside the arena.
    agent_factors = distribs.Product(
        [distribs.Continuous('x', 0.1, 0.9),
         distribs.Continuous('y', 0.1, 0.9)],
        shape='circle', scale=0.1, c0=0.33, c1=1., c2=0.66,
    )
    # Predators
    # Two custom polygon outlines plus named library shapes, with randomized
    # pose, size and initial (linear and angular) velocity.
    shape_0 = 1.8 * np.array(
        [[-0.3, -0.3], [0.1, -0.7], [0.4, 0.6], [-0.1, 0.25]])
    shape_1 = 1.5 * np.array(
        [[-0.5, -0.3], [-0.1, -0.7], [0.7, 0.1], [0., -0.1], [-0.3, 0.25]])
    predator_factors = distribs.Product(
        [distribs.Continuous('x', 0.2, 0.8),
         distribs.Continuous('y', 0.2, 0.8),
         distribs.Discrete(
             'shape', [shape_0, shape_1, 'star_5', 'triangle', 'spoke_5']),
         distribs.Continuous('angle', 0., 2 * np.pi),
         distribs.Continuous('aspect_ratio', 0.75, 1.25),
         distribs.Continuous('scale', 0.1, 0.15),
         distribs.Continuous('x_vel', -0.03, 0.03),
         distribs.Continuous('y_vel', -0.03, 0.03),
         distribs.Continuous('angle_vel', -0.05, 0.05)],
        c0=0., c1=1., c2=0.8,
    )
    # Walls
    walls = shapes.border_walls(visible_thickness=0.05, c0=0., c1=0., c2=0.5)
    # Create callable initializer returning entire state
    agent_generator = sprite_generators.generate_sprites(
        agent_factors, num_sprites=1)
    predator_generator = sprite_generators.generate_sprites(
        predator_factors, num_sprites=5)

    def state_initializer():
        # Predators may not overlap each other or the walls; the agent may
        # not spawn on top of walls or predators.
        predators = predator_generator(
            disjoint=True, without_overlapping=walls)
        agent = agent_generator(without_overlapping=walls + predators)
        state = collections.OrderedDict([
            ('walls', walls),
            ('predators', predators),
            ('agent', agent),
        ])
        return state
    ############################################################################
    # Physics
    ############################################################################
    agent_friction_force = physics_lib.Drag(coeff_friction=0.25)
    # Predator-predator bounces are Newtonian (symmetric); predator-wall
    # collisions only move the predator (asymmetric).
    asymmetric_collision = physics_lib.Collision(
        elasticity=1., symmetric=False, update_angle_vel=True)
    symmetric_collision = physics_lib.Collision(
        elasticity=1., symmetric=True, update_angle_vel=True)
    # Inelastic so the agent stops at walls instead of bouncing off them.
    agent_wall_collision = physics_lib.Collision(
        elasticity=0., symmetric=False, update_angle_vel=False)
    forces = (
        (agent_friction_force, 'agent'),
        (symmetric_collision, 'predators', 'predators'),
        (asymmetric_collision, 'predators', 'walls'),
        (agent_wall_collision, 'agent', 'walls'),
    )
    physics = physics_lib.Physics(*forces, updates_per_env_step=10)
    ############################################################################
    # Task
    ############################################################################
    # -5 reward on predator contact; +0.2 every 20 steps survived.
    predator_task = tasks.ContactReward(
        -5, layers_0='agent', layers_1='predators')
    stay_alive_task = tasks.StayAlive(
        reward_period=20,
        reward_value=0.2,
    )
    task = tasks.CompositeTask(
        predator_task, stay_alive_task, timeout_steps=200)
    ############################################################################
    # Action space
    ############################################################################
    action_space = action_spaces.Joystick(
        scaling_factor=0.01, action_layers='agent')
    ############################################################################
    # Observer
    ############################################################################
    observer = observers.PILRenderer(
        image_size=(64, 64), anti_aliasing=1, color_to_rgb='hsv_to_rgb')
    ############################################################################
    # Final config
    ############################################################################
    config = {
        'state_initializer': state_initializer,
        'physics': physics,
        'task': task,
        'action_space': action_space,
        'observers': {'image': observer},
    }
    return config
| StarcoderdataPython |
12839383 | <reponame>debian-janitor/ufo2otf-debian<filename>ufo2otf/compilers.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import mkdir
from os.path import splitext, dirname, sep, join, exists, basename
from subprocess import Popen
from diagnostics import diagnostics, known_compilers, FontError
import codecs
import re
diagnostics = diagnostics()
class Compiler:
    """Compiles UFO sources to OTF and, optionally, webfont bundles + CSS.

    `__init__` binds the `compile` entry point to either the FontForge or
    the AFDKO backend, depending on the `afdko` flag and on which
    toolchains the environment diagnostics report as available.
    """

    def __init__(self, infiles, webfonts=False, afdko=False):
        """
        infiles  -- iterable of UFO (or OTF) paths
        webfonts -- also emit woff/ttf(/eot) files and a style.css
        afdko    -- use the Adobe FDK backend instead of FontForge
        Raises FontError when the requested backend is unavailable.
        """
        # we strip trailing slashes from ufo names,
        # otherwise we get confused later on when
        # generating filenames:
        self.infiles = [i.strip(sep) for i in infiles]
        self.webfonts = webfonts
        self.css = ''
        if afdko:
            if diagnostics['afdko']:
                self.compile = self.afdko
            else:
                raise FontError("afdko", diagnostics)
        else:
            if diagnostics['fontforge']:
                self.compile = self.fontforge
            else:
                raise FontError("fontforge", diagnostics)

    def fontforge(self):
        """FontForge backend: generate OTF, plus webfonts and CSS on demand."""
        import fontforge
        eot = False
        if diagnostics['mkeot']:
            eot = True
        for infile in self.infiles:
            outdir = dirname(infile)
            name = splitext(infile)[0]
            font = fontforge.open(infile)
            otf_file_name = join(outdir, basename(name) + '.otf')
            # BUGFIX: this used to read `splitext(infile)[1].lower != 'otf'`,
            # comparing the *bound method* .lower to a string -- always True,
            # so OTF inputs were regenerated over themselves. Note splitext
            # keeps the dot, hence the comparison against '.otf'.
            if splitext(infile)[1].lower() != '.otf':
                """
                Even if the tool is called Ufo2Otf, it can be used on otf’s too:
                In that case it’s just to generate webfonts. If an otf file is the
                infile, we skip otf generation.
                """
                font.generate(otf_file_name, flags=("round"))
            if self.webfonts:
                # Optimise for Web
                font.autoHint()
                # Generate Webfonts
                webfonts_path = join(outdir, 'webfonts')
                if not exists(webfonts_path):
                    mkdir(webfonts_path)
                woff_file_name = join(outdir, 'webfonts', basename(name) + '.woff')
                ttf_file_name = join(outdir, 'webfonts', basename(name) + '.ttf')
                eot_file_name = join(outdir, 'webfonts', basename(name) + '.eot')
                font.generate(woff_file_name, flags=("round"))
                font.generate(ttf_file_name, flags=("round"))
                if eot:
                    # mkeot writes the EOT conversion of the ttf to stdout.
                    eot_file = open(eot_file_name, 'wb')
                    pipe = Popen(['mkeot', ttf_file_name], stdout=eot_file)
                    pipe.wait()
                    # BUGFIX: the handle was previously never closed.
                    eot_file.close()
                # Generating CSS
                #
                # CSS can only cover a limited set of styles:
                # it knows about font weight, and about the difference between
                # regular and italic.
                # It also knows font-style: oblique, but most browser will take
                # the regular variant and slant it.
                font_style = "normal"
                # This tends to work quite well, as long as you have one kind of
                # italic in your font family:
                if font.italicangle != 0:
                    font_style = "italic"
                # CSS weights map quite well to Opentype, so including families
                # with lots of different weights is no problem.
                #
                # http://www.microsoft.com/typography/otspec/os2ver0.htm#wtc
                # ->
                # http://www.w3.org/TR/CSS21/fonts.html#font-boldness
                font_weight = font.os2_weight
                #
                # Anything else, like condensed, for example, will need to be
                # be put into a different font family, because there is no way
                # to encode it into CSS.
                #
                # What we do here, is try to determine whether this is the case.
                # ie:
                # >>> font.fullname
                # 'Nimbus Sans L Bold Condensed Italic'
                # >>> font.familyname
                # 'Nimbus Sans L'
                # >>> font.weight
                # 'Bold'
                # >>> re.findall("italic|oblique", f.fullname, re.I)
                # ['Italic']
                #
                # By then removing all these components from the full name,
                # we find out there is a specific style such as, in this case,
                # 'Condensed'
                font_family = font.familyname
                specifics = re.sub("italic|oblique", '',
                                   font.fullname.
                                   replace(font.familyname, '').
                                   replace(font.weight, ''),
                                   flags=re.I).strip()
                if specifics:
                    font_family = "%s %s" % (font.familyname, specifics)
                if eot:
                    self.css += """@font-face {
    font-family: '%s';
    font-style: '%s';
    font-weight: '%s';
    src: url('%s'); /* IE9 Compat Modes */
    src: url('%s?#iefix') format('embedded-opentype'),
         url('%s') format('woff'),
         url('%s') format('truetype');
}
""" % (font_family,
       font_style,
       font_weight,
       basename(eot_file_name),
       basename(eot_file_name),
       basename(woff_file_name),
       basename(ttf_file_name))
                else:
                    self.css += """@font-face {
    font-family: '%s';
    font-style: '%s';
    font-weight: '%s';
    src: url('%s') format('woff'),
         url('%s') format('truetype');
}
""" % (font_family,
       font_style,
       font_weight,
       basename(woff_file_name),
       basename(ttf_file_name))
        if self.css:
            # All webfonts land next to the first infile's webfonts folder.
            css_path = join(dirname(self.infiles[0]), 'webfonts', 'style.css')
            with codecs.open(css_path, 'w', 'UTF-8') as css_file:
                css_file.write(self.css)

    def afdko(self):
        """AFDKO backend: compile each UFO to a release-mode OTF via ufo2fdk."""
        import ufo2fdk
        from robofab.objects.objectsRF import RFont
        compiler = ufo2fdk.OTFCompiler()
        for infile in self.infiles:
            outfile = splitext(infile)[0] + '.otf'
            font = RFont(infile)
            compiler.compile(font, outfile, releaseMode=True)
| StarcoderdataPython |
3504880 | <filename>pecos/decoders/dummy_decoder/dummy_decoder.py
# -*- coding: utf-8 -*-
# ========================================================================= #
# Copyright 2018 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================= #
"""
A dummy decoder that gives no recovery (outputs do nothing) given any input.
"""
class DummyDecoder(object):
    """Decoder stub: always reports an empty recovery, whatever the input."""

    def __init__(self):
        pass

    @staticmethod
    def decode(*args, **kwargs):
        """Ignore all arguments and return an empty recovery list.

        Args:
            *args: Ignored.
            **kwargs: Ignored.

        Returns:
            list: Always empty (no correction is ever applied).
        """
        return []
| StarcoderdataPython |
4857116 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
The MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Copyright 2019,2020 <NAME> <<EMAIL>>
'''
from tornado import httpclient, ioloop, gen
import sys
from .conf import KBConfig
from .log import KBLogger
from .timer import Timer
from .conn import Connection
from .msg import Stack, CommandData
conf = None
log = None
def ctRun(handler):
    """Bootstrap and run an rpct client application.

    handler is the application class (typically an RPCTMain subclass). It is
    only instantiated and run when the caller's module is executed as a
    script. Exits -1 on init failure, 1 when the app requested a restart.
    """
    if handler.__module__ == "__main__":
        #Init configuration and Log system
        try:
            print("Client initializing...")
            conf = KBConfig("config","./")
            log = KBLogger(conf.CLIENT.log_file, "rpct")
            log.level = conf.CLIENT.log_level
            stage1 = log.job("Stage1")
            stage1.i("Configuration loaded",
                     "log_level:"+conf.CLIENT.log_level)
        except Exception as inst:
            print("Initializing failed")
            print(type(inst))
            print(inst.args)
            print(inst)
            sys.exit(-1)
        mainloop = ioloop.IOLoop.instance()
        # Attach config/log to the loop so confReloader and the app find them.
        mainloop.conf = conf
        mainloop.log = log
        # The first confReloader call arms this 60 s periodic reload timer.
        mainloop.confReload = ioloop.PeriodicCallback(confReloader, 60000)
        mainloop.add_callback(confReloader)
        application = handler(mainloop)
        stage1.i("Starting...")
        mainloop.start()
        # BUGFIX: this used to test `handler.isRestart`, i.e. the *property
        # object* on the class, which is always truthy -- so the client always
        # exited with status 1. Query the running instance instead.
        if application.isRestart:
            sys.exit(1)
async def confReloader():
    """Periodic task that (re)loads configuration on the running IO loop.

    The very first invocation (scheduled via add_callback) only arms the
    PeriodicCallback; every subsequent tick reloads the config file and
    propagates the configured log level.
    """
    loop = ioloop.IOLoop.current()
    if not loop.confReload.is_running():
        loop.confReload.start()
        return
    try:
        loop.conf.reload()
        loop.log.level = loop.conf.CLIENT.log_level
    except Exception as err:
        loop.log.job("ConfReloader").e_tb("Conf updating failed", err)
class RPCTMain(object):
    """Base class for rpct client applications.

    Drives a timer-paced state machine against the server connection:
    auth -> ping (until awakened) -> update loop -> back to ping on sleep,
    re-authenticating on connection failure. Subclasses override the hook
    methods (prepare/awake/sleep/wheel/pre_update/post_update).
    """

    def __init__(self, mainloop):
        self.__mainloop = mainloop
        self.__log = mainloop.log.job("Main")
        self.__clog = mainloop.log.job("Client")
        self.__timer = Timer()
        # Task names the server has asked this node to publish.
        self.__tasklist = []
        # Remote task ids this node follows, as pushed by the server.
        self.__followup = []
        self.__usertask = {}
        self.__taskData = Stack()
        self.__cmds = CommandData()
        self.__conn = Connection(
            server = self.conf.SERVER.url,
            atype = self.conf.USER.auth_type,
            acode = self.conf.USER.auth_code,
            uname = self.conf.USER.name,
            nname = self.conf.NODE.name,
            log = self.logger,
            key = self.conf.CLIENT.key,
            cert = self.conf.CLIENT.cert)
        self.__amap = []
        self.__restart = False
        # Subclass hook, then kick off the auth state machine.
        self.prepare()
        self.__timer.start(self.__auth)

    @property
    def amap(self):
        """Auto-map entries: (task_name, object, attribute_name) triples."""
        return(self.__amap)

    @amap.setter
    def amap(self, amap):
        self.__amap = amap

    @property
    def conf(self):
        return(self.__mainloop.conf)

    @property
    def logger(self):
        """The full logger object (for passing to sub-components)."""
        return(self.__mainloop.log)

    @property
    def log(self):
        """The per-client log job, for subclass use."""
        return(self.__clog)

    @property
    def tasklist(self):
        return(self.__tasklist)

    @property
    def followup(self):
        return(self.__followup)

    @property
    def usertask(self):
        return(self.__usertask)

    def setTask(self, tname, data):
        """Register payload for a server-announced task; ignores unknown names."""
        if(tname in self.__tasklist):
            self.__taskData.append({
                "uname": self.conf.USER.name,
                "nname" : self.conf.NODE.name,
                "name": tname,
                "id": self.conf.USER.name+"/"+self.conf.NODE.name+"/"+tname},
                data)

    def getTask(self, tname):
        """Return the stored payload for tname (task id is user/node/tname)."""
        return(self.__taskData.data(self.conf.USER.name+"/"+self.conf.NODE.name+"/"+tname))

    def updateTask(self, tname, data):
        """Replace the payload of tname; logs an error for unknown names."""
        self.__taskData.update(self.conf.USER.name+"/"+self.conf.NODE.name+"/"+tname, data)
        if(tname not in self.__tasklist):
            self.__log.e("Task name not found", tname)

    async def amap_update(self):
        """Copy each auto-mapped object attribute into its task (best effort)."""
        for m in self.__amap:
            try:
                getattr(m[1], m[2])
                self.updateTask(m[0], getattr(m[1], m[2]))
            except:
                self.__log.w("AutoMap failed", str(m))

    @property
    def isRestart(self):
        """True when restart() was called before the loop stopped."""
        return(self.__restart)

    #
    #
    #Initializing
    def prepare(self):
        pass
    #Client awakening
    async def awake(self):
        pass
    #Client sleeping
    async def sleep(self):
        pass
    #Runs after login as a thread
    async def wheel(self):
        pass
    #Updates data before every up
    async def pre_update(self):
        pass
    #Runs after every up, so messages contains commands and followed tasks data
    async def post_update(self):
        pass

    async def __dowheel(self):
        # Cooperative background loop driving the subclass wheel() hook.
        while True:
            await self.wheel()
            await gen.sleep(0.01)

    def taskAlias(self, tname):
        """Return a TaskAlias handle bound to this manager and tname."""
        return(TaskAlias(self, tname))

    async def __auth(self):
        self.__timer.pause()
        await self.__conn.auth(self.__authResult)

    # NOTE(review): resp={} is a mutable default; it is never mutated here,
    # but the pattern is risky -- consider resp=None.
    async def __authResult(self, status, resp={}):
        """Connection callback: on success start wheel + ping, else retry in 30 s."""
        stack = Stack()
        if status:
            self.__timer.endtiming()
            stack.load(resp["stack"])
            if(stack.data("root/server/xhrclientauth")["result"] == True):
                self.__log.i("Authentication successful.")
                await self.__parseCommands(stack)
                self.__mainloop.current().spawn_callback(self.__dowheel)
                self.__timer.start(self.__ping)
                return()
            else:
                self.__log.w("Authentication error...")
        self.__timer.endtiming(30)
        self.__timer.play()

    async def __parseCommands(self, stack):
        """Best-effort extraction of tasklist/followup/task server commands."""
        try:
            self.__tasklist = stack.data("root/server/command")["tasklist"]
            self.__log.d("Tasklist update", self.tasklist)
            for tname in self.__tasklist:
                if(not self.getTask(tname)):
                    self.setTask(tname, {})
        except: pass
        try:
            self.__followup = stack.data("root/server/command")["followup"]
            #self.__log.d("Followings update", self.followup)
            self.__cmds.cmd("followup", list(self.__followup.keys()))
        except: pass
        try:
            self.__usertask = stack.data("root/server/command")["task"]
        except: pass

    async def __ping(self):
        self.__timer.pause()
        #Send followup uri list
        await self.__conn.ping(self.__pingResult, self.__cmds)

    async def __pingResult(self, status, resp={}):
        """Ping callback: enter the update loop when the server says 'awake'."""
        stack = Stack()
        if status:
            self.__timer.endtiming()
            stack.load(resp["stack"])
            if(stack.data("root/server/xhrclientping")["result"] == True):
                if(stack.data("root/server/xhrclientping")["awake"] == True):
                    self.__log.i("Awakening...")
                    await self.__parseCommands(stack)
                    await self.awake()
                    self.__timer.start(self.__update)
                    return()
                else:
                    self.__timer.play()
        else:
            # Transport failure: fall back to re-authentication after 10 s.
            self.__timer.endtiming(10)
            self.__timer.start(self.__auth)

    async def __update(self):
        self.__timer.pause()
        sendData = {}
        sendStack = Stack()
        await self.pre_update()
        await self.amap_update()
        # Convert data as {"taskname" : {task data...} ,... }
        for tdata in self.__taskData.stack:
            sendData[tdata["name"]] = tdata["data"]
        sendStack.append({
            "uname": self.conf.USER.name,
            "nname" : self.conf.NODE.name,
            "name": "",
            "id": self.conf.USER.name+"/"+self.conf.NODE.name},
            sendData)
        await self.__conn.update(self.__updateResult, sendStack)

    async def __updateResult(self, status, resp={}):
        """Update callback: keep looping while awake, drop to ping on sleep/failure."""
        self.__timer.endtiming()
        stack = Stack()
        if status:
            stack.load(resp["stack"])
            if(stack.data("root/server/xhrclientupdate")["result"] == True):
                if(stack.data("root/server/xhrclientupdate")["awake"] == True):
                    await self.__parseCommands(stack)
                    await self.post_update()
                    self.__usertask = {}
                    self.__timer.play()
                    return()
                else:
                    self.__log.i("Sleeping...")
                    await self.sleep()
        self.__timer.start(self.__ping)

    def restart(self):
        """Stop the main loop flagged for restart (ctRun then exits 1)."""
        self.__log.i("Restarting...")
        self.__restart = True
        self.__mainloop.stop()

    def exit(self):
        """Stop the main loop for a normal shutdown."""
        self.__log.i("Exiting...")
        self.__mainloop.stop()
class TaskAlias(object):
    """Thin, stateless handle onto one named task of an RPCTMain manager.

    Reads and writes are forwarded straight to the owning manager, so the
    alias never caches task data itself.
    """

    def __init__(self, manager, tname):
        super(TaskAlias, self).__init__()
        self.__mgr = manager
        self.__task_name = tname

    @property
    def name(self):
        """Name of the aliased task (read-only)."""
        return self.__task_name

    @property
    def data(self):
        """Current payload of the task, fetched live from the manager."""
        return self.__mgr.getTask(self.__task_name)

    @data.setter
    def data(self, data):
        self.__mgr.updateTask(self.__task_name, data)
| StarcoderdataPython |
11235406 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
#Name: RepairMosaicDatasets.py
#
#Purpose: Updates paths referenced in non-reference mosaic datasets stored
# in file geodatabases to point to the Ops Server being setup.
#
#==============================================================================
import os
import sys
import traceback
import arcpy
# Add "Root folder"\SupportFiles to sys path inorder to import
# modules in subfolder
sys.path.append(os.path.join(os.path.dirname(
os.path.dirname(os.path.dirname(sys.argv[0]))), "SupportFiles"))
from Utilities import findFolderPath
print_msg = True
total_success = True
script_name = os.path.basename(sys.argv[0])
def getFileGeodatabases(root_path):
    """Return every true file geodatabase found beneath root_path.

    findFolderPath only matches folders by the "*.gdb" naming convention,
    so each hit is verified with arcpy: a folder merely *named* like a
    geodatabase that is not a local (file) geodatabase is dropped.
    """
    candidates = findFolderPath(root_path, "*.gdb", False)
    file_gdbs = []
    for candidate in candidates:
        if arcpy.Describe(candidate).workspaceType.upper() == "LOCALDATABASE":
            file_gdbs.append(candidate)
    return file_gdbs
def check_args():
# ---------------------------------------------------------------------
# Check arguments
# ---------------------------------------------------------------------
if len(sys.argv) <> 3:
print '\n{} <RootFolderToSearch> <RemapPathsList>'.format(script_name)
print '\nWhere:'
print '\n\t<RootFolderToSearch> (required): the root folder path to search for mosaic datasets.'
print '\n\t<RemapPathsList> (required): a list of the paths to remap. Include the current path stored '
print '\t\tin the mosaic dataset and the path to which it will be changed. You can enter an'
print '\t\tasterisk (*) as the original path if you wish to change all your paths.'
print '\n\t\tPattern (surround in double-qoutes): [[original_path new_path];...]'
print '\n\t\tExamples:'
print '\t\t\t"C:\OriginalSource\Data D:\NewSource\Data"'
print '\t\t\t"C:\OriginalSource1\Data D:\NewSource1\Data; C:\OriginalSource2\Data D:\NewSource2\Data"'
print '\t\t\t"\\\\FileServer\OriginalSource\Data \\\\FileServer\NewSource\Data"'
print '\n\tNOTE: script only repairs paths in file geodatabase non-reference mosaic datasets.'
return None
else:
# Set variables from parameter values
root_path = sys.argv[1]
remap_paths = sys.argv[2]
return root_path, remap_paths
def main():
    """Find file geodatabases under the given root and repair the paths in
    every non-referenced mosaic dataset they contain.

    Exits 0 on success, 1 when any repair reported errors or an exception
    occurred.
    """
    total_success = True
    # Check arguments
    results = check_args()
    if not results:
        sys.exit(0)
    root_path, remap_paths = results
    try:
        print '\n{}'.format('=' * 80)
        print 'Repair Mosaic Datasets'
        print '{}\n'.format('=' * 80)
        print '{:<15}{}'.format('Root folder:', root_path)
        print '{:<15}{}\n'.format('Remap paths:', remap_paths)
        print 'Searching {} looking for file geodatabases...'.format(root_path)
        gdbs = getFileGeodatabases(root_path)
        for gdb in gdbs:
            print '\n\n{}'.format('=' * 80)
            print 'Found file geodatabase: {}'.format(gdb)
            print '\tChecking for existence of non-referenced mosaic datasets...'
            # Get any mosaic datasets in geodatabase
            arcpy.env.workspace = gdb
            mosaic_datasets = arcpy.ListDatasets('*', 'Mosaic')
            # Modify list to contain only non-reference mosaic datasets
            mosaic_datasets[:] = [mosaic_dataset for mosaic_dataset in mosaic_datasets if not arcpy.Describe(mosaic_dataset).referenced]
            if len(mosaic_datasets) == 0:
                print '\tNone found.'
            else:
                print '\tFound {} non-referenced mosaic dataset(s)...'.format(len(mosaic_datasets))
                for mosaic_dataset in mosaic_datasets:
                    print '\n\t{}'.format('-' * 70)
                    print '\tRepairing paths in mosaic dataset {}...'.format(mosaic_dataset)
                    # NOTE(review): `results` is reused here, shadowing the
                    # check_args() result captured above.
                    results = arcpy.RepairMosaicDatasetPaths_management(mosaic_dataset, remap_paths)
                    # maxSeverity == 2 flags an error-level geoprocessing message.
                    if results.maxSeverity == 2:
                        total_success = False
                    print '\n{}'.format(results.getMessages())
    except:
        # Bare except: any failure is reported and reflected in the exit code.
        total_success = False
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        # Print Python error messages for use in Python / Python Window
        print
        print "***** ERROR ENCOUNTERED *****"
        print pymsg + "\n"
    finally:
        if total_success:
            print "\n\nDone. Review output for errors.\n"
            sys.exit(0)
        else:
            print "\n\nDone. ERROR(s) occurred during mosaic dataset repair.\n"
            sys.exit(1)
if __name__ == "__main__":
main() | StarcoderdataPython |
3557756 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Train a SciKitLearn K Nearest Neighbors classifier using sample data. """
import json
import sys
from math import pow, floor
from os import path, getcwd
import numpy as np
import pwm_wave_lib as pwlib
from sklearn.neighbors import KNeighborsClassifier
from sklearn.externals import joblib
from sklearn.datasets.base import Bunch
from sklearn.model_selection import cross_val_score
import re
from glob import glob
import argparse
# Command-line interface: the sample directory defaults to the CWD.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', default=getcwd(),
                    help='Path to model training samples. Defaults to current directory.')
args = parser.parse_args()
# Feature rows and class labels accumulated while scanning the samples.
data = []
target = []
# Samples are laid out as <dir>/NN_duty/serialNN.json (NN = duty cycle).
samples = glob(''.join([args.directory, '/??_duty/serial??.json']))
no_samples = 1
end = len(samples)
def status_update(filename, no_samples, end):
    """ Return a status update message. """
    parent, leaf = path.split(filename)
    pct = floor(round(no_samples / end, 2) * 100)
    return '{}% complete. {}/{}'.format(repr(pct), path.basename(parent), leaf)
# Banner: the box-drawing characters require a UTF-8-capable terminal.
print()
print('┌──────────────────────┐')
print('│ Begin Model Training │')
print('└──────────────────────┘\n', flush=True)
# Build the feature matrix (two histogram-bin counts near the waveform
# minimum) and the label vector (duty cycle parsed from the directory name).
for filename in samples:
    if path.isfile(filename):
        # Read and close the sample promptly (the original leaked the file
        # handle via open(...).read()).
        with open(filename) as sample_file:
            json_data = json.loads(sample_file.read())
        # NOTE(review): every dict has a .values attribute, so this check
        # only rejects non-dict JSON (e.g. a bare list) -- confirm intent.
        if hasattr(json_data, 'values'):
            p = re.search(r'([\d]{2})_duty', path.dirname(filename))
            target.append(int(p.group(1)))
            limits = pwlib.get_minima(json_data)
            volts = np.asarray([round(v, 4) for v in json_data['values'][limits[0]:limits[1]]])
            histogram = np.histogram(volts, bins=3, range=(volts.min(), volts.min() + 0.05))
            data.append((histogram[0][0], histogram[0][1]))
        else:
            print('Extracted JSON data has no attribute "values".', file=sys.stderr, flush=True)
    else:
        print(filename, 'is not a regular file.', file=sys.stderr, flush=True)
    print(status_update(filename, no_samples, end), flush=True)
    no_samples += 1
# BUGFIX: was `len(data) is 0` -- identity comparison against an int literal
# (a SyntaxWarning on modern Pythons); value equality is what is meant.
if len(data) == 0 or len(target) == 0:
    print('Data array collection error: no data found.', file=sys.stderr, flush=True)
    exit(1)
X = np.asarray(data)
y = np.asarray(target)
# Persist the raw samples alongside the fitted model.
samples = Bunch()
samples.data = data
samples.target = target
samples_file = path.join(args.directory, 'poly2d.pkl.xz')
joblib.dump(samples, samples_file)
cv_neighbors = 5
knn = KNeighborsClassifier(n_neighbors=cv_neighbors, n_jobs=-1)
knn.fit(X, y)
model_file = path.join(args.directory, 'knn_model.pkl.xz')
joblib.dump(knn, model_file)
cv_folds = 5
try:
    scores = cross_val_score(knn, X, y, cv=cv_folds)
except ValueError as e:
    message = 'Error computing cross_val_score.'
    print(message, e, file=sys.stderr, flush=True)
    exit(1)
# Standard error of the estimate over the training set. The loop variable
# was renamed from `target`, which shadowed the label list built above.
sum_sq = 0
p = knn.predict(X)
for guess, actual in zip(p, y):
    sum_sq += pow(guess - actual, 2)
standard_error_estimate = sum_sq / len(X)
output = json.JSONEncoder().encode({
    'cross-validation-accuracy': scores.mean(),
    'cross-validation-error': scores.std(),
    'cross-validation-neighbors': cv_neighbors,
    'cross-validation-folds': cv_folds,
    'standard-error-estimate': standard_error_estimate,
    'samples': samples_file,
    'model': model_file})
print()
print(output, flush=True)
| StarcoderdataPython |
1628433 | <filename>enterprise-repo/enterprepo/pluginrepo/migrations/0007_auto_20170923_1456.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-23 14:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a download counter and an optional icon URL to the Plugin model."""

    dependencies = [
        ('pluginrepo', '0006_auto_20170915_0657'),
    ]

    operations = [
        migrations.AddField(
            model_name='plugin',
            name='downloads',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='plugin',
            name='iconUrl',
            field=models.CharField(max_length=300, null=True),
        ),
    ]
| StarcoderdataPython |
3462781 | """
Used to generate the number of realisations for a given list of sources.
Realisation counts are based on a model as provided by model_Mw, model_NumSim
Inputs: A whitespace delimited file with source names and corresponding magnitudes, and a source list file with one source per line.
Outputs: A whitespace delimited file with source names and corresponding number of simulation counts and an image displaying the number of simulations for a given magnitude.
This was created based on an example matlab file found at https://wiki.canterbury.ac.nz/display/QuakeCore/Step+2%3A+Determine+the+number+of+simulation+jobs+for+a+given+fault
This code was tested using python2 and python3
"""
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
# Piecewise-linear model: number of realisations as a function of magnitude.
model_Mw = [5.5, 6.0, 8.0, 8.5]
model_NumSim = [10, 10, 50, 50]

input_file_name = "Fault_LeonardMag_Length_Width_Rake.txt"
cs_list_file_name = "CS_list_v18p6.txt"
output_file_name = "fault_selection_c18p6.txt"
output_image_file_name = "fig_NumSim_vs_Mw.png"

assert len(model_Mw) == len(
    model_NumSim
), "model_MW and model_NumSim must be the same length"
assert len(model_Mw) > 1, "model_MW and model_NumSim must have length greater than 1"

# (Name, magnitude) for every fault. Use `with` so the file handle is closed
# (the original left it open via a bare `open()` in the for statement).
fault = []
with open(input_file_name) as input_file:
    for line in input_file:
        parts = line.split()
        fault.append((parts[0], float(parts[1])))

# Source names present in the CS list; a set makes the membership test below
# O(1) instead of scanning a list per fault.
with open(cs_list_file_name) as cs_file:
    srf = {line.split()[0] for line in cs_file}

# (Name, Magnitude, Num_sims) for faults that appear in both files.
data = []
for event in fault:
    if event[0] in srf:
        data.append(list(event[0:2]))

# Linearly interpolate the realisation count between model break points.
# NOTE(review): magnitudes >= model_Mw[-1] fall outside every half-open
# interval and keep num_sim == 0 — presumably intentional; confirm.
for event in data:
    fault_Mw = event[1]
    num_sim = 0
    for i in range(0, len(model_Mw) - 1):
        if (fault_Mw >= model_Mw[i]) and (fault_Mw < model_Mw[i + 1]):
            num_sim = (
                (model_NumSim[i + 1] - model_NumSim[i])
                / (model_Mw[i + 1] - model_Mw[i])
            ) * (fault_Mw - model_Mw[i]) + model_NumSim[i]
    event.append(math.ceil(num_sim))

print("Total number of realisations =" + str(sum([event[2] for event in data])))

with open(output_file_name, "w") as output_file:
    for event in data:
        output_file.write("{0} {1}r \n".format(event[0], event[2]))

# Sort by realisation count, then magnitude, for a monotone-looking plot.
data.sort(key=lambda x: (x[2], x[1]))

plt.figure()
plt.plot([x[1] for x in data], [y[2] for y in data], linewidth=1.5, markersize=8)
plt.xlabel("Rupture magnitude, Mw")
plt.ylabel("Number of realisations")
plt.axis([model_Mw[0], model_Mw[-2], 0, 1.2 * max([event[2] for event in data])])
plt.grid(True, "both")
# plt.axes().yaxis.set_major_locator(MultipleLocator(5))
plt.savefig(output_image_file_name, format="png")
| StarcoderdataPython |
302644 | from unityagents import UnityEnvironment
from src.config import UNITY_ENV_PATH
class UnityRun:
    """Context manager owning a UnityEnvironment for the lifetime of a
    ``with`` block; the environment is closed on exit."""

    def __init__(self):
        self.env = UnityEnvironment(UNITY_ENV_PATH)

    def __enter__(self):
        return self.env

    def __exit__(self, exc_type, exc_value, traceback):
        # The context-manager protocol always passes three exception
        # arguments; the original signature `__exit__(self)` raised a
        # TypeError on every `with` exit. Returning None (implicitly)
        # propagates any in-flight exception.
        self.env.close()
| StarcoderdataPython |
1900952 | import sqlite3
from fpb.base import common
CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS fpb (
id INTEGER PRIMARY KEY,
x REAL
);
"""
INSERT_1D = "INSERT INTO fpb(x) VALUES(?);"
CREATE_TABLE_2D = """
CREATE TABLE IF NOT EXISTS fpb (
id INTEGER PRIMARY KEY,
x REAL,
y REAL
);
"""
INSERT_2D = "INSERT INTO fpb(x, y) VALUES(?, ?);"
SUM = "SELECT SUM(x) FROM fpb;"
SUM_2D = "SELECT SUM(x), SUM(y) FROM fpb;"
DROP_TABLE = "DROP TABLE fpb;"
MAX = "SELECT MAX(x) FROM fpb;"
AVG = "SELECT AVG(x) FROM fpb;"
class BaseSqliteRunner(common.Runner):
    """Common plumbing for SQLite3 benchmark runners: a lazily created
    in-memory database connection and cursor."""

    extra_data = {
        'sqlite_version': sqlite3.version,
    }

    def get_dtype(self):
        """Used by some framework"""
        return self.dtype

    def check_output(self, output):
        # Nothing to validate for SQL-aggregate results.
        pass

    @property
    def db(self):
        """Lazily opened in-memory SQLite connection."""
        try:
            return self._db
        except AttributeError:
            self._db = sqlite3.connect(':memory:')
            return self._db

    @property
    def cursor(self):
        """Lazily created cursor on :attr:`db`."""
        try:
            return self._cursor
        except AttributeError:
            self._cursor = self.db.cursor()
            return self._cursor

    def tear_down(self):
        self.cursor.execute(DROP_TABLE)

    def _set_pragma(self):
        # Trade durability for speed; a benchmark database needs no journal.
        self.cursor.execute("PRAGMA shrink_memory")
        self.cursor.execute("PRAGMA journal_mode = OFF")
        # self.cursor.execute("PRAGMA synchronous = 0")
class BaseSqlite1dRunner(common.Runner1dMixin, BaseSqliteRunner):
    """Helpers for SQLite3 Runners in 1 dimension array"""

    def prepare(self, size, **kwargs):
        self._set_pragma()
        rows = ((self.random.random(),) for _ in range(size))
        self.cursor.execute(CREATE_TABLE)
        self.cursor.executemany(INSERT_1D, rows)
        # NOTE: `rows` has been consumed by executemany; callers receive an
        # exhausted generator, matching the historical behavior.
        return rows
class BaseSqlite2dRunner(common.Runner2dMixin, BaseSqliteRunner):
    """Helpers for SQLite3 Runners in 2 dimension array"""

    def prepare(self, size, size_y, **kwargs):
        self._set_pragma()
        rows = (
            (self.random.random(), self.random.random())
            for _ in range(size)
        )
        self.cursor.execute(CREATE_TABLE_2D)
        self.cursor.executemany(INSERT_2D, rows)
        # NOTE: `rows` is already consumed; returned for interface parity
        # with the 1-D runner.
        return rows
| StarcoderdataPython |
3412944 | <filename>vit_keras/layers.py
# pylint: disable=arguments-differ,missing-function-docstring,missing-class-docstring,unexpected-keyword-arg,no-value-for-parameter
import tensorflow as tf
import tensorflow_addons as tfa
class ClassToken(tf.keras.layers.Layer):
    """Append a class token to an input layer."""

    def build(self, input_shape):
        # Single learnable [CLS] embedding, zero-initialized, shared across
        # the batch.
        cls_init = tf.zeros_initializer()
        self.hidden_size = input_shape[-1]
        self.cls = tf.Variable(
            name="cls",
            initial_value=cls_init(shape=(1, 1, self.hidden_size), dtype="float32"),
            trainable=True,
        )

    def call(self, inputs):
        batch_size = tf.shape(inputs)[0]
        # Tile the token across the batch; the cast supports mixed precision.
        cls_broadcasted = tf.cast(
            tf.broadcast_to(self.cls, [batch_size, 1, self.hidden_size]),
            dtype=inputs.dtype,
        )
        # Prepend along the sequence axis.
        return tf.concat([cls_broadcasted, inputs], 1)
class AddPositionEmbs(tf.keras.layers.Layer):
    """Adds (optionally learned) positional embeddings to the inputs."""

    def build(self, input_shape):
        assert (
            len(input_shape) == 3
        ), f"Number of dimensions should be 3, got {len(input_shape)}"
        # One learned embedding per sequence position, shared across the batch.
        self.pe = tf.Variable(
            name="pos_embedding",
            initial_value=tf.random_normal_initializer(stddev=0.06)(
                shape=(1, input_shape[1], input_shape[2])
            ),
            dtype="float32",
            trainable=True,
        )

    def call(self, inputs):
        # Cast supports mixed-precision inputs.
        return inputs + tf.cast(self.pe, dtype=inputs.dtype)
class MultiHeadSelfAttention(tf.keras.layers.Layer):
    """Multi-head self-attention with learned Q/K/V projections; returns
    both the attended output and the attention weights."""

    def __init__(self, *args, num_heads, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_heads = num_heads

    def build(self, input_shape):
        hidden_size = input_shape[-1]
        num_heads = self.num_heads
        if hidden_size % num_heads != 0:
            raise ValueError(
                f"embedding dimension = {hidden_size} should be divisible by number of heads = {num_heads}"
            )
        self.hidden_size = hidden_size
        self.projection_dim = hidden_size // num_heads
        self.query_dense = tf.keras.layers.Dense(hidden_size, name="query")
        self.key_dense = tf.keras.layers.Dense(hidden_size, name="key")
        self.value_dense = tf.keras.layers.Dense(hidden_size, name="value")
        self.combine_heads = tf.keras.layers.Dense(hidden_size, name="out")

    # pylint: disable=no-self-use
    def attention(self, query, key, value):
        # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V.
        score = tf.matmul(query, key, transpose_b=True)
        dim_key = tf.cast(tf.shape(key)[-1], score.dtype)
        scaled_score = score / tf.math.sqrt(dim_key)
        weights = tf.nn.softmax(scaled_score, axis=-1)
        output = tf.matmul(weights, value)
        return output, weights

    def separate_heads(self, x, batch_size):
        # (batch, seq, hidden) -> (batch, heads, seq, projection_dim).
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, inputs):
        batch_size = tf.shape(inputs)[0]
        query = self.query_dense(inputs)
        key = self.key_dense(inputs)
        value = self.value_dense(inputs)
        query = self.separate_heads(query, batch_size)
        key = self.separate_heads(key, batch_size)
        value = self.separate_heads(value, batch_size)
        attention, weights = self.attention(query, key, value)
        # Merge heads back: (batch, seq, hidden).
        attention = tf.transpose(attention, perm=[0, 2, 1, 3])
        concat_attention = tf.reshape(attention, (batch_size, -1, self.hidden_size))
        output = self.combine_heads(concat_attention)
        return output, weights
class TransformerBlock(tf.keras.layers.Layer):
    """Pre-norm Transformer encoder block: multi-head self-attention and an
    MLP, each behind LayerNorm with a residual connection. `call` returns
    both the block output and the attention weights."""

    def __init__(self, *args, num_heads, mlp_dim, dropout, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.dropout = dropout

    def build(self, input_shape):
        self.att = MultiHeadSelfAttention(
            num_heads=self.num_heads,
            name="MultiHeadDotProductAttention_1",
        )
        self.mlpblock = tf.keras.Sequential(
            [
                tf.keras.layers.Dense(
                    self.mlp_dim,
                    activation="linear",
                    name=f"{self.name}/Dense_0",
                ),
                # Use the built-in gelu when available (TF >= 2.4); fall back
                # to the tensorflow_addons implementation otherwise.
                tf.keras.layers.Lambda(
                    lambda x: tf.keras.activations.gelu(x, approximate=False)
                )
                if hasattr(tf.keras.activations, "gelu")
                else tf.keras.layers.Lambda(
                    lambda x: tfa.activations.gelu(x, approximate=False)
                ),
                tf.keras.layers.Dropout(self.dropout),
                tf.keras.layers.Dense(input_shape[-1], name=f"{self.name}/Dense_1"),
                tf.keras.layers.Dropout(self.dropout),
            ],
            name="MlpBlock_3",
        )
        self.layernorm1 = tf.keras.layers.LayerNormalization(
            epsilon=1e-6, name="LayerNorm_0"
        )
        self.layernorm2 = tf.keras.layers.LayerNormalization(
            epsilon=1e-6, name="LayerNorm_2"
        )
        self.dropout_layer = tf.keras.layers.Dropout(self.dropout)

    def call(self, inputs, training):
        x = self.layernorm1(inputs)
        x, weights = self.att(x)
        x = self.dropout_layer(x, training=training)
        x = x + inputs
        y = self.layernorm2(x)
        y = self.mlpblock(y)
        return x + y, weights

    def get_config(self):
        # Merge with the base layer config (name, trainable, dtype, ...) so
        # the layer round-trips through Keras serialization. The previous
        # implementation returned only the custom fields, which broke
        # save/load via the standard `from_config` path.
        config = super().get_config()
        config.update(
            {
                "num_heads": self.num_heads,
                "mlp_dim": self.mlp_dim,
                "dropout": self.dropout,
            }
        )
        return config
| StarcoderdataPython |
11216513 | <gh_stars>1-10
import json
"""
Contains the load functions that we use as the public interface of this whole library.
"""
from .parser import JSONParserParams, JSONParser
from .parser_listener import ObjectBuilderParserListener
from .tree_python import PythonObjectBuilderParams, DefaultStringToScalarConverter
from .tree_config import ConfigObjectBuilderParams
from .text_encoding import load_utf_text_file
def loads(s,
          parser_params=JSONParserParams(),
          object_builder_params=PythonObjectBuilderParams()):
    """Parse the json string *s* into a python object hierarchy.

    Works like the standard ``json.loads()`` but builds json objects with a
    configurable dictionary class (``OrderedDict`` by default).

    :param s: The json string to load.
    :param parser_params: Parser parameters (:class:`JSONParserParams`).
    :param object_builder_params: Factories used by the
        :class:`ObjectBuilderParserListener` to create the python object
        hierarchy while parsing.
    """
    builder = ObjectBuilderParserListener(object_builder_params)
    JSONParser(parser_params).parse(s, builder)
    return builder.result
def loads_config(s,
                 parser_params=JSONParserParams(),
                 string_to_scalar_converter=DefaultStringToScalarConverter()):
    """Parse *s* like :func:`loads`, but wrap every json object, array and
    scalar so the result supports the config query syntax::

        my_config = loads_config(json_string)
        ip_address = my_config.servers.reverse_proxy.ip_address()
        port = my_config.servers.reverse_proxy.port(80)

    Raw unwrapped values are fetched via ``__call__(default=None,
    mapper=None)``. Fetching without a default marks the value as required:
    a missing value raises ``JSONConfigValueNotFoundError``. The optional
    *mapper* receives the unwrapped value and may transform it, or validate
    it and raise (e.g. ``ValueError``) on error. When a default is supplied
    and the value is missing, the default is returned and *mapper* is not
    called.
    """
    builder_params = ConfigObjectBuilderParams(
        string_to_scalar_converter=string_to_scalar_converter)
    listener = ObjectBuilderParserListener(builder_params)
    JSONParser(parser_params).parse(s, listener)
    return listener.result
def load(file_, *args, **kwargs):
    """Like :func:`loads`, but reads the json text from *file_*.

    :param file_: Filename or a file like object with read() method.
    :param default_encoding: Encoding used when the file has no BOM prefix
        (default: UTF-8).
    :param use_utf8_strings: Python 2 only (ignored on python 3): treat the
        loaded text as a utf-8 encoded ``str`` instead of ``unicode``
        (default: True).
    """
    encoding = kwargs.pop('default_encoding', 'UTF-8')
    utf8_strings = kwargs.pop('use_utf8_strings', True)
    text = load_utf_text_file(file_, default_encoding=encoding,
                              use_utf8_strings=utf8_strings)
    return loads(text, *args, **kwargs)
def load_config(file_, *args, **kwargs):
    """Like :func:`loads_config`, but reads the json text from *file_*.

    :param file_: Filename or a file like object with read() method.
    :param default_encoding: Encoding used when the file has no BOM prefix
        (default: UTF-8).
    :param use_utf8_strings: Python 2 only (ignored on python 3): treat the
        loaded text as a utf-8 encoded ``str`` instead of ``unicode``
        (default: True).
    """
    encoding = kwargs.pop('default_encoding', 'UTF-8')
    utf8_strings = kwargs.pop('use_utf8_strings', True)
    text = load_utf_text_file(file_, default_encoding=encoding,
                              use_utf8_strings=utf8_strings)
    return loads_config(text, *args, **kwargs)
def save_config(fileName, config):
    """Write *config* to *fileName* as pretty-printed, sorted JSON."""
    with open(fileName, 'w') as out_file:
        out_file.write(config_to_json_str(config))
def config_to_json_str(config):
    """Render a wrapped config object as a sorted, 4-space-indented JSON
    string."""
    return json.dumps(__config_to_json(config), sort_keys=True,
                      indent=4, separators=(',', ': '))
def __config_to_json(config_json_object):
    """Convert a ConfigJSONObject into a plain dict, dropping keys that
    start with an underscore (internal entries)."""
    return {
        key: __convert_to_json_type(value)
        for key, value in config_json_object._dict.items()
        if not key.startswith('_')
    }
def __convert_to_json_type(item):
    """Recursively unwrap Config* wrapper instances into plain python
    values; anything else passes through unchanged."""
    from .config_classes import ConfigJSONObject, ConfigJSONArray, ConfigJSONScalar
    if isinstance(item, ConfigJSONObject):
        return __config_to_json(item)
    if isinstance(item, ConfigJSONArray):
        return [__convert_to_json_type(element) for element in item]
    if isinstance(item, ConfigJSONScalar):
        return item.value
    return item
class ConfigWithWrapper:
    """Wraps a config loaded from *config_file_name*.

    Supports two usage styles:

    * Direct: attribute/item access and calls are delegated straight to the
      wrapped config object.
    * ``with`` block: yields the wrapped config and writes it back to the
      original file on exit if anything changed.
    """

    def __init__(self, config_file_name):
        self.__config = load_config(config_file_name)
        self.__config_file_name = config_file_name
        self.__check_str = None

    def __getattr__(self, item):
        """ For direct usage, without the with block """
        # __getattr__ is only invoked when normal lookup fails, so a mangled
        # private name reaching here is genuinely missing and must raise
        # AttributeError. The original `return getattr(self, item)` re-entered
        # __getattr__ and recursed forever in that case.
        if item.startswith('_ConfigWithWrapper__'):
            raise AttributeError(item)
        return getattr(self.__config, item)

    def __getitem__(self, item):
        """ For direct usage, without the with block """
        return self.__config[item]

    def __setattr__(self, item, value):
        """ For direct usage, without the with block """
        # Private attributes live on the wrapper itself; everything else is
        # forwarded to the wrapped config.
        if item.startswith('_ConfigWithWrapper__'):
            self.__dict__[item] = value
            return
        setattr(self.__config, item, value)

    def __setitem__(self, item, value):
        """ For direct usage, without the with block """
        self.__config[item] = value

    def __call__(self, *args, **kwargs):
        """ For direct usage, without the with block """
        return self.__config(*args, **kwargs)

    def __len__(self):
        """ For direct usage, without the with block """
        return len(self.__config)

    def __contains__(self, item):
        """ For direct usage, without the with block """
        return item in self.__config

    def __enter__(self):
        """ Enter the with block.
        Snapshot the serialized config so __exit__ can detect changes and
        only save when something was modified.
        """
        self.__check_str = config_to_json_str(self.__config)
        return self.__config

    def __exit__(self, type, value, tb):
        """ Exit the with block.
        If anything has changed since __enter__, persist the config.
        """
        if self.__check_str != config_to_json_str(self.__config):
            save_config(self.__config_file_name, self.__config)
| StarcoderdataPython |
1921407 | <reponame>tehnuty/drf-history
from settings import *
django.setup()
from django_nose import NoseTestSuiteRunner
def run_tests(*test_args):
    """Run the given django-nose test labels (default: the "tests" package)
    and exit non-zero if any test fails."""
    if not test_args:
        test_args = ["tests"]
    test_runner = NoseTestSuiteRunner(verbosity=1)
    failures = test_runner.run_tests(test_args)
    # NOTE(review): `sys` is not imported in this excerpt; presumably it
    # arrives via `from settings import *` above — confirm.
    if failures:
        sys.exit(failures)
if __name__ == "__main__":
    run_tests(*sys.argv[1:])
| StarcoderdataPython |
# Read one line from stdin, print a greeting, then echo the input back.
# (The dataset id that was fused into the first line made it a syntax error.)
input_string = input()
print("Hello, World")
print(input_string)
180266 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as sts
from pprint import pprint
import os
from mnkutil import *
# Global plot styling for every figure produced by this script.
sns.set_style('white')
sns.set_style('white')  # NOTE(review): duplicate call — the second is a no-op
sns.set_context('poster')
sns.set_palette(['#E97F02', '#490A3D', '#BD1550'])
def get_computer_sequence(df):
    """Return the `computer` ids (as ints) for game-play rows of *df*.

    Selects rows where (gi is even and mi == 2) or (gi is odd and mi == 3),
    excluding 2AFC ('AFC2') and evaluation ('EVAL') trials.
    """
    even_game_move = (df.gi % 2 == 0) & (df.mi == 2)
    odd_game_move = (df.gi % 2 == 1) & (df.mi == 3)
    is_game_trial = ~df.status.isin(['AFC2', 'EVAL'])
    selected = df.loc[is_game_trial & (even_game_move | odd_game_move), :]
    return selected.computer.values.astype(int)
def append_tables(df):
    """Attach ground-truth columns to 2AFC and EVAL trials of *df* in place.

    For 'AFC2' rows, looks up the board position in the module-level
    `positions` table and copies the two candidate moves and their values
    ('a', 'b', 'aval', 'bval'). For 'EVAL' rows, copies the game-theoretic
    value ('val') from the module-level `oberr` table. Returns *df*.
    """
    for col in new_cols:
        df.loc[:, col] = np.nan

    afc = df.loc[df.status == "AFC2", :]
    for obs in afc.index.values:
        match = positions.loc[
            (positions.bp == afc.loc[obs, 'bp']) & (positions.wp == afc.loc[obs, 'wp']),
            :]
        if len(match) > 1:
            # Keep the first match as a one-row DataFrame. The original did
            # `int(row)` here, which raises on a multi-column row, and then
            # the `len(...) == 1` check below could never hold.
            match = match.iloc[[0]]
        if len(match) == 1:
            for col in ['a', 'b', 'aval', 'bval']:
                df.loc[obs, col] = int(match.loc[:, col].values[0])

    ev = df.loc[df.status == 'EVAL', :]
    for obs in ev.index.values:
        match = oberr.loc[
            (oberr.bp == ev.loc[obs, 'bp']) & (oberr.wp == ev.loc[obs, 'wp']),
            :]
        # The original EVAL loop tested `len(c)` — a leftover from the AFC
        # loop — instead of the EVAL match; fixed to use this loop's result.
        if len(match) > 1:
            match = match.iloc[[0]]
        if len(match) == 1:
            df.loc[obs, 'val'] = match.loc[:, 'gtv'].values[0]
    return df
def find_errors(df):
    """Score each 2AFC trial of *df* as correct (0) or an error (1).

    A trial is an error when the chosen move has the lower ground-truth value
    of the two candidates in the module-level `positions` table. Also copies
    the game-theoretic value ('val') from `oberr` and updates the
    attempt/error tallies in `positions` in place. Returns the mutated *df*.
    """
    for col in ['error', 'val']:
        df.loc[:, col] = np.nan

    afc = df.loc[df.status == "AFC2", :]
    for obs in afc.index.values:
        c = positions.loc[
            (positions.bp == afc.loc[obs, 'bp']) & (positions.wp == afc.loc[obs, 'wp']),
            :]
        o = oberr.loc[
            (oberr.bp == afc.loc[obs, 'bp']) & (oberr.wp == afc.loc[obs, 'wp']),
            :]
        # Keep the first match as a one-row DataFrame. The original collapsed
        # it to a Series, after which `len(...) == 1` (length = column count)
        # was never true and duplicated positions were silently skipped.
        if len(c) > 1:
            c = c.iloc[[0]]
        if len(o) > 1:
            o = o.iloc[[0]]
        if len(c) == 1:
            positions.loc[c.index.values[0], 'attempts'] += 1
            response = afc.loc[obs, 'response']
            a_val = c.loc[:, 'aval'].values[0]
            b_val = c.loc[:, 'bval'].values[0]
            if response == c.loc[:, 'a'].values[0]:
                # Chose candidate "a": error iff "a" is the worse move.
                if a_val < b_val:
                    df.loc[obs, 'error'] = 1
                    positions.loc[c.index.values[0], 'errors'] += 1
                else:
                    df.loc[obs, 'error'] = 0
            elif response == c.loc[:, 'b'].values[0]:
                # Chose candidate "b": error iff "b" is the worse move.
                if a_val > b_val:
                    df.loc[obs, 'error'] = 1
                    positions.loc[c.index.values[0], 'errors'] += 1
                else:
                    df.loc[obs, 'error'] = 0
        if len(o) == 1:
            df.loc[obs, 'val'] = o.loc[:, 'gtv'].values[0]
    return df
def export_data(ds, save=False):
    """Concatenate game, 2AFC and evaluation trials across all subject
    DataFrames in *ds*; optionally write the three summary CSVs."""
    def concat_rows(mask_fn, cols):
        return pd.concat([d.loc[mask_fn(d), cols] for d in ds]).reset_index(drop=True)

    gam = concat_rows(lambda d: ~d.status.isin(['AFC2', 'EVAL']) & (d.rt != 0), gamout)
    afc = concat_rows(lambda d: d.status == 'AFC2', afcout)
    eva = concat_rows(lambda d: d.status == 'EVAL', evaout)
    afc.loc[:, new_cols[:-1]] = afc.loc[:, new_cols[:-1]].astype(int)
    eva.loc[:, 'val'] = eva.loc[:, 'val'].astype(int)
    if save:
        gam.to_csv('../Clean/_summaries/all_games.csv', header=False, index=False)
        afc.to_csv('../Clean/_summaries/all_afcs.csv', header=False, index=False)
        eva.to_csv('../Clean/_summaries/all_evals.csv', header=False, index=False)
    return gam, afc, eva
def ai_performance(subject_list, subject_data):
    """Tally per-subject games, wins and draws against each of the 30 AI
    opponents; returns three subject-by-computer DataFrames."""
    def blank_table():
        return pd.DataFrame(index=subject_list, columns=list(range(30)),
                            data=np.zeros([len(subject_list), 30]))

    games_x_computer = blank_table()
    wins_x_computer = blank_table()
    draws_x_computer = blank_table()
    for subj in subject_list:
        d = subject_data[subj]
        won = d.loc[(d.gi % 2 == d.mi % 2) & (d.status == 'win'), 'computer'].values.astype(int)
        drew = d.loc[(d.gi % 2 == d.mi % 2) & (d.status == 'draw'), 'computer'].values.astype(int)
        played = get_computer_sequence(d)
        for c in won:
            wins_x_computer.loc[subj, c] += 1
        for c in played:
            games_x_computer.loc[subj, c] += 1
        for c in drew:
            draws_x_computer.loc[subj, c] += 1
    return games_x_computer, wins_x_computer, draws_x_computer
# Column orderings for the exported CSVs (games / 2AFC / evaluations).
gamout = ['subject', 'color', 'bp', 'wp', 'response', 'rt']
afcout = ['subject', 'color', 'bp', 'wp', 'response', 'rt', 'a', 'b', 'aval', 'bval']
evaout = ['subject', 'color', 'bp', 'wp', 'response', 'rt', 'val']
# Ground-truth columns appended to trial tables by append_tables().
new_cols = ['a', 'b', 'aval', 'bval', 'val']
oberr = pd.read_csv('../objective_errors.csv')
positions = pd.read_csv('../experiment_boards_new.txt', sep='\t',
                        names=['bp', 'wp', 'a', 'aval', 'b', 'bval', 'c', 'mu'])
# Convert piece bitmasks to board strings (bits2boards presumably comes from
# the `from mnkutil import *` above — confirm).
oberr.loc[:, 'bp'] = oberr.loc[:, '0_pieces'].map(bits2boards)
oberr.loc[:, 'wp'] = oberr.loc[:, '1_pieces'].map(bits2boards)
oberr = oberr.loc[:, ['bp', 'wp', 'color', 'Game_theoretic_value',
                      'Confirmed', 'value_Zeyan ', 'confirmed_Zeyan']]
oberr.columns = ['bp', 'wp', 'color', 'gtv', 'gtv_c', 'zv', 'zv_c']
# Per-position attempt/error tallies, filled in by find_errors().
positions.loc[:, 'attempts'] = 0
positions.loc[:, 'errors'] = 0
def main():
    """Load per-subject CSVs from ../Clean, export AI-performance tables and
    the merged game/2AFC/evaluation trial files."""
    # Skip summary and hidden files; subject id is the filename stem
    # (2 chars for 6-char names like "ab.csv", else 3).
    files = [f for f in os.listdir('../Clean/') if (f[0] != "_") and (f[0] != ".")]
    subjects = [f[:2] if len(f) == 6 else f[:3] for f in files]
    dataset = [pd.read_csv('../Clean/' + f).drop('Unnamed: 0', axis=1) for f in files]
    datadict = dict(zip(subjects, dataset))
    gxc, wxc, dxc = ai_performance(subjects, datadict)
    for df, dfn in zip([gxc, wxc, dxc],
                       ['games_x_computer', 'wins_x_computer', 'draws_x_computer']):
        df.to_csv('../Statistics/' + dfn + '.csv')
    dataset = list(map(append_tables, dataset))
    _, _, _ = export_data(dataset, save=True)
    return None


if __name__ == '__main__':
    # (The dataset artifact fused onto this line previously made it a
    # syntax error.)
    main()
5095461 | <reponame>MenglingHettinger/CarND-Advanced-Lane-Lines
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import cv2
import pickle
def camera_calibration(nx, ny, path, show=True):
    """Calibrate a camera from chessboard images.

    Args:
        nx: number of inner corners per chessboard row.
        ny: number of inner corners per chessboard column.
        path: glob pattern matching the calibration images.
        show: if True, display each image with the detected corners drawn.

    Returns:
        dict with keys 'mtx', 'dist', 'rvecs', 'tvecs' from
        cv2.calibrateCamera.

    Raises:
        ValueError: if no images match *path* (the original code hit a
        NameError on `img_size` in that case).
    """
    images = glob.glob(path)
    if not images:
        raise ValueError("No calibration images found for pattern: %s" % path)

    objpoints = []  # 3D points in real world space
    imgpoints = []  # 2D points in the image plane

    # Planar chessboard model: z = 0, (x, y) on a unit grid.
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)  # x, y coordinates

    img_size = None
    for fname in images:
        img = mpimg.imread(fname)
        img_size = (img.shape[1], img.shape[0])
        # mpimg.imread returns RGB, so convert with RGB2GRAY; the original
        # used BGR2GRAY, which swaps the red/blue luma weights.
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret == True:
            imgpoints.append(corners)
            objpoints.append(objp)
            # Draw and display the corners (the original opened an extra,
            # empty figure per image here).
            img = cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
            if show:
                plt.figure(figsize=(10, 10))
                plt.imshow(img)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, img_size, None, None)
    calib_dict = {}
    calib_dict['mtx'] = mtx
    calib_dict['dist'] = dist
    calib_dict['rvecs'] = rvecs
    calib_dict['tvecs'] = tvecs
    return calib_dict
def undistort(img, mtx, dist):
    """Remove lens distortion from *img* using the camera matrix *mtx* and
    distortion coefficients *dist* from calibration."""
    return cv2.undistort(img, mtx, dist, None, mtx)
if __name__ == '__main__':
    # Calibrate from the bundled 9x6 chessboard images and cache the result
    # so other scripts can load it without re-running calibration.
    calib_dict = camera_calibration(nx=9, ny=6, path="camera_cal/calibration*.jpg")
    with open('calibrate_camera.p', 'wb') as f:
        pickle.dump(calib_dict, f)
3344416 | <gh_stars>1-10
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.conf import settings
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Page, PageViewRestriction
from wagtail.wagtailcore.forms import PasswordPageViewRestrictionForm
def serve(request, path):
    """Route *path* below the request's site root page and serve that page.

    Registered 'before_serve_page' hooks run first; a hook may short-circuit
    the request by returning an HttpResponse.
    """
    # we need a valid Site object corresponding to this request (set in wagtail.wagtailcore.middleware.SiteMiddleware)
    # in order to proceed
    if not request.site:
        raise Http404

    # Drop empty components so leading/trailing/double slashes are ignored.
    path_components = [component for component in path.split('/') if component]
    page, args, kwargs = request.site.root_page.specific.route(request, path_components)

    for fn in hooks.get_hooks('before_serve_page'):
        result = fn(page, request, args, kwargs)
        if isinstance(result, HttpResponse):
            return result

    return page.serve(request, *args, **kwargs)
def authenticate_with_password(request, page_view_restriction_id, page_id):
    """
    Handle a submission of PasswordPageViewRestrictionForm to grant view access over a
    subtree that is protected by a PageViewRestriction
    """
    restriction = get_object_or_404(PageViewRestriction, id=page_view_restriction_id)
    page = get_object_or_404(Page, id=page_id).specific

    if request.POST:
        form = PasswordPageViewRestrictionForm(request.POST, instance=restriction)
        if form.is_valid():
            has_existing_session = (settings.SESSION_COOKIE_NAME in request.COOKIES)

            # Record this restriction as passed in the visitor's session so
            # subsequent page views under the subtree are allowed.
            passed_restrictions = request.session.setdefault('passed_page_view_restrictions', [])
            if restriction.id not in passed_restrictions:
                passed_restrictions.append(restriction.id)
                request.session['passed_page_view_restrictions'] = passed_restrictions

            if not has_existing_session:
                # if this is a session we've created, set it to expire at the end
                # of the browser session
                request.session.set_expiry(0)

            return redirect(form.cleaned_data['return_url'])
    else:
        form = PasswordPageViewRestrictionForm(instance=restriction)

    # Invalid or missing password: re-render the password prompt on the page.
    action_url = reverse('wagtailcore_authenticate_with_password', args=[restriction.id, page.id])
    return page.serve_password_required_response(request, form, action_url)
| StarcoderdataPython |
1765061 | <filename>regnety/train.py
"""Script for training RegNetY. Supports TPU training."""
import tensorflow as tf
import argparse
import os
import json
import wandb
import logging
import math
import yaml
from datetime import datetime
from wandb.keras import WandbCallback
from regnety.models.model import RegNetY
from regnety.dataset.imagenet import ImageNet
from regnety.utils import train_utils as tutil
from regnety.config.config import (
get_train_config,
get_train_config_from_yaml,
get_preprocessing_config,
ALLOWED_FLOPS
)
# Command-line interface. NOTE: the yaml/wandb options below were previously
# declared with a bare second option string (e.g. "train_config_yaml");
# argparse rejects mixing dashed and undashed option strings, so parser
# construction crashed at import time. Both spellings now carry a dash, and
# the long form preserves the `args.train_config_yaml` /
# `args.wandb_project_id` / `args.wandb_user_id` names used further down.
parser = argparse.ArgumentParser(description="Train RegNetY")
parser.add_argument("-f", "--flops", type=str, help="FLOP variant of RegNetY")
parser.add_argument("-taddr", "--tpu_address", type=str,
                    help="Network address of TPU cluster", default=None)
parser.add_argument("-traintfrec", "--train_tfrecs_path_pattern", type=str,
                    help="Path for tfrecords. eg. gs://imagenet/*.tfrecord.")
parser.add_argument("-validtfrec", "--valid_tfrecs_path_pattern", type=str,
                    help="Path for tfrecords. eg. gs://imagenet/*.tfrecord.")
parser.add_argument("-log", "--log_location", type=str,
                    help="Path to store logs in")
parser.add_argument("-trial", "--trial_run", action="store_true")
parser.add_argument("-yaml", "--train_config_yaml", type=str, default=None,
                    help="Train config yaml file if need to override defaults.")
parser.add_argument("-wandbproject", "--wandb_project_id", type=str, default="",
                    help="Project ID for wandb logging")
parser.add_argument("-wandbuser", "--wandb_user_id", type=str, default="",
                    help="User ID for wandb logging")
args = parser.parse_args()
flops = args.flops.lower()
tpu_address = args.tpu_address
train_tfrecs_filepath = tf.io.gfile.glob(args.train_tfrecs_path_pattern)
val_tfrecs_filepath = tf.io.gfile.glob(args.valid_tfrecs_path_pattern)
log_location = args.log_location
# NOTE(review): `yaml_path` and the imported `get_train_config_from_yaml`
# are never used below — the yaml override appears unimplemented; confirm.
yaml_path = args.train_config_yaml
trial = args.trial_run
wandbproject = args.wandb_project_id
wandbuser = args.wandb_user_id

logging.basicConfig(format="%(asctime)s %(levelname)s : %(message)s",
                    datefmt="%d-%b-%y %H:%M:%S", level=logging.INFO)

# Normalize the flops string ("200" -> "200mf") and validate it.
if "mf" not in flops:
    flops += "mf"

if flops not in ALLOWED_FLOPS:
    raise ValueError("Flops must be one of %s. Received: %s" % (ALLOWED_FLOPS,
                                                                flops.rstrip("mf")))

cluster_resolver, strategy = tutil.connect_to_tpu(tpu_address)

# Scale the base learning rate linearly with the number of replicas.
train_cfg = get_train_config(
    optimizer="adamw",
    base_lr=0.001 * strategy.num_replicas_in_sync,
    warmup_epochs=5,
    warmup_factor=0.1,
    total_epochs=100,
    weight_decay=5e-5,
    momentum=0.9,
    lr_schedule="half_cos",
    log_dir=log_location + "/logs",
    model_dir=log_location + "/models",
)

train_prep_cfg = get_preprocessing_config(
    tfrecs_filepath=train_tfrecs_filepath,
    augment_fn="default",
    mixup=False
)

val_prep_cfg = get_preprocessing_config(
    tfrecs_filepath=val_tfrecs_filepath,
    augment_fn="val",
    mixup=False
)

logging.info(f"Training options detected: {train_cfg}")
logging.info("Preprocessing options detected.")
logging.info(
    f"Training on TFRecords: {train_prep_cfg.tfrecs_filepath[0]} to {train_prep_cfg.tfrecs_filepath[-1]}")
logging.info(
    f"Validating on TFRecords: {val_prep_cfg.tfrecs_filepath[0]} to {val_prep_cfg.tfrecs_filepath[-1]}")

# Build the model inside the distribution strategy scope so variables are
# placed on the TPU replicas.
with strategy.scope():
    model = tutil.make_model(flops, train_cfg)
    # model.load_weights(log_location + "/init_weights/" + flops.upper())

logging.info("Model loaded")

train_ds = ImageNet(train_prep_cfg).make_dataset()
val_ds = ImageNet(val_prep_cfg).make_dataset()

# Timestamp used to name the wandb run, TensorBoard logdir and history file.
now = datetime.now()
date_time = now.strftime("%m_%d_%Y_%Hh%Mm")

wandb.init(entity=wandbuser, project=wandbproject,
           job_type="train", name="regnety_" + date_time + "_" + flops.upper())

# Lightweight callback set for trial runs (no checkpointing).
trial_callbacks = [
    tf.keras.callbacks.LearningRateScheduler(
        tutil.get_train_schedule(train_cfg)),
    tf.keras.callbacks.TensorBoard(
        log_dir=os.path.join(train_cfg.log_dir, str(date_time)), histogram_freq=1),  # profile_batch="0,1023"
    WandbCallback()
]

callbacks = trial_callbacks if trial else tutil.get_callbacks(
    train_cfg, date_time)

history = model.fit(
    train_ds,
    epochs=train_cfg.total_epochs,
    validation_data=val_ds,
    callbacks=callbacks,
)

# Persist the training history alongside the logs (works for gs:// paths).
with tf.io.gfile.GFile(os.path.join(train_cfg.log_dir, "history_%s.json" % date_time), "a+") as f:
    json.dump(str(history.history), f)
| StarcoderdataPython |
5175990 | #!python3
from .test_clients_dummy import test_dummy
from ..lib.configs import get_all_config
from ..reports.clients_reports import BurpReports
import os
from ..lib.files import temp_file
from invpy_libs import csv_as_dict
# Paths used by the inventory tests: the bundled sample inventory CSV and a
# temporary file for the comparison output.
__inventory__ = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                                              'data', 'test_inventory.csv'))
__output__ = temp_file('inventory_test.csv')


class TestInventory:
    """Tests comparing the dummy Burp client set against the sample
    inventory CSV."""

    def test_file(self):
        # The sample inventory must ship with the test data.
        assert os.path.isfile(__inventory__)

    def test_inventory(self):
        """Run the inventory comparison and return the result keyed by
        client name (also reused by test_inventory_result)."""
        clients_dict = test_dummy()
        config = get_all_config()
        # Generate burp_reports object to use for reports.
        reports = BurpReports(clients_dict,
                              days_outdated=int(config['common']['days_outdated']),
                              config=config)
        reports.save_compared_inventory(__inventory__, __output__)
        compared_clients = csv_as_dict(__output__, config['inventory_columns']['client_name'],
                                       delimiter=config['common']['csv_delimiter'])
        return compared_clients

    def test_inventory_result(self):
        report = self.test_inventory()
        config = get_all_config()
        status_dict = dict(config['inventory_status'])
        status = config['inventory_columns']['status']
        assert report['cli10'][status] == status_dict['in_inventory_not_in_burp']
        assert report['cli10'][status] not in status_dict['in_many_servers']
        assert report['cli20'][status] == status_dict['spare_not_in_burp']
        assert report['cli30'][status] == status_dict['in_many_servers']
        assert report['cli40'][status] == status_dict['inactive_not_in_burp']
        # NOTE(review): this assertion is always true — `x == y or 'outdated'`
        # evaluates to the truthy string when the comparison fails. Presumably
        # it should test membership in the updated/outdated status pair.
        assert report['client_001'][status] == status_dict['in_inventory_updated'] or 'outdated'
        assert report['client_002'][status] == status_dict['spare_in_burp']
        assert report['client_003'][status] == status_dict['inactive_in_burp']
| StarcoderdataPython |
9625115 | <gh_stars>1-10
#!/usr/bin/env python3
import os
import argparse
import random
import math
from collections import Counter, defaultdict
from typing import NamedTuple
from tabulate import tabulate
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import numba
import torch
torch.set_num_threads(8)
from util.io import store_json, load_json, load_text
from util.proposal import BaseProposalModel, EnsembleProposalModel
from util.video import get_metadata
from action_dataset.load import load_embs, load_actions
from action_dataset.eval import get_test_prefixes
import video_dataset_paths as dataset_paths
class DataConfig(NamedTuple):
    """Per-dataset settings: video filtering, action classes, and the
    context window padded around each action."""
    # Only use videos whose name starts with this prefix (None = all videos).
    video_name_prefix: 'Optional[str]'
    # Action class names for this dataset.
    classes: 'List[str]'
    # Context padded before/after each action window (presumably seconds,
    # matching TENNIS_WINDOW = 0.1 below — confirm against callers).
    window_before: float = 0.
    window_after: float = 0.
# Swing classes shared by all tennis dataset variants.
TENNIS_CLASSES = [
    'forehand_topspin', 'forehand_slice', 'backhand_topspin', 'backhand_slice',
    'forehand_volley', 'backhand_volley', 'overhead', 'serve', 'unknown_swing'
]
# Context window around each tennis swing (used for window_before/after).
TENNIS_WINDOW = 0.1
# Minimum swings per class required for a few-shot example.
TENNIS_MIN_SWINGS_FEW_SHOT = 5


# Registry of dataset name -> configuration; keys are the CLI choices.
DATA_CONFIGS = {
    'tennis': DataConfig(
        video_name_prefix=None,
        classes=TENNIS_CLASSES,
        window_before=TENNIS_WINDOW,
        window_after=TENNIS_WINDOW
    ),
    # Front/back variants restrict to one side of the court via the prefix.
    'tennis_front': DataConfig(
        video_name_prefix='front__',
        classes=TENNIS_CLASSES,
        window_before=TENNIS_WINDOW,
        window_after=TENNIS_WINDOW
    ),
    'tennis_back': DataConfig(
        video_name_prefix='back__',
        classes=TENNIS_CLASSES,
        window_before=TENNIS_WINDOW,
        window_after=TENNIS_WINDOW
    ),
    'fs_jump': DataConfig(
        video_name_prefix=None,
        classes=['axel', 'lutz', 'flip', 'loop', 'salchow', 'toe_loop'],
    ),
    # Floor exercise: no per-class labels configured.
    'fx': DataConfig(video_name_prefix=None, classes=[])
}
class Label(NamedTuple):
video: str
value: str
start_frame: int
end_frame: int
fps: float
EMB_FILE_SUFFIX = '.emb.pkl'
SEQ_MODELS = ['lstm', 'gru']
def get_args():
    """Parse command-line options for the localization experiments."""
    p = argparse.ArgumentParser()
    p.add_argument('dataset', choices=list(DATA_CONFIGS))
    p.add_argument('-k', type=int, default=1)
    p.add_argument('-o', '--out_dir', type=str)
    p.add_argument('--emb_dir', type=str)
    p.add_argument('-nt', '--n_trials', type=int, default=1)
    p.add_argument('--algorithm', type=str, choices=SEQ_MODELS,
                   default='gru')
    p.add_argument('-ne', '--n_examples', type=int, default=-1)
    p.add_argument('-tw', '--tennis_window', type=float)
    p.add_argument('--_all', action='store_true')
    p.add_argument('--norm', action='store_true')
    p.add_argument('--hidden_dim', type=int, default=128)
    p.add_argument('--batch_size', type=int)
    return p.parse_args()
def get_video_intervals(examples):
    """Collapse labels into per-video sorted, non-overlapping frame intervals.

    Parameters
    ----------
    examples : iterable of objects with .video, .start_frame, .end_frame

    Returns
    -------
    dict mapping video name -> tuple of (start, end) intervals, sorted and
    merged so that no two intervals overlap or touch.
    """
    result = defaultdict(list)
    for l in examples:
        result[l.video].append((l.start_frame, l.end_frame))

    def deoverlap(intervals):
        # Merge sorted intervals that overlap or touch.
        ret = []
        for a, b in sorted(intervals):
            if len(ret) == 0 or ret[-1][1] < a:
                ret.append((a, b))
            else:
                # BUG FIX: take the max of the two ends so an interval fully
                # contained in the previous one cannot shrink the merged end
                # (previously `ret[-1] = (ret[-1][0], b)`).
                ret[-1] = (ret[-1][0], max(ret[-1][1], b))
        return tuple(ret)

    return {k: deoverlap(v) for k, v in result.items()}
class ProposalModel:
    """Frame-level action proposal scorer backed by an ensemble of sequence models."""
    MIN_TRAIN_EPOCHS = 25   # lower bound passed to the ensemble trainer
    NUM_TRAIN_EPOCHS = 200  # upper bound passed to the ensemble trainer
    def __init__(self, arch_type, emb_dict, train_labels, hidden_dim,
                 ensemble_size, splits=5, **kwargs):
        # emb_dict maps video -> (embeddings, ...); only the first element is used.
        self.embs = emb_dict
        train_videos = list({l.video for l in train_labels
                             if l.video in emb_dict})
        def get_gt(video):
            # Build a per-frame 0/1 target vector from the labeled intervals.
            vx, _ = emb_dict[video]
            vy = np.zeros(vx.shape[0], dtype=np.int32)
            for l in train_labels:
                if l.video == video:
                    vy[l.start_frame:l.end_frame] = 1
            return vx, vy
        X, y = [], []
        custom_split = None
        for i, v in enumerate(train_videos):
            vx, vy = get_gt(v)
            if len(vx.shape) == 3:
                # Multi-view embeddings (frames, views, dim): treat each view
                # as a separate training sequence, grouped by video index so
                # views of one video never straddle a CV split.
                if custom_split is None:
                    custom_split = []
                for j in range(vx.shape[1]):
                    X.append(vx[:, j, :])
                    y.append(vy)
                    custom_split.append(i)
            else:
                X.append(vx)
                y.append(vy)
        if custom_split is not None:
            assert len(custom_split) == len(X)
        if len(X) < ensemble_size:
            # Clamp ensemble/splits to the number of available sequences.
            ensemble_size = splits = len(X)
            print('Too few videos for full ensemble:', ensemble_size)
        kwargs.update({
            'ensemble_size': ensemble_size, 'splits': splits,
            'num_epochs': ProposalModel.NUM_TRAIN_EPOCHS,
            'min_epochs': ProposalModel.MIN_TRAIN_EPOCHS,
            'custom_split': custom_split
        })
        # NOTE(review): after the clamp above, len(X) < ensemble_size can no
        # longer be true, so this raise is unreachable dead code.
        if len(X) < ensemble_size:
            raise Exception('Not enough examples for ensemble!')
        else:
            self.model = EnsembleProposalModel(
                arch_type, X, y, hidden_dim, **kwargs)
    def predict(self, video):
        """Return per-frame proposal scores for `video` (multi-view aware)."""
        x = self.embs[video][0]
        if len(x.shape) == 3:
            return self.model.predict_n(
                *[x[:, i, :] for i in range(x.shape[1])])
        else:
            return self.model.predict(x)
LOC_TEMPORAL_IOUS = [0.1 * i for i in range(1, 10)]
@numba.jit(nopython=True)
def calc_iou(a1, a2, b1, b2):
    # 1D temporal IoU of intervals [a1, a2] and [b1, b2].
    # When the intervals intersect, the hull max(a2,b2)-min(a1,b1) equals
    # their union length, so this is exact IoU; disjoint intervals return 0.
    isect = min(a2, b2) - max(a1, b1)
    return isect / (max(a2, b2) - min(a1, b1)) if isect > 0 else 0
def compute_precision_recall_curve(is_tp, num_pos):
    """Build a precision/recall curve from score-ranked hit flags.

    is_tp: booleans for each retrieval, ordered by descending score.
    num_pos: total number of ground-truth positives.
    Returns (precision, recall) lists, one entry per retrieval.
    """
    precision, recall = [], []
    true_pos = 0
    for rank, hit in enumerate(is_tp, 1):
        if hit:
            true_pos += 1
        # rank == true positives + false positives seen so far
        recall.append(true_pos / num_pos)
        precision.append(true_pos / rank)
    return precision, recall
def compute_interpolated_precision(precision, recall):
    """Interpolate a precision/recall curve for AP computation.

    Walks the raw curve from highest recall backwards, keeping for every
    recall level the best precision achieved at that recall or higher, and
    anchors the curve at (recall=0, precision=1).
    Returns (interp_precision, interp_recall), both in increasing recall order.
    """
    interp_recall = []
    interp_precision = []
    best_p = 0
    lowest_r = 1
    for p, r in zip(reversed(precision), reversed(recall)):
        # Emit a point when recall strictly drops and precision improves
        # on the last emitted point.
        if r < lowest_r and (not interp_precision or p > interp_precision[-1]):
            interp_recall.append(lowest_r)
            interp_precision.append(best_p)
        best_p = max(best_p, p)
        lowest_r = min(lowest_r, r)
    # Anchor at recall 0 with perfect precision.
    interp_recall.append(0)
    interp_precision.append(1)
    interp_precision.reverse()
    interp_recall.reverse()
    return interp_precision, interp_recall
def compute_ap(pc, rc):
    """Average precision: area under the interpolated precision-recall curve."""
    ipc, irc = compute_interpolated_precision(pc, rc)
    # The interpolated curve must span recall 0..1 exactly.
    assert irc[0] == 0, irc[0]
    assert irc[-1] == 1, (irc[-1], len(irc))
    area = 0
    for i in range(len(irc) - 1):
        # Rectangle rule: width is the recall step, height the precision at
        # the right edge (interpolated precision is a step function).
        dr = irc[i + 1] - irc[i]
        assert dr > 0
        p = ipc[i + 1]
        if i > 0:
            # Interpolated precision is strictly decreasing past the anchor.
            assert p < ipc[i], (p, ipc[i])
        area += p * dr
    assert area >= 0 and area <= 1, area
    return area
def plot_proposal_dist(props):
    """Show a histogram of proposal lengths (in frames)."""
    lengths = [end - start for start, end in props]
    fig = plt.figure()
    plt.hist(x=lengths, bins=50)
    plt.xlabel('num frames')
    plt.ylabel('frequency')
    plt.show()
    plt.close(fig)
def plot_precision_recall_curve(p, r):
    """Plot the raw curve and its interpolated step version on [0, 1]^2."""
    fig = plt.figure()
    plt.plot(r, p)
    interp_p, interp_r = compute_interpolated_precision(p, r)
    plt.step(interp_r, interp_p)
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.show()
    plt.close(fig)
def make_split(train_examples, is_tennis):
    """Build a deterministic (seed 42) shuffled list of candidate train videos.

    For tennis, videos are reduced to their base name (camera-view prefix
    stripped) and filtered to those with enough swings in BOTH views.
    """
    print('Making a new split!')
    train_videos = list({l.video for l in train_examples})
    if is_tennis:
        # Split off the player
        train_videos = list({
            v.split('__', 1)[1] for v in train_videos})
    print('Videos:', len(train_videos))
    # Sort before shuffling so the seeded shuffle is reproducible regardless
    # of set iteration order.
    train_videos.sort()
    random.Random(42).shuffle(train_videos)
    if is_tennis:
        train_intervals = get_video_intervals(train_examples)
        def tennis_filter(t):
            front_video = 'front__' + t
            back_video = 'back__' + t
            # Dont sample videos with too few swings
            return (
                len(train_intervals.get(front_video, []))
                >= TENNIS_MIN_SWINGS_FEW_SHOT
                and len(train_intervals.get(back_video, []))
                >= TENNIS_MIN_SWINGS_FEW_SHOT)
        train_videos = list(filter(tennis_filter, train_videos))
    for v in train_videos:
        print(v)
    return train_videos
def run_localization(dataset_name, emb_dict, train_examples, test_examples,
                     n_examples, n_trials, algorithm, k, hidden_dim, batch_size,
                     out_dir, _all=False):
    """Train proposal models and report AP over tIoU/activation thresholds.

    Parameters
    ----------
    dataset_name : key into DATA_CONFIGS (controls few-shot file and thresholds)
    emb_dict : video name -> (embeddings, ...) as loaded by load_embs
    train_examples, test_examples : lists of Label
    n_examples : few-shot budget; -1 means use the full training set
    n_trials : number of few-shot trials to average over
    algorithm : sequence model type ('lstm' or 'gru')
    k : ensemble size
    out_dir : if set, per-trial frame scores are dumped as JSON here
    _all : score every video with embeddings, not just test videos
    """
    test_video_ints = get_video_intervals(test_examples)
    test_video_int_count = sum(len(v) for v in test_video_ints.values())
    print('Test examples (non-overlapping):', test_video_int_count)
    # Proposals are snapped to [0.67, 1.33] x the mean training interval length.
    mean_train_int_len = np.mean([
        t.end_frame - t.start_frame for t in train_examples])
    min_prop_len = 0.67 * math.ceil(mean_train_int_len)
    max_prop_len = 1.33 * math.ceil(mean_train_int_len)
    if n_examples == -1:
        exp_name = 'full train'
    else:
        exp_name = '{} shot'.format(n_examples)
    # Activation threshold ranges
    thresholds = (
        np.linspace(0.05, 0.5, 10) if 'tennis' in dataset_name
        else np.linspace(0.1, 0.9, 9))
    trial_results = []
    for trial in range(n_trials):
        if n_examples < 0:
            exp_train_examples = train_examples
        else:
            # Few-shot: take the first n_examples videos of the trial's split.
            few_shot_file = \
                'action_dataset/{}/train.localize.{}.txt'.format(
                    'fs' if dataset_name.startswith('fs') else dataset_name,
                    trial)
            print('Loading split:', few_shot_file)
            train_videos = load_text(few_shot_file)
            train_videos = train_videos[:n_examples]
            exp_train_examples = [
                l for l in train_examples
                if (l.video in train_videos or
                    ('tennis' in dataset_name and
                     l.video.split('__', 1)[1] in train_videos))]
        kwargs = {}
        if batch_size is not None:
            kwargs['batch_size'] = batch_size
        model = ProposalModel(algorithm, emb_dict, exp_train_examples,
                              hidden_dim, ensemble_size=k, **kwargs)
        results = []
        for video in tqdm(
                set(emb_dict) if _all else
                {l.video for l in test_examples if l.video in emb_dict},
                desc='Running {}'.format(exp_name)
        ):
            scores = model.predict(video)
            results.append((video, scores))
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)
            out_path = os.path.join(
                out_dir, '{}_trial{}_{}_pred.json'.format(
                    # BUG FIX: this previously referenced an undefined name
                    # `n` (NameError whenever out_dir was given); the intent
                    # is the few-shot budget n_examples.
                    'train{}'.format(len(exp_train_examples)
                                     if n_examples < 0 else n_examples),
                    trial, algorithm))
            store_json(out_path, {video: scores.tolist()
                                  for video, scores in results})
        def calc_ap_at_threshold(act_thresh):
            # Score all proposals at this activation threshold and compute
            # AP at every temporal IoU in LOC_TEMPORAL_IOUS.
            all_props = []
            for video, scores in results:
                props = BaseProposalModel.get_proposals(scores, act_thresh)
                for p, score in props:
                    all_props.append((video, p, score))
            # Rank proposals by descending confidence.
            all_props.sort(key=lambda x: -x[-1])
            aps_at_tiou = []
            for t_iou in LOC_TEMPORAL_IOUS:
                all_remaining = {}
                for video, gt_ints in test_video_ints.items():
                    all_remaining[video] = set(gt_ints)
                is_tp = []
                for video, p, score in all_props:
                    # Snap proposal length into [min_prop_len, max_prop_len],
                    # preserving its center.
                    mid = (p[1] + p[0]) // 2
                    if p[1] - p[0] < min_prop_len:
                        p = (max(0, mid - min_prop_len // 2),
                             mid + min_prop_len // 2)
                    elif p[1] - p[0] > max_prop_len:
                        p = (max(0, mid - max_prop_len // 2),
                             mid + max_prop_len // 2)
                    # Only the first retrieval can be correct
                    video_remaining = all_remaining.get(video)
                    if video_remaining is None:
                        is_tp.append(False)
                    else:
                        recalled = []
                        for gt in video_remaining:
                            if calc_iou(*p, *gt) >= t_iou:
                                recalled.append(gt)
                        # Disallow subsequent recall of these ground truth
                        # intervals
                        for gt in recalled:
                            video_remaining.remove(gt)
                        if len(video_remaining) == 0:
                            del all_remaining[video]
                        is_tp.append(len(recalled) > 0)
                if len(is_tp) > 0 and any(is_tp):
                    pc, rc = compute_precision_recall_curve(
                        is_tp, test_video_int_count)
                    aps_at_tiou.append(compute_ap(pc, rc))
                else:
                    aps_at_tiou.append(0)
            return aps_at_tiou
        all_aps = []
        for act_thresh in thresholds:
            all_aps.append(calc_ap_at_threshold(act_thresh))
        # Print a tIoU x activation-threshold AP table for this trial.
        headers = ['tIoU', *['AP@{:0.2f}'.format(x) for x in thresholds]]
        rows = []
        for i, t_iou in enumerate(LOC_TEMPORAL_IOUS):
            rows.append([t_iou, *[x[i] for x in all_aps]])
        print(tabulate(rows, headers=headers))
        trial_results.append(np.array(all_aps))
    if len(trial_results) > 1:
        # Average the per-trial AP tables and print the summary.
        mean_result = trial_results[0] / n_trials
        for t in trial_results[1:]:
            mean_result += t / n_trials
        headers = ['tIoU', *['AP@{:0.2f}'.format(x) for x in thresholds]]
        rows = []
        for i, t_iou in enumerate(LOC_TEMPORAL_IOUS):
            rows.append(
                [t_iou, *[mean_result[j, i] for j in range(len(thresholds))]])
        print('\nMean across {} trials:'.format(len(trial_results)))
        print(tabulate(rows, headers=headers))
def load_tennis_data(config):
    """Build train/test Label lists for tennis from per-swing annotations.

    Each action key is 'base_video:player:frame'; the swing frame is matched
    to the clip (base video split into [start, end] frame ranges) containing
    it, then dilated by config.window_before/after seconds.
    """
    def parse_video_name(v):
        # 'name_start_end.mp4' -> (name, start, end, 'name_start_end')
        v = os.path.splitext(v)[0]
        video_name, start, end = v.rsplit('_', 2)
        return (video_name, int(start), int(end), v)
    video_meta_dict = {
        parse_video_name(v): get_metadata(
            os.path.join(dataset_paths.TENNIS_VIDEO_DIR, v)
        ) for v in tqdm(os.listdir(dataset_paths.TENNIS_VIDEO_DIR),
                        desc='Loading video metadata')
        if v.endswith('.mp4')
    }
    actions = load_actions('action_dataset/tennis/all.txt')
    test_prefixes = get_test_prefixes('tennis')
    train_labels = []
    test_labels = []
    for action, label in actions.items():
        if label not in config.classes:
            continue
        base_video, player, frame = action.split(':')
        frame = int(frame)
        for k in video_meta_dict:
            if k[0] == base_video and k[1] <= frame and k[2] >= frame:
                fps = video_meta_dict[k].fps
                # Frame index relative to the clip start.
                mid_frame = frame - k[1]
                start_frame = max(
                    0, int(mid_frame - fps * config.window_before))
                end_frame = int(mid_frame + fps * config.window_after)
                # Video id is prefixed with the player's camera view.
                label = Label(
                    '{}__{}'.format(player, k[-1]),
                    'action', start_frame, end_frame, fps)
                break
        # NOTE(review): if no clip contains `frame`, `label` is still the raw
        # class-name string from actions.items() and gets appended as-is —
        # confirm every action frame is guaranteed to fall inside some clip.
        if base_video.startswith(test_prefixes):
            test_labels.append(label)
        else:
            train_labels.append(label)
    return train_labels, test_labels
def load_fs_data(config):
    """Build train/test Label lists for figure-skating jump intervals.

    Action keys are 'video:start_frame:end_frame'; intervals are dilated to
    at least config.window_before/after seconds around their center.
    """
    video_meta_dict = {
        os.path.splitext(v)[0]: get_metadata(
            os.path.join(dataset_paths.FS_VIDEO_DIR, v))
        for v in tqdm(os.listdir(dataset_paths.FS_VIDEO_DIR),
                      desc='Loading video metadata')
        if v.endswith('.mp4')
    }
    actions = load_actions('action_dataset/fs/all.txt')
    test_prefixes = get_test_prefixes('fs')
    durations = []
    train_labels = []
    test_labels = []
    for action, label in actions.items():
        if label not in config.classes:
            continue
        video, start_frame, end_frame = action.split(':')
        start_frame = int(start_frame)
        end_frame = int(end_frame)
        fps = video_meta_dict[video].fps
        # Dilate
        mid_frame = (start_frame + end_frame) / 2
        start_frame = min(
            start_frame, int(mid_frame - fps * config.window_before))
        end_frame = max(end_frame, int(mid_frame + fps * config.window_after))
        durations.append((end_frame - start_frame) / fps)
        label = Label(video, 'action', start_frame, end_frame, fps)
        if video.startswith(test_prefixes):
            test_labels.append(label)
        else:
            train_labels.append(label)
    # Mean labeled duration in seconds (diagnostic output).
    print(np.mean(durations))
    return train_labels, test_labels
def load_fx_data(config):
    """Build train/test Label lists for FineGym female floor exercise (FX).

    Segments come from the FineGym annotation file; each single-stage segment
    becomes one 'action' Label dilated by the config window.
    """
    from finegym.util import ANNOTATION_FILE
    from sklearn.model_selection import train_test_split
    video_meta_dict = {
        os.path.splitext(v)[0]: get_metadata(
            os.path.join(dataset_paths.FX_VIDEO_DIR, v))
        for v in tqdm(os.listdir(dataset_paths.FX_VIDEO_DIR),
                      desc='Loading video metadata')
        if v.endswith('.mp4')
    }
    all_labels = []
    event_id = 2  # Female fx
    annotations = load_json(ANNOTATION_FILE)
    for video in annotations:
        for event, event_data in annotations[video].items():
            if event_data['event'] != event_id:
                continue
            video_name = '{}_{}'.format(video, event)
            if event_data['segments'] is None:
                print('{} has no segments'.format(video_name))
                continue
            for segment, segment_data in event_data['segments'].items():
                # Only single-stage, single-timestamp segments are expected.
                assert segment_data['stages'] == 1
                assert len(segment_data['timestamps']) == 1
                start, end = segment_data['timestamps'][0]
                fps = video_meta_dict[video_name].fps
                start_frame = int(max(0, fps * (start - config.window_before)))
                end_frame = int(fps * (end + config.window_after))
                all_labels.append(Label(
                    video_name, 'action', start_frame, end_frame, fps))
    # NOTE(review): train_test_split has no random_state here, so the FX
    # train/test partition differs on every run — confirm this is intended.
    _, test_videos = train_test_split(
        list(video_meta_dict.keys()), test_size=0.25)
    test_videos = set(test_videos)
    train_labels = []
    test_labels = []
    for l in all_labels:
        if l.video in test_videos:
            test_labels.append(l)
        else:
            train_labels.append(l)
    return train_labels, test_labels
def main(dataset, out_dir, n_trials, n_examples, tennis_window,
         emb_dir, _all, algorithm, norm, k, hidden_dim, batch_size):
    """Entry point: load labels for `dataset` and run the localization sweep."""
    config = DATA_CONFIGS[dataset]
    emb_dict = load_embs(emb_dir, norm)
    def print_label_dist(labels):
        # Summarize video count and per-class label counts.
        print('Videos:', len({l.video for l in labels}))
        for name, count in Counter(x.value for x in labels).most_common():
            print(' {} : {}'.format(name, count))
    if dataset.startswith('tennis'):
        # CLI override of the default tennis context window.
        if tennis_window is not None:
            config = config._replace(
                window_before=tennis_window,
                window_after=tennis_window)
        train_labels, test_labels = load_tennis_data(config)
    elif dataset.startswith('fs'):
        train_labels, test_labels = load_fs_data(config)
    else:
        train_labels, test_labels = load_fx_data(config)
    print('\nLoaded {} train labels'.format(len(train_labels)))
    print_label_dist(train_labels)
    print('\nLoaded {} test labels'.format(len(test_labels)))
    print_label_dist(test_labels)
    print('\nTrain / test split: {} / {}\n'.format(
        len(train_labels), len(test_labels)))
    run_localization(dataset, emb_dict, train_labels, test_labels,
                     n_examples, n_trials, algorithm, k, hidden_dim, batch_size,
                     out_dir, _all=_all)
if __name__ == '__main__':
    main(**vars(get_args()))
| StarcoderdataPython |
281925 | #!c:\am<NAME>\professional\code\python\udemycourse\profiles-rest-api\my_venv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's CLI dispatcher (runserver, migrate, ...).
    management.execute_from_command_line()
| StarcoderdataPython |
8057029 | import sys
import inspect
from typing import *
import collections
# to_type -> set of from_types that are accepted as implicitly convertible.
implicit_conversions: Dict[type, Set[type]] = {}
# typing constructs already warned about as unsupported (warn once each).
warned_for: Set[type] = set()
def qualified_name(t):
    """Return the qualified type name of the *value* t."""
    return qualified_type(type(t))
def qualified_type(t):
    """Return a short human-readable name for the type object *t*."""
    mod = t.__module__
    if mod == 'builtins':
        return t.__name__
    if mod == 'typing':
        # Strip the 'typing.' prefix from the repr (HACKY HACK).
        return str(t).replace('typing.', '')
    return t.__qualname__
class TypeCheckFailure(Exception):
    """Internal signal that a value failed a type check.

    Carries a human-readable description in ``msg``; converted to a plain
    TypeError at the public check_t boundary.
    """
    __slots__ = ['msg']

    def __init__(self, msg: str):
        self.msg = msg
def get_signature(f) -> inspect.Signature:
    """Return f's signature, memoized on the function object itself."""
    try:
        return f.__memo
    except AttributeError:
        sig = inspect.signature(f)
        f.__memo = sig
        return sig
def typecheck(f: Callable):
    """Typecheck a function's arguments against its type annotations.

    Examples
    --------
    >>> def f(i: int, s: str) -> str:
    ...     typecheck(f)
    ...     return i * s

    Notes
    -----
    Functions should call this function with themselves as an argument. This
    is required to look up the function signature without hacking around in
    the garbage collector and frames too deeply.
    Not all types in :mod:`typing` are supported at this time.

    Parameters
    ----------
    f
        Function to check.

    Raises
    ------
    TypeError
    """
    try:
        # Read the caller's local variables (the arguments to f).
        frame = inspect.currentframe().f_back
        args = frame.f_locals
        spec = get_signature(f)
        for arg_name, param in spec.parameters.items():
            assert isinstance(param, inspect.Parameter)
            t = param.annotation
            # NOTE(review): unannotated parameters have the truthy sentinel
            # inspect.Parameter.empty here, so `if t:` does not skip them —
            # confirm whether `t is not param.empty` was intended.
            if t:
                if param.kind in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY):
                    check_t(args[arg_name], t, f"argument {repr(arg_name)} of {f.__qualname__}")
                elif param.kind == param.VAR_POSITIONAL:
                    # *args: check each element against the annotation.
                    varargs = args[arg_name]
                    assert isinstance(varargs, tuple)
                    for i, value in enumerate(varargs):
                        check_t(value, t, f"element {i} of argument {repr(arg_name)} of {f.__qualname__}")
                else:
                    # **kwargs: check each value against the annotation.
                    assert param.kind == param.VAR_KEYWORD
                    varkw = args[arg_name]
                    # NOTE(review): collections.Mapping was removed in Python
                    # 3.10; collections.abc.Mapping is required there.
                    assert isinstance(varkw, collections.Mapping)
                    for name, value in varkw.items():
                        check_t(value, t, f"keyword argument {repr(arg_name)} of {f.__qualname__}")
    finally:
        # Break reference cycles through the frame object (see inspect docs).
        del frame
        del args
def register_conversion(from_type: type, to_type: type) -> None:
    """Register a recognized conversion from type to type.

    Examples
    --------
    >>> register_conversion(int, float)

    Notes
    -----
    The only self-initialized conversion is from int to float.

    Parameters
    ----------
    from_type : type
    to_type : type
    """
    implicit_conversions.setdefault(to_type, set()).add(from_type)
def check_union(arg, t, context: str) -> None:
    """Check arg against Union[...]; succeeds if any member type matches."""
    # NOTE(review): typing.cast expects (type, value); this call is a runtime
    # no-op either way.
    cast(t, Union)
    union_params = t.__args__
    for t_ in union_params:
        try:
            _check_t(arg, t_, context)
            return
        except TypeCheckFailure:
            pass
    options = ', '.join(qualified_type(t_) for t_ in union_params)
    raise TypeCheckFailure(f"expected ({options}); found '{qualified_name(arg)}' at {context}: {repr(arg)}")
def _check_collection(arg: collections.Collection, element_type, context: str, indexed: bool) -> None:
    """Check every element of a collection; include indices when `indexed`."""
    if indexed:
        for idx, item in enumerate(arg):
            _check_t(item, element_type, f'element {idx} of {context}')
    else:
        for item in arg:
            _check_t(item, element_type, f'element of {context}')
def check_list(arg, t, context: str) -> None:
    """Check that arg is a list and its elements match List[...]'s parameter."""
    cast(t, List)
    if not isinstance(arg, list):
        raise TypeCheckFailure(f"expected 'list', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    # Bare List: no element type to enforce.
    if t is List:
        return
    element_type = getattr(t, '__args__', t.__parameters__)[0]
    _check_collection(arg, element_type, context, indexed=True)
def check_sequence(arg, t, context: str) -> None:
    """Check that arg is a Sequence and its elements match the parameter."""
    cast(t, Sequence)
    # NOTE(review): collections.Sequence was removed in Python 3.10
    # (collections.abc.Sequence).
    if not isinstance(arg, collections.Sequence):
        raise TypeCheckFailure(f"expected 'Sequence', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    if t is Sequence:
        return
    element_type = getattr(t, '__args__', t.__parameters__)[0]
    _check_collection(arg, element_type, context, indexed=True)
def check_set(arg, t, context: str) -> None:
    """Check that arg is a set and its elements match Set[...]'s parameter."""
    cast(t, Set)
    if not isinstance(arg, set):
        raise TypeCheckFailure(f"expected 'set', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    if t is Set:
        return
    element_type = getattr(t, '__args__', t.__parameters__)[0]
    _check_collection(arg, element_type, context, indexed=False)
def check_frozenset(arg, t, context: str) -> None:
    """Check that arg is a frozenset and its elements match the parameter."""
    cast(t, FrozenSet)
    if not isinstance(arg, frozenset):
        raise TypeCheckFailure(f"expected 'frozenset', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    if t is FrozenSet:
        return
    element_type = getattr(t, '__args__', t.__parameters__)[0]
    _check_collection(arg, element_type, context, indexed=False)
def _check_mapping(arg: collections.Mapping, key_type, value_type, context: str) -> None:
    """Check every key and value of a mapping against the given types."""
    for k, v in arg.items():
        _check_t(k, key_type, f'key of {context}')
        _check_t(v, value_type, f'value for key {repr(k)} of {context}')
def check_dict(arg, t, context: str) -> None:
    """Check that arg is a dict with keys/values matching Dict[K, V]."""
    cast(t, Dict)
    if not isinstance(arg, dict):
        raise TypeCheckFailure(f"expected 'dict', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    if t is Dict:
        return
    key_type, value_type = getattr(t, '__args__', t.__parameters__)
    _check_mapping(arg, key_type, value_type, context)
def check_mapping(arg, t, context: str) -> None:
    """Check that arg is a Mapping with keys/values matching Mapping[K, V]."""
    cast(t, Mapping)
    # NOTE(review): collections.Mapping was removed in Python 3.10
    # (collections.abc.Mapping).
    if not isinstance(arg, collections.Mapping):
        raise TypeCheckFailure(f"expected 'Mapping', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    if t is Mapping:
        return
    key_type, value_type = getattr(t, '__args__', t.__parameters__)
    _check_mapping(arg, key_type, value_type, context)
def check_tuple(arg, t, context: str) -> None:
    """Check arg against Tuple[...]: fixed-length element-wise, or
    variable-length (Tuple[X, ...]) against a single element type."""
    cast(t, Tuple)
    if not isinstance(arg, tuple):
        raise TypeCheckFailure(f"expected 'tuple', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    if t is Tuple:
        return
    types = t.__args__
    if types[-1] is Ellipsis:
        # variable-length tuple
        element_type = types[0]
        _check_collection(arg, element_type, context, indexed=True)
    else:
        if len(arg) != len(types):
            types_str = ', '.join(map(qualified_type, types))
            raise TypeCheckFailure(f"expected tuple with {len(types)} elements of types {types_str}, "
                                   f"found {len(arg)} elements at {context}")
        for i, element in enumerate(arg):
            # BUG FIX: error context previously read "element{i}" with no
            # space, unlike every other message in this module.
            _check_t(element, types[i], f"element {i} of {context}")
def check_callable(arg, t, context: str) -> None:
    """Check that arg is callable; for parameterized Callable[[...], R],
    also verify arity (only plain positional parameters are supported)."""
    cast(t, Callable)
    if not callable(arg):
        raise TypeCheckFailure(f'expected callable {qualified_type(t)}, found not callable at {context}')
    if t is Callable:
        return
    if isinstance(t.__args__, tuple):
        # Last __args__ entry is the return type; the rest are parameters.
        argument_types = t.__args__[:-1]
        # Callable[..., R] skips the signature check entirely.
        check_args = argument_types != (Ellipsis,)
        if check_args:
            signature = inspect.signature(arg)
            parameter_types = list(p.kind for p in signature.parameters.values())
            if any(kind == inspect.Parameter.KEYWORD_ONLY for kind in parameter_types):
                raise TypeCheckFailure(f"found keyword-only arguments on callable signature at {context}")
            elif any(kind == inspect.Parameter.VAR_POSITIONAL for kind in parameter_types):
                raise TypeCheckFailure(f"found variable-length args on callable signature at {context}")
            elif any(kind == inspect.Parameter.VAR_KEYWORD for kind in parameter_types):
                raise TypeCheckFailure(f"found variable keyword arguments on callable signature at {context}")
            elif len(parameter_types) != len(argument_types):
                raise TypeCheckFailure(f"expected {len(argument_types)}-parameter callable, "
                                       f"found {len(parameter_types)}-parameter callable at {context}")
def check_collection(arg, t, context: str) -> None:
    """Check that arg is a Collection and its elements match the parameter."""
    # NOTE(review): collections.Collection was removed in Python 3.10
    # (collections.abc.Collection).
    if not isinstance(arg, collections.Collection):
        raise TypeCheckFailure(f"expected 'Collection', found '{qualified_name(arg)}' at {context}: {repr(arg)}")
    if t is Collection:
        return
    else:
        return _check_collection(arg, t.__args__[0], context, indexed=False)
def check_t(arg, t, context: str) -> None:
    """Public entry point: check `arg` against type `t`, raising TypeError."""
    # the following motif is used to squelch the original (deeply recursive)
    # stack trace
    try:
        _check_t(arg, t, context)
    except TypeCheckFailure as e:
        raise TypeError(e.msg) from None
# Dispatch table: typing construct -> checker function.
# NOTE(review): keys are the typing aliases themselves; on Python 3.7+
# `X[...].__origin__` returns the runtime class (list, dict, ...), so verify
# these keys actually match __origin__ on the interpreter in use.
known_checkers = {
    Union: check_union,
    Dict: check_dict,
    Mapping: check_mapping,
    Sequence: check_sequence,
    Set: check_set,
    FrozenSet: check_frozenset,
    List: check_list,
    Tuple: check_tuple,
    Collection: check_collection,
    Callable: check_callable
}
def _check_t(arg, t, context: str) -> None:
    """Core recursive check: dispatch typing constructs via known_checkers,
    fall back to isinstance (plus registered implicit conversions)."""
    if t is Any:
        return
    # typing.Type subtypes have the '__origin__' attribute set
    typing_type = getattr(t, '__origin__', None)
    if typing_type is not None:
        f = known_checkers.get(typing_type)
        if f:
            f(arg, t, context)
        else:
            # Unsupported typing construct: warn once, then accept silently.
            if typing_type not in warned_for:
                sys.stderr.write(f"WARN: typecheck: type '{typing_type}' is not currently supported\n")
                warned_for.add(typing_type)
    else:
        assert isinstance(t, type)
        if not isinstance(arg, t):
            # Accept values whose type is registered as implicitly
            # convertible to t (e.g. int where float is expected).
            if t not in implicit_conversions or all(
                    not isinstance(arg, compatible_type) for compatible_type in implicit_conversions[t]):
                raise TypeCheckFailure(f"expected type '{qualified_type(t)}', "
                                       f"found '{qualified_type(type(arg))}' at {context}: {repr(arg)}")
register_conversion(int, float)
| StarcoderdataPython |
1955989 | import sys
import random
import re
from collections import defaultdict
import pyttsx3
# Initial word boundaries are represented by '#', and final word boundaries are represented by '##'.
# ARPAbet-style phoneme inventory, bracketed by the word boundary markers
# '#' (initial) and '##' (final).
PHONE_LIST = (
    '#', 'AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'B', 'CH', 'D', 'DH', 'EH', 'ER', 'EY', 'F', 'G', 'HH', 'IH', 'IY', 'JH',
    'K', 'L', 'M', 'N', 'NG', 'OW', 'OY', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UW', 'V', 'W', 'Y', 'Z', 'ZH', '##'
)
def di_training():
    """Estimate diphone (bigram) transition probabilities from sys.argv[1].

    Each input line is an orthographic word followed by its phonemes; the
    word is replaced by the initial boundary '#' and the final boundary '##'
    is appended. Returns a dict-of-dicts mapping each first phone to
    {second phone: P(second | first)}.
    """
    unigram_counts = defaultdict(lambda: 0)
    bigram_counts = defaultdict(lambda: 0)
    with open(sys.argv[1], encoding='utf-8') as train_f:
        for line in train_f.readlines():
            # Discard the orthographic word, bracket with boundary markers.
            line = re.sub(u'^[a-z\']+', '#', line) + ' ##'
            phonemes = line.split()
            for first, second in zip(phonemes, phonemes[1:]):
                unigram_counts[first] += 1
                bigram_counts[(first, second)] += 1
            # Count the word-final boundary '##'.
            unigram_counts['##'] += 1
    # P(second | first) = count(first, second) / count(first)
    transition_probs = defaultdict(dict)
    for (first, second), count in bigram_counts.items():
        transition_probs[first][second] = count / unigram_counts[first]
    return transition_probs
# Generate pseudo words with respect to diphone frequencies.
def di_gen(p_diph):
    """Generate, print and speak 25 pseudo-words sampled from p_diph."""
    speaker = pyttsx3.init()
    voices = speaker.getProperty('voices')
    speaker.setProperty('voice', voices[1].id)
    speaker.setProperty('rate', 140)
    for i in range(25):
        while True:  # This loop ensures that only words of a sufficient length and composition will be printed.
            gen_phone = ''
            gen_word = []
            while gen_phone != '##':  # Phonemes are added until '##' is generated.
                gen_word.append(gen_phone)  # Adds generated phoneme to word being generated (1st addition is '').
                if gen_phone == '':
                    gen_phone = '#'
                gen_phone = gen_phone2(gen_phone, p_diph)  # 2nd half of diphone returned.
            word = ' '.join(gen_word)  # Assembles list of phonemes (gen_word) into string separated by ' '.
            if not re.search(r'[AEIOU]', word):  # If generated word has no vowel, re-do this
                continue  # iteration of for loop (generate new word).
            print(word)  # Print final word at the end of each for loop.
            gen_word = (''.join(gen_word)).lower()
            speaker.say(gen_word)
            print(gen_word)
            speaker.runAndWait()
            break  # Breaks outer while loop to proceed to next iteration of for loop.
# Generate phonemes on a diphone (bigram) basis.
def gen_phone2(phone1, p_diph):
    """Sample a successor phoneme for phone1 from its diphone distribution.

    Falls back to '#' if the probability mass is exhausted.
    """
    mass = random.random()
    for candidate, prob in p_diph[phone1].items():
        mass -= prob
        if mass < 0.0:
            return candidate
    return '#'
# Reads in the transcription file and returns a triphone dictionary of items
# containing each leading diphone paired with all possible third phones and
# their corresponding probabilities.
def tri_training():
    """Estimate triphone (trigram) transition probabilities from sys.argv[1].

    Returns a dict-of-dicts mapping each (phone1, phone2) diphone to
    {phone3: P(phone3 | phone1, phone2)}.
    """
    diphone_count = defaultdict(lambda: 0)
    triphone_count = defaultdict(lambda: 0)
    with open(sys.argv[1], encoding='utf-8') as train_f:
        for line in train_f.readlines():
            # Discard the orthographic word, bracket with boundary markers.
            line = re.sub(u'^[a-z\']+', '#', line) + ' ##'
            phonemes = line.split()
            # Skip degenerate lines (e.g. blank input) that carry no phonemes.
            if len(phonemes) < 2:
                continue
            for i in range(len(phonemes) - 2):
                diphone = tuple(phonemes[i:i+2])
                diphone_count[diphone] += 1
                triphone = tuple(phonemes[i:i+3])
                # BUG FIX: removed leftover debugging code that printed and
                # called breakpoint() whenever the triphone ('#','NG','L')
                # occurred, halting the run in the debugger.
                triphone_count[triphone] += 1
            # Count the word-final diphone ending in '##'.
            diphone_count[(phonemes[-2], '##')] += 1
    # P(phone3 | diphone) = count(triphone) / count(diphone)
    master_dict = defaultdict(dict)
    for triphone in triphone_count.keys():
        master_dict[triphone[:2]][triphone[2]] = triphone_count[triphone] / diphone_count[(triphone[:2])]
    return master_dict
# Generate pseudo words with respect to triphone frequencies.
def tri_gen(p_triph):
    """Generate, print and speak 25 pseudo-words sampled from p_triph."""
    speaker = pyttsx3.init()
    voices = speaker.getProperty('voices')
    speaker.setProperty('voice', voices[1].id)
    speaker.setProperty('rate', 140)
    for i in range(25):
        while True:  # This loop ensures that only words of a sufficient length and composition will be printed.
            gen_phone = ''
            gen_diphone = ('', '')
            gen_word = []
            while gen_phone != '##':  # Phonemes are added until '##' is generated.
                gen_word.append(gen_phone)  # Adds generated phoneme to word being generated (1st addition is '').
                if gen_diphone == ('', ''):
                    gen_diphone = ('#', random.choice(PHONE_LIST[1:-1]))  # Assigns random 1st phoneme (excludes '##').
                    gen_word.append(gen_diphone[1])
                gen_phone = gen_triphone3(gen_diphone, p_triph)  # 2nd half of diphone returned.
                gen_diphone = (gen_diphone[1], gen_phone)
            word = ' '.join(gen_word)  # Assembles list of phonemes (gen_word) into string separated by ' '.
            if not re.search(r'[AEIOU]', word):  # If generated word has no vowel, re-do this
                continue  # iteration of for loop (generate new word).
            print(word)  # Print final word at the end of each for loop.
            gen_word = (''.join(gen_word)).lower()
            speaker.say(gen_word)
            print(gen_word)
            speaker.runAndWait()
            break  # Breaks outer while loop to proceed to next iteration of for loop.
# Generate phonemes on a triphone (trigram) basis.
def gen_triphone3(diphone, p_triph):
    """Sample the third phoneme of a triphone given its leading diphone.

    Falls back to the final boundary '##' if the mass is exhausted.
    """
    mass = random.random()
    for candidate, prob in p_triph[diphone].items():
        mass -= prob
        if mass < 0.0:
            return candidate
    return '##'
def main():
    """Dispatch to diphone (argv[2] == 2) or triphone (== 3) generation."""
    n = int(sys.argv[2])
    if n == 2:
        master = di_training()  # Create dictionary of all diphones and corresponding probabilities.
        di_gen(master)
    elif n == 3:
        master = tri_training()  # Create dictionary of all triphones and corresponding probabilities.
        tri_gen(master)
    else:
        print('Invalid argument.')
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9704068 | <filename>paper_exp_lastmin.py
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 21:07:00 2021
@author: chait
"""
import matplotlib.pyplot as plt
import numpy as np
from utils import *
from rrt_paths import *
from sd_metric import *
from heatmap import *
import copy
import matplotlib.animation as animation
import cv2
from multiprocessing import Pool
import time
import random
from scipy.stats import poisson
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Load image of ep6 plan %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
# NOTE: 0.26m is one pixel
room_path = 'ep6_outline_smallest.jpg'
ep6_plan = plt.imread(room_path)
ep6_plan_copy = copy.deepcopy(ep6_plan)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Get all user inputs %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
#### Get path to store results in ####
# Precomputed per-person trajectories for the 30-person configuration.
allPaths = np.load('results/metricTests/Exp3/num_ppl/ppl30/path.npy', allow_pickle=True)
results_path = 'results/metricTests/Exp3/num_ppl/ppl30/'
removed_pixels = None
# Ventilation: fraction of air exchanged and the exchange period in seconds.
air_change_percent = 50/100
air_change_rate = 10*60
# Viral half-life in seconds (9 minutes).
hl = 9*60
# Number of people in the simulation; the per-person parameter vectors below
# all have length N.
N=30
kappa = 0.7*np.ones(N) #immune particles
v = 1*np.ones(N) # all not immune
#v[random.sample(range(0,N), int(100*30/100))]=0
rho_range = 68*np.ones(N) # age
zeta = 0.7*np.ones(N) # susceptibility
beta = 0.4*np.ones(N) # mask eff
eta = 0.7*np.ones(N) # infectiousness
mu=0.5*np.ones(N)
# Age groups: 0-4, 5-17, 18-29, 30-39, 40-49, 50-64, 65-74, 75-84, 85+
# Risk of hospitalization: 2x, 1x, 6x, 10x, 15x, 25x, 40x, 65x, 95x
# Ref: https://www.cdc.gov/coronavirus/2019-ncov/covid-data/investigations-discovery/hospitalization-death-by-age.html
# Map each person's age (rho_range) to a relative hospitalization-risk weight,
# normalized by 259 (the sum of the CDC multipliers across all age bands).
# The original expressed this as a nine-branch if/elif chain; it is replaced
# here by a data-driven table of (inclusive upper age bound, multiplier).
_AGE_RISK_BANDS = [(4, 2), (17, 1), (29, 6), (39, 10), (49, 15),
                   (64, 25), (74, 40), (84, 65)]
rho = np.zeros(N)
for i in range(len(rho_range)):
    age = rho_range[i]
    if age < 0:
        continue  # matches the original chain: negative ages keep weight 0
    for upper_age, risk in _AGE_RISK_BANDS:
        if age <= upper_age:
            rho[i] = risk/259
            break
    else:
        rho[i] = 95/259  # 85+ band is the fallback
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Convert image to black and white and get boundaries and obstacles %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
# NOTE(review): the original first computed bw_img with a manual per-pixel
# threshold loop over the red channel, then immediately discarded that result
# by recomputing bw_img with the cv2/Otsu pipeline below.  The dead manual
# loop has been removed; the cv2 result is what was always used downstream.
bw_img = cv2.cvtColor(ep6_plan, cv2.COLOR_BGR2GRAY)
# Otsu picks the threshold automatically; the second call re-binarizes with it.
(thresh, bw_img) = cv2.threshold(bw_img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
bw_img = cv2.threshold(bw_img, thresh, 255, cv2.THRESH_BINARY)[1]
# Every non-white pixel is an obstacle; deduplicate (row, col) coordinates.
indices = np.where(bw_img != [255])
obstacles = zip(indices[0], indices[1])
obstacles = list(set(obstacles))
################################################################################################################################
# Longest trajectory across all N people; used as the frame count for the
# social-distancing metric computation.
maxLen = 0
for n in range(N):
    if len(allPaths[n]) > maxLen:
        maxLen = len(allPaths[n])
alpha = 8 # alpha = 2m. 1 pixel = 0.26m, therefore approx. 8 pixels = 2m.
sd_metric_values, framewise_positions, all_distances = get_metric_values(allPaths, maxLen, alpha)
# Get SD metric for each person
sd_personwise = []
sd_avg = []
for i in range(len(framewise_positions)):
    sd_temp = []
    for n in range(N):
        sd = sd_metric_per_person(framewise_positions[i], n, alpha)
        sd_temp.append(sd)
    sd_personwise.append(sd_temp)
    sd_avg.append(np.mean(sd_temp))
# Keep the 30-person average series under its own name for the combined plots.
sd_avg30=sd_avg
############################################################################################################################
# NOTE(review): the result of this first risk computation is immediately
# overwritten by the second call below (only num_initial_inf differs), so it
# is dead work — presumably one call should be active per experiment run.
# TODO confirm which num_initial_inf list is intended here.
R_reza, C_reza, pixels_to_color, final_inf = risk_detailed_reza_original(framewise_positions, kappa=kappa, v=v, rho=rho,
                        zeta=zeta, beta=beta, eta=eta, mu=1, D=0.003, air_change_percent=air_change_percent,
                        air_change_rate=air_change_rate, neighbor_dist=1, F_s=1, hl=hl,
                        num_initial_inf=[5,2,10,16,27,23],
                        ep6_plan=ep6_plan, XMIN1=12, XMAX1=115, YMIN1=24, YMAX1=210)
# Only for num_ppl
R_reza, C_reza, pixels_to_color, final_inf = risk_detailed_reza_original(framewise_positions, kappa=kappa, v=v, rho=rho,
                        zeta=zeta, beta=beta, eta=eta, mu=1, D=0.003, air_change_percent=air_change_percent,
                        air_change_rate=air_change_rate, neighbor_dist=1, F_s=1, hl=hl,
                        num_initial_inf=[2,14,6],
                        ep6_plan=ep6_plan, XMIN1=12, XMAX1=115, YMIN1=24, YMAX1=210)
# Persist the per-frame risk (R) and concentration (C) arrays for later plots.
r_path = results_path + 'R.npy'
np.save(r_path, R_reza)
c_path = results_path + 'C.npy'
np.save(c_path, C_reza)
R = []
R_avg = []
for i in range(len(R_reza)):
    R.append(R_reza[i])
    R_avg.append(np.mean(R_reza[i]))
# Tick every 5th frame on the x axis, labelled with the 1-based frame number.
x = np.arange(1,len(R_reza)+1, 5)
xlabel = []
for i in range(len(sd_personwise)):
    if i%5 == 0:
        xlabel.append(i+1)
# NOTE(review): R_avg5 is never defined in this script — it comes from an
# earlier run of the 5-person configuration in the same interactive session.
# Run standalone, the next line raises NameError.
R_avg30=R_avg[0:len(R_avg5)]
#sd_avg5=sd_avg
# Only for num_ppl
# NOTE(review): this re-ticking block duplicates the one just above.
x = np.arange(1,len(R_avg5)+1, 5)
xlabel = []
for i in range(len(sd_personwise)):
    if i%5 == 0:
        xlabel.append(i+1)
R_avg30=R_avg[0:len(R_avg5)]
# NOTE(review): R_avg5 / R_avg15 / sd_avg5 / sd_avg15 are not defined anywhere
# in this script — they come from earlier runs of the ppl5/ppl15
# configurations in the same interactive session.  Run standalone, these plot
# calls raise NameError.
plt.figure(figsize=(15,10))
plt.plot(R_avg5, label='5 people')
plt.plot(R_avg15, 'o-', label='15 people')
plt.plot(R_avg30, '*-', label='30 people')
plt.xticks(x, xlabel, size=30)
plt.yticks(size=30)
plt.xlabel('Frame number', fontsize=30)
plt.ylabel('Average Risk Metric', fontsize=30)
plt.legend(prop={'size': 30})
plt.savefig('results/metricTests/Exp3/num_ppl/avgplots_ppl_combined.png')
# Only for num_ppl
# Combined social-distancing plot; all series truncated to the 5-person length.
plt.figure(figsize=(15,10))
plt.plot(np.array(sd_avg5), label='5 people')
plt.plot(np.array(sd_avg15[0:len(sd_avg5)]), 'o-', label='15 people')
plt.plot(np.array(sd_avg30[0:len(sd_avg5)]), '*-', label='30 people')
plt.xticks(x, xlabel, size=30)
plt.yticks(size=30)
plt.xlabel('Frame number', fontsize=30)
plt.ylabel('Average Social Distancing Metric', fontsize=30)
plt.legend(prop={'size': 30})
plt.savefig('results/metricTests/Exp3/num_ppl/avgsd_ppl_combined.png')
# Only for num_ppl
plt.figure(figsize=(15,10))
plt.plot(R_avg)
plt.xticks(x, xlabel, size=20)
plt.yticks(size=20)
plt.xlabel('Frame number', fontsize=18)
plt.ylabel('Average Risk Metric', fontsize=18)
plt.title('Number of people=30', fontsize=18)
plt.savefig('results/metricTests/Exp3/num_ppl/avgrisk_ppl30.png')
# Only for num_ppl
plt.figure(figsize=(15,10))
plt.plot(sd_avg)
plt.xticks(x, xlabel, size=20)
plt.yticks(size=20)
plt.xlabel('Frame number', fontsize=18)
plt.ylabel('Average Social Distancing Metric', fontsize=18)
# NOTE(review): title says 5 people but the data loaded at the top of the
# script is the 30-person configuration — looks like a stale copy-paste.
# Left unchanged here because the string alters the figure output.
plt.title('Number of people=5', fontsize=18)
plt.savefig('results/metricTests/Exp3/num_ppl/avgsd_ppl5.png')
##########################################################################################################################
#######################################             EXPERIMENT 1                   ########################################################
######################################################################################################################
# N = 2, 4, 10, 30
# Walking at speed of 1m per second or 1m per frame
results_path = 'results/metricTests/Exp1/'
# Radii in pixels (1 pixel = 0.26 m): sweep between an 8 m and a 1 m circle.
outer_radius = 8/0.26
inner_radius = 1/0.26
distance = int(outer_radius - inner_radius)
# Define background image
background = 255*np.ones((100,100,3)).astype(np.uint8)
# Defining middle of space as (50,50)
mid = (50,50)
def _ring_positions(num_people):
    """Place `num_people` evenly on a circle around `mid`, sweeping the
    radius from `outer_radius` down and then from `inner_radius` back up,
    one frame per radius step.

    Returns (positions, frames, d_in_m) where positions[t] is the list of
    (x, y) pixel coordinates at frame t, frames[t] is a copy of `background`
    with the people drawn as black pixels, and d_in_m[t] is the radius in
    meters (1 pixel = 0.26 m) as a string.
    """
    # Shrinking sweep followed by a growing sweep, exactly as the original
    # pair of back-to-back loops did.
    radii = [outer_radius - d for d in range(0, distance)] + \
            [inner_radius + d for d in range(0, distance)]
    positions, frames, d_in_m = [], [], []
    for r in radii:
        frame_pos = []
        bg = deepcopy(background)
        for n in range(num_people):
            theta = n*2*np.pi/num_people
            x = int(r*np.cos(theta) + mid[0])
            # NOTE(review): the original used mid[0] for the y offset too;
            # mid[0] == mid[1] == 50 here, so mid[1] is equivalent.
            y = int(r*np.sin(theta) + mid[1])
            frame_pos.append((x, y))
            bg[int(x), int(y), :] = [0, 0, 0]
        positions.append(frame_pos)
        frames.append(bg)
        d_in_m.append(str(round(r*0.26, 1)))
    return positions, frames, d_in_m


# The original repeated the same two radius-sweep loops verbatim for
# N = 2, 4, 10 and 30 people; the duplication is factored into
# _ring_positions().  After this section `frames` and `d_in_m` hold the
# values from the 30-person sweep and N == 30, exactly as before.
start = []
end = []
positions2, frames, d_in_m = _ring_positions(2)
positions4, frames, d_in_m = _ring_positions(4)
positions10, frames, d_in_m = _ring_positions(10)
positions30, frames, d_in_m = _ring_positions(30)
N = 30
def _paths_from_positions(positions, num_people):
    """Transpose per-frame position lists into one path (a list of (x, y)
    per frame) for each of `num_people` people."""
    return [[frame_pos[n] for frame_pos in positions] for n in range(num_people)]


# alpha = 2m. 1 pixel = 0.26m, therefore approx. 8 pixels = 2m.
alpha = 8
# The original rebuilt the per-person path lists with four copies of the same
# transpose loop; factored into _paths_from_positions().
allPaths2 = _paths_from_positions(positions2, 2)
sd_metric_values2, framewise_positions2, all_distances2 = get_metric_values(allPaths2, len(allPaths2[0]), alpha)
allPaths4 = _paths_from_positions(positions4, 4)
sd_metric_values4, framewise_positions4, all_distances4 = get_metric_values(allPaths4, len(allPaths4[0]), alpha)
allPaths10 = _paths_from_positions(positions10, 10)
sd_metric_values10, framewise_positions10, all_distances10 = get_metric_values(allPaths10, len(allPaths10[0]), alpha)
allPaths30 = _paths_from_positions(positions30, 30)
sd_metric_values30, framewise_positions30, all_distances30 = get_metric_values(allPaths30, len(allPaths30[0]), alpha)
# Per-population risk computations for Experiment 1.  Each run treats the
# whole population as initially infected (num_initial_inf == N) on the
# synthetic 100x100 background.
N=2
R_reza2, C_reza2, pixels_to_color, _ = risk_detailed_reza_original(framewise_positions2, kappa=0.7*np.ones(N), v=1*np.ones(N), rho=0.2*np.ones(N),
                        zeta=0.7*np.ones(N), beta=0.3*np.ones(N), eta=0.7*np.ones(N), mu=1, D=0.003,
                        air_change_percent=0.5, air_change_rate=10*60, neighbor_dist=1, F_s=1, hl=9*60,num_initial_inf=2,
                        ep6_plan=background, XMIN1=0, XMAX1=100, YMIN1=0, YMAX1=100)
R_avg2 = []
for i in range(len(R_reza2)):
    R_avg2.append(np.mean(R_reza2[i]))
N=4
R_reza4, C_reza4, pixels_to_color, _ = risk_detailed_reza_original(framewise_positions4, kappa=0.7*np.ones(N), v=1*np.ones(N), rho=0.2*np.ones(N),
                        zeta=0.7*np.ones(N), beta=0.3*np.ones(N), eta=0.7*np.ones(N), mu=1, D=0.003,
                        air_change_percent=0.5, air_change_rate=10*60, neighbor_dist=1, F_s=1, hl=9*60,num_initial_inf=4,
                        ep6_plan=background, XMIN1=0, XMAX1=100, YMIN1=0, YMAX1=100)
R_avg4 = []
for i in range(len(R_reza4)):
    R_avg4.append(np.mean(R_reza4[i]))
N=10
R_reza10, C_reza10, pixels_to_color, _ = risk_detailed_reza_original(framewise_positions10, kappa=0.7*np.ones(N), v=1*np.ones(N), rho=0.2*np.ones(N),
                        zeta=0.7*np.ones(N), beta=0.3*np.ones(N), eta=0.7*np.ones(N), mu=1, D=0.003,
                        air_change_percent=0.5, air_change_rate=10*60, neighbor_dist=1, F_s=1, hl=9*60,num_initial_inf=10,
                        ep6_plan=background, XMIN1=0, XMAX1=100, YMIN1=0, YMAX1=100)
R_avg10 = []
for i in range(len(R_reza10)):
    R_avg10.append(np.mean(R_reza10[i]))
N=30
R_reza30, C_reza30, pixels_to_color, _ = risk_detailed_reza_original(framewise_positions30, kappa=0.7*np.ones(N), v=1*np.ones(N), rho=0.2*np.ones(N),
                        zeta=0.7*np.ones(N), beta=0.3*np.ones(N), eta=0.7*np.ones(N), mu=1, D=0.003,
                        air_change_percent=0.5, air_change_rate=10*60, neighbor_dist=1, F_s=1, hl=9*60,num_initial_inf=30,
                        ep6_plan=background, XMIN1=0, XMAX1=100, YMIN1=0, YMAX1=100)
R_avg30 = []
for i in range(len(R_reza30)):
    R_avg30.append(np.mean(R_reza30[i]))
# Tick every 5th frame; labels are the ring radius in meters at that frame.
x = np.arange(1,len(sd_metric_values30)+1, 5)
xlabel = []
for i in range(len(sd_metric_values30)):
    if i%5 == 0:
        xlabel.append(d_in_m[i])
plt.figure(figsize=(15,10))
#plt.plot(sd_metric_values2, label='2 people')
# NOTE(review): the divisors normalize the raw SD metric per population, but
# 30*33, 10*10 and 4*4 do not follow one obvious formula (e.g. N*(N-1) would
# give 30*29, 10*9, 4*3) — verify against the metric's definition.
plt.plot(sd_metric_values30/(30*(33)), label='30 people')
plt.plot(sd_metric_values10/(10*(10)), 'o-', label='10 people')
plt.plot(sd_metric_values4/(4*(4)), '*-', label='4 people')
plt.xticks(x, xlabel, size=30)
plt.yticks(size=30)
plt.xlabel('Average distance between people in meters', fontsize=30)
plt.ylabel('Average Social Distancing Metric', fontsize=30)
plt.legend(prop={'size': 30})
plt.savefig('results/metricTests/Exp1/sdmetric_exp1_avg.png')
plt.figure(figsize=(15,10))
# BUG FIX: the series/label pairs were mismatched (R_avg10 was labelled
# '30 people', R_avg4 '10 people' and R_avg30 '4 people').  Each average-risk
# curve now carries the label of the population it was computed for, keeping
# the same ordering and marker styles as the social-distancing plot above.
plt.plot(R_avg30, label='30 people')
plt.plot(R_avg10, 'o-', label='10 people')
plt.plot(R_avg4, '*-', label='4 people')
plt.xticks(x, xlabel, size=30)
plt.yticks(size=30)
plt.xlabel('Average distance between people in meters', fontsize=30)
plt.ylabel('Average Risk Metric', fontsize=30)
plt.legend(prop={'size': 30})
plt.savefig('results/metricTests/Exp1/riskmetric_exp1_avg.png')
##########################################################################################################################
#######################################             EXPERIMENT 2                   ########################################################
######################################################################################################################
results_path = 'results/metricTests/Exp2/'
# Five people standing still on a fixed-radius circle for T frames.
N = 5
T = 60
# Define background image
background = 255*np.ones((100,100,3)).astype(np.uint8)
# Defining middle of space as (50,50)
mid = (50,50)
def _static_ring_positions(radius_px):
    """Hold the N people fixed on a circle of `radius_px` pixels around `mid`
    for T frames.  Returns (positions, frames) analogous to the Exp1 sweep,
    but with a constant radius for every frame."""
    positions, frames = [], []
    for _t in range(T):
        frame_pos = []
        bg = deepcopy(background)
        for n in range(N):
            theta = n*2*np.pi/N
            x = int(radius_px*np.cos(theta) + mid[0])
            # NOTE(review): the original used mid[0] for the y offset too;
            # mid[0] == mid[1] == 50, so mid[1] is equivalent.
            y = int(radius_px*np.sin(theta) + mid[1])
            frame_pos.append((x, y))
            bg[int(x), int(y), :] = [0, 0, 0]
        positions.append(frame_pos)
        frames.append(bg)
    return positions, frames


# The original repeated the same placement loop verbatim for radii of 1 m,
# 2 m and 6 m (1 pixel = 0.26 m); factored into _static_ring_positions().
start = []
end = []
positions1, frames = _static_ring_positions(1/0.26)
positions2, frames = _static_ring_positions(2/0.26)
positions6, frames = _static_ring_positions(6/0.26)
# Preserve the module-level `radius` value left behind by the original loops.
radius = 6/0.26
############################################################ Get SD metric ##################################################
# Convert positions to required format: one path per person across frames.
def _paths_from_positions(positions, num_people):
    """Transpose per-frame position lists into per-person paths."""
    return [[frame_pos[n] for frame_pos in positions] for n in range(num_people)]


# alpha = 2m. 1 pixel = 0.26m, therefore approx. 8 pixels = 2m.
alpha = 8
# The original rebuilt the path lists with three copies of the same transpose
# loop; factored into _paths_from_positions().
allPaths1 = _paths_from_positions(positions1, N)
sd_metric_values1, framewise_positions1, all_distances1 = get_metric_values(allPaths1, len(allPaths1[0]), alpha)
allPaths2 = _paths_from_positions(positions2, N)
sd_metric_values2, framewise_positions2, all_distances2 = get_metric_values(allPaths2, len(allPaths2[0]), alpha)
allPaths6 = _paths_from_positions(positions6, N)
sd_metric_values6, framewise_positions6, all_distances6 = get_metric_values(allPaths6, len(allPaths6[0]), alpha)
############################################################## Get risk metric ###############################################
# Risk for each fixed-radius configuration; 3 of the 5 people start infected.
R_reza1, C_reza, pixels_to_color, _ = risk_detailed_reza_original(framewise_positions1, kappa=0.7*np.ones(N), v=1*np.ones(N), rho=0.2*np.ones(N),
                        zeta=0.7*np.ones(N), beta=0.3*np.ones(N), eta=0.7*np.ones(N), mu=1, D=0.003,
                        air_change_percent=0.5, air_change_rate=10*60, neighbor_dist=1, F_s=1, hl=9*60,num_initial_inf=3,
                        ep6_plan=background, XMIN1=0, XMAX1=100, YMIN1=0, YMAX1=100)
R_avg1 = []
for i in range(len(R_reza1)):
    R_avg1.append(np.mean(R_reza1[i]))
R_reza2, C_reza, pixels_to_color, _ = risk_detailed_reza_original(framewise_positions2, kappa=0.7*np.ones(N), v=1*np.ones(N), rho=0.2*np.ones(N),
                        zeta=0.7*np.ones(N), beta=0.3*np.ones(N), eta=0.7*np.ones(N), mu=1, D=0.003,
                        air_change_percent=0.5, air_change_rate=10*60, neighbor_dist=1, F_s=1, hl=9*60,num_initial_inf=3,
                        ep6_plan=background, XMIN1=0, XMAX1=100, YMIN1=0, YMAX1=100)
R_avg2 = []
for i in range(len(R_reza2)):
    R_avg2.append(np.mean(R_reza2[i]))
R_reza6, C_reza, pixels_to_color, _ = risk_detailed_reza_original(framewise_positions6, kappa=0.7*np.ones(N), v=1*np.ones(N), rho=0.2*np.ones(N),
                        zeta=0.7*np.ones(N), beta=0.3*np.ones(N), eta=0.7*np.ones(N), mu=1, D=0.003,
                        air_change_percent=0.5, air_change_rate=10*60, neighbor_dist=1, F_s=1, hl=9*60,num_initial_inf=3,
                        ep6_plan=background, XMIN1=0, XMAX1=100, YMIN1=0, YMAX1=100)
R_avg6 = []
for i in range(len(R_reza6)):
    R_avg6.append(np.mean(R_reza6[i]))
# Tick every 5th frame, labelled with its frame index.
xlabel = []
x = np.arange(1,len(R_avg6)+1, 5)
for i in range(len(R_avg6)):
    if i%5 == 0:
        xlabel.append(i)
plt.figure(figsize=(15,10))
plt.plot(R_avg1, label='1m radius circle')
plt.plot(R_avg2, 'o-', label='2m radius circle')
plt.plot(R_avg6, '*-', label='6m radius circle')
plt.xticks(x, xlabel, size=30)
plt.yticks(size=30)
# BUG FIX: the x axis shows frame indices (in Exp2 people stand at a fixed
# radius for all T frames), but it was labelled 'Average distance between
# people in meters' — copied from the Exp1 plot where the radius varied per
# frame.
plt.xlabel('Frame number', fontsize=30)
plt.ylabel('Average Risk Metric', fontsize=30)
plt.legend(prop={'size': 30})
plt.savefig('results/metricTests/Exp2/riskmetric_exp2_avg.png')
8033141 | <filename>app/classes_feed.py
# -*- coding: utf-8 -*-
import feedparser
import tweepy
import time
from datetime import datetime
import random
from threading import Timer
from flask import current_app, flash
from flask.ext.login import current_user
from . import db
from . import infos_tweet
from .models import Feed, Article
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# THREADING TEST
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
class RepeatedTimer(object):
    """Invoke ``function(*args, **kwargs)`` every ``interval`` seconds.

    Scheduling starts immediately on construction; the first call fires
    after one interval.  Based on
    http://stackoverflow.com/questions/3393612/run-certain-code-every-n-seconds
    """
    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()
    def _run(self):
        # Re-arm the timer before running the callback so the schedule does
        # not drift by the duration of the callback itself.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)
    def start(self):
        """Arm the underlying one-shot Timer unless one is already armed."""
        if self.is_running:
            return
        self._timer = Timer(self.interval, self._run)
        self._timer.start()
        self.is_running = True
    def stop(self):
        """Cancel the pending timer; no further callbacks will fire."""
        self._timer.cancel()
        self.is_running = False
class RssFlux():
    """Periodic RSS-feed collector and tweeter for one Feed row.

    Wraps two RepeatedTimer-driven jobs for the feed identified by `idflux`:
      * get_articles   -- scrape new RSS items into the Article table
                          (activate_get / desactivate_get, timer self.rt2)
      * tweet_articles -- tweet stored, not-yet-tweeted articles
                          (activate_tweet / desactivate_tweet, timer self.rt)
    `state()` reports which of the two jobs is currently running.
    """
    def __init__(self, idflux):
        """Load the Feed row `idflux` and cache its settings on the instance."""
        # Real app object (not the proxy) so timer threads can open contexts.
        self.app = current_app._get_current_object()
        self.idflux = idflux
        flux_info = Feed.query.filter_by(id=self.idflux).first()
        self.name = flux_info.name
        self.url = flux_info.url
        self.collect_actif = flux_info.collect_actif
        self.Tweet_actif = flux_info.Tweet_actif
        # Refresh rate for getting articles, in seconds.
        # NOTE(review): an earlier comment mentioned 28800.0 (8h) but the
        # actual value is 610 s (~10 min) — confirm which is intended.
        self.refresh = 610.0 # every 10mn
        # self.frequency = (24/flux_info.frequency) * 3600
        self.frequency = 600.0 # every 10mn
        if flux_info.hashtag:
            self.hashtag = flux_info.hashtag
        else:
            self.hashtag = ''
        self.rt = None   # RepeatedTimer for tweeting, set by activate_tweet()
        self.rt2 = None  # RepeatedTimer for collecting, set by activate_get()
        # thread name
        # self.name_Thread = '{0} {1}'.format('thread', idflux)
        # print self.name_Thread
    def get_articles(self):
        """Fetch the RSS feed once and insert any unseen articles into the DB."""
        # repeat in a thread every self.refresh the get_articles function
        # self.name_Thread = threading.Timer(self.refresh, self.get_articles).start()
        # Timer(self.refresh, self.get_articles).start()
        rss = self.url
        feeds = feedparser.parse(rss)
        with self.app.app_context():
            db.session.expunge_all()
            # titles list of all articles in bdd
            title_articles = [element.title for element in
                              Article.query.filter(Article.feed_id == self.idflux)]
            # list title/link from last 10 items of Rss feed not in bdd
            # NOTE(review): range(1, 10) skips entries[0] (the newest item)
            # and raises IndexError when the feed has fewer than 10 entries —
            # confirm intent.
            feedss = [(feeds.entries[i]['title'], feeds.entries[i]['link'])
                      for i in range(1, 10)
                      if feeds.entries[i]['title'] not in title_articles]
            # Add new items from list feedss to bdd
            for elem in feedss:
                article = Article(title=elem[0],
                                  url=elem[1],
                                  feed_id=self.idflux)
                db.session.add(article)
                db.session.commit()
            print "SCRAPP ARTICLE EFFECTUE"
    def tweet_articles(self):
        """Format and tweet every stored article of this feed not yet tweeted."""
        with self.app.app_context():
            articles_to_tweet = Article.query.\
                filter(Article.feed_id == self.idflux).\
                filter(Article.tweeted == 0).all()
            # checking articles to tweet
            if articles_to_tweet:
                auth = tweepy.OAuthHandler(infos_tweet.Key_consumer, infos_tweet.Consumer_secret)
                auth.set_access_token(infos_tweet.Access_token, infos_tweet.Access_token_secret)
                api = tweepy.API(auth)
                try:
                    for tweets in articles_to_tweet:
                        # TITLE // LINK -> tweet_content
                        title = tweets.title[:100]
                        link_article = tweets.url
                        # FEED name for VIA -> tweet_content
                        name_feed = Feed.query.\
                            filter(Feed.id == Article.feed_id).first()
                        via_article = name_feed.name.split()[0]
                        tweet_content = "%s // %s - via %s" %\
                            (title, link_article, via_article)
                        # NOTE(review): the article is marked tweeted and
                        # committed BEFORE api.update_status(); if the API
                        # call fails, the article is never retried.
                        tweets.tweeted = 1
                        tweets.date_tweeted = datetime.utcnow()
                        db.session.commit()
                        # send it
                        api.update_status(tweet_content)
                        # wait randomly
                        # NOTE(review): this ~10-minute sleep runs inside the
                        # RepeatedTimer callback thread and blocks it for the
                        # whole batch.
                        time.sleep(600 + random.randint(30, 60))
                        print "Tweet ID : "+str(tweets.id)+" : ENVOYE"
                # check rate limit
                except tweepy.RateLimitError:
                    print "Rate limite reach...sarace"
                    time.sleep(16 * 60)
            else:
                # no tweet to send
                # NOTE(review): flash() returns None, so this prints "None".
                message = flash('No tweets to send')
                print message
    def activate_get(self):
        """Start the periodic article-collection timer and persist the flag."""
        print self.collect_actif
        if not self.collect_actif:
            print "enter activate_get"
            self.rt2 = RepeatedTimer(self.refresh, self.get_articles)
            # update Feed
            flux_info = Feed.query.filter_by(id=self.idflux).first()
            flux_info.collect_actif = True
            db.session.commit()
            print self.rt2
        else:
            print 'Collect already enable'
    def desactivate_get(self):
        """Stop the collection timer and persist the flag.

        NOTE(review): self.collect_actif (the cached attribute) is not reset
        here, only the DB column — a later activate_get() on the same
        instance will think collection is still enabled.
        """
        if self.rt2:
            self.rt2.stop()
            # update Feed
            flux_info = Feed.query.filter_by(id=self.idflux).first()
            flux_info.collect_actif = False
            db.session.commit()
        else:
            print 'Collect already disable'
    def activate_tweet(self):
        """Start the periodic tweeting timer and persist the flag."""
        print "State TWEET (Tweet_actif) : "
        print self.Tweet_actif
        if not self.Tweet_actif:
            print "enter activate_tweet"
            self.rt = RepeatedTimer(self.frequency, self.tweet_articles)
            # update Feed
            flux_info = Feed.query.filter_by(id=self.idflux).first()
            flux_info.Tweet_actif = True
            db.session.commit()
            print self.rt
        else:
            print 'Tweet already enable'
    def desactivate_tweet(self):
        """Stop the tweeting timer and persist the flag.

        NOTE(review): as with desactivate_get, self.Tweet_actif is not reset
        on the instance, only in the DB.
        """
        if self.rt:
            self.rt.stop()
            # update Feed
            flux_info = Feed.query.filter_by(id=self.idflux).first()
            flux_info.Tweet_actif = False
            db.session.commit()
        else:
            print 'Tweet already disable'
    def state(self):
        """Describe which of the two periodic jobs is currently running.

        NOTE(review): if activate_tweet()/activate_get() were never called,
        self.rt / self.rt2 are None and accessing .is_running raises
        AttributeError — guard before calling, or fix to check for None.
        """
        if self.rt.is_running is True:
            if self.rt2.is_running is True:
                return self.name+" : Collecting and Tweeting actif."
            return self.name+" : Tweeting is actif."
        elif self.rt2.is_running is True:
            return self.name+" : Collecting is actif."
        else:
            print 'No actions'
    def print_info(self):
        """Dump all instance attributes for debugging."""
        self.attrs = vars(self)
        print ', '.join("%s: %s" % item for item in self.attrs.items())
# Import-only module: nothing to do when executed directly.
if __name__ == '__main__':
    pass
| StarcoderdataPython |
239303 | <reponame>abreza/HOI-CL<filename>scripts/affordance/extract_affordance_feature.py<gh_stars>10-100
# --------------------------------------------------------
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import argparse
import os
import numpy as np
import tensorflow as tf
from ult.config import cfg
from ult.timer import Timer
from ult.ult import obtain_data, obtain_test_data, obtain_coco_data2
# Fix TF and NumPy RNG seeds so feature extraction is reproducible.
tf.set_random_seed(0)
np.random.seed(0)
def parse_args():
    """Parse command-line options for HICO affordance-feature extraction.

    Returns:
        argparse.Namespace with fields:
            iteration (int): checkpoint iteration to load.
            model (str): model/weights directory name.
            object_thres (float): object detection score threshold.
            type (str): dataset split to process ('train' or 'test').
            human_thres (float): human detection score threshold.
    """
    parser = argparse.ArgumentParser(description='Test an iCAN on HICO')
    parser.add_argument('--num_iteration', dest='iteration',
                        help='Specify which weight to load',
                        default=160000, type=int)
    parser.add_argument('--model', dest='model',
                        help='Select model',
                        default='ATL_union_multi_atl_ml5_l05_t5_def2_aug5_new_VCOCO_coco_CL_21', type=str)
    parser.add_argument('--object_thres', dest='object_thres',
                        help='Object threshold',
                        default=0.3, type=float)
    # BUG FIX: the help text for --type was a copy-paste of 'Object threshold';
    # the option actually selects the dataset split.
    parser.add_argument('--type', dest='type',
                        help='Dataset split to process (train or test)',
                        default='train', type=str)
    parser.add_argument('--human_thres', dest='human_thres',
                        help='Human threshold',
                        default=0.8, type=float)
    args = parser.parse_args()
    return args
def save_img(img, target_size, name):
    """Resize a single-channel image and write it to the temp image dir.

    Drops the trailing channel axis, resizes with nearest-neighbour
    interpolation (order=0) to `target_size`, and saves the result as
    <cfg.IMAGE_TEMP>/<name>.jpg.
    """
    import skimage.io as sio
    import skimage.transform as transform
    squeezed = np.squeeze(img, axis=-1)
    resized = transform.resize(squeezed, target_size, order=0)
    print(resized.shape)
    sio.imsave(cfg.IMAGE_TEMP + '/' + name + '.jpg', resized)
if __name__ == '__main__':
    args = parse_args()
    # Resolve the checkpoint path for the requested model/iteration.
    weight = cfg.ROOT_DIR + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'
    print ('Human thres = ' + str(args.human_thres) + ', Object thres = ' + str(args.object_thres) + ', iter = ' + str(args.iteration) + ', path = ' + weight )
    output_file = cfg.LOCAL_DATA + '/Results/' + str(args.iteration) + '_' + args.model + '.pkl'
    HICO_dir = cfg.LOCAL_DATA + '/Results/HICO/' + str(args.iteration) + '_' + args.model + '/'
    # Constrain TF to 16 CPU threads and ~10% of GPU memory so extraction can
    # share the machine with other jobs.
    tfconfig = tf.ConfigProto(device_count={"CPU": 16},
                              inter_op_parallelism_threads=8,
                              intra_op_parallelism_threads=8,
                              allow_soft_placement=True)
    # init session
    # tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.1
    sess = tf.Session(config=tfconfig)
    # Select the dataset via an env var read inside networks.HOI, then build
    # the network for the chosen model.
    if args.model.__contains__('res101'):
        os.environ['DATASET'] = 'HICO_res101'
        from networks.HOI import HOI
        net = HOI(model_name=args.model)
    elif args.model.__contains__('VCOCO'):
        os.environ['DATASET'] = 'VCOCO1'
        from networks.HOI import HOI
        net = HOI(model_name=args.model)
    else:
        from networks.HOI import HOI
        net = HOI(model_name=args.model)
    # Wire the input pipeline (no augmentation: Pos_augment=0, Neg_select=0,
    # augment_type=-1) and bind the placeholders on the network.
    if args.type == 'train':
        if not args.model.__contains__('VCOCO'):
            large_neg_for_ho = False
            image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp = obtain_data(
                Pos_augment=0, Neg_select=0, augment_type=-1, pattern_type=False)
            net.set_ph(image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp)
        else:
            image, image_id, num_pos, blobs = obtain_coco_data2(0, 0, augment_type=-1, type=1)
            action_HO = blobs['gt_class_C']
            net.set_ph(image, image_id, num_pos, blobs['sp'], blobs['H_boxes'],
                       blobs['O_boxes'], blobs['gt_class_H'], blobs['gt_class_HO'], blobs['gt_class_sp'],
                       blobs['Mask_HO'], blobs['Mask_H'], blobs['Mask_sp'], blobs['gt_class_C'])
    else:
        image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp = obtain_test_data(
            Pos_augment=0,
            Neg_select=0,
            augment_type=-1,
            large_neg_for_ho=False)
        net.set_ph(image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp)
    net.create_architecture(False)
    saver = tf.train.Saver()
    saver.restore(sess, weight)
    print('Pre-trained weights loaded.')
    detection = {}
    # prediction_HO = net.test_image_HO(sess, im_orig, blobs)
    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}
    last_img_id = -1
    count = 0
    img_id_list = []
    O_list = []
    V_list = []
    A_list = []
    result = {}
    result['img_id_list'] = []
    result['O_list'] = []
    result['V_list'] = []
    result['A_list'] = []
    # Drain the dataset iterator; tf raises OutOfRangeError at the end.
    while True:
        _t['im_detect'].tic()
        try:
            _image, _image_id, fc7_O, fc7_verbs, actions = sess.run(
                [image, image_id,
                 net.test_visualize['fc7_O_feats'],
                 net.test_visualize['fc7_verbs_feats'],
                 action_HO,
                 ])
            # print(fc7_verbs.shape, actions.shape, _image_id)
            # print(t_Human_augmented, t_Object_augmented)
            # print('---')
        except tf.errors.OutOfRangeError:
            print('END')
            break
        _t['im_detect'].toc()
        count += 1
        # print(fc7_O.shape, actions.shape)
        # NOTE(review): O_list is initialized and saved but never filled (the
        # extend below is commented out) — confirm object features are meant
        # to be dropped here.
        # result['O_list'].extend(fc7_O)
        # print(_image_id, fc7_verbs)
        result['V_list'].extend(fc7_verbs)
        # NOTE(review): append (not extend) nests one list per batch in
        # img_id_list, unlike the flat V_list/A_list — confirm downstream
        # consumers expect that.
        result['img_id_list'].append([_image_id]*len(fc7_O))
        result['A_list'].extend(actions)
        # 9658 is the hard-coded expected number of HICO test images.
        print('im_detect: {:d}/{:d} {:d}, {:.3f}s\r'.format(count, 9658, _image_id, _t['im_detect'].average_time))
        # if count > 10:
        #     exit()
    print(len(result['A_list']))
    import pickle
    if not os.path.exists(cfg.LOCAL_DATA+ '/feats/'):
        os.makedirs(cfg.LOCAL_DATA+ '/feats/')
    pickle.dump(result, open(cfg.LOCAL_DATA+ '/feats/' +args.model +'_'+args.type+'_'+'HOI_verb_feats.pkl', 'wb'))
    sess.close()
207863 | import logging
import os
import pickle
import tqdm
from pathlib import Path
from ..dataset import GluonCVMotionDataset, DataSample, FieldNames, SplitNames
from ..utils.ingestion_utils import crop_video, process_dataset_splits, crop_fn_ffmpeg, \
crop_fn_frame_folder
# Root logger at DEBUG so chunking progress and details are always emitted.
_log = logging.getLogger()
_log.setLevel(logging.DEBUG)
def even_chunk(sample, chunk_mins, chunks_per_vid=None, center=False, min_chunk_ms=3000):
    """Return evenly spaced (start_ms, end_ms) chunks for a sample's video.

    :param sample: the DataSample to be chunked (duration is read from its
        frame_reader).
    :param chunk_mins: desired chunk length in minutes.  All chunks have this
        length except when the video is shorter than one chunk, or when
        chunks_per_vid is None and the final chunk is a shorter remainder.
    :param chunks_per_vid: desired number of chunks.  If the video is too
        short for that many full chunks the count is reduced (minimum 1, with
        a single whole-video chunk if even one does not fit).  If None (the
        default) the whole video is chunked and a trailing partial chunk is
        kept only if it is at least min_chunk_ms long.
    :param center: if True, distribute leftover time before the first and
        after the last chunk as well as between chunks.
    :param min_chunk_ms: minimum length for the trailing partial chunk when
        chunks_per_vid is None.
    :return: list of (start_time, end_time) millisecond pairs.
    """
    duration = sample.frame_reader.duration
    chunk_ms = int(chunk_mins * 60) * 1000

    if chunks_per_vid is None:
        # Chunk the whole video; keep a trailing partial chunk only if it is
        # long enough (or if the video is shorter than a single chunk).
        num_chunks = duration // chunk_ms
        if duration % chunk_ms >= min_chunk_ms or num_chunks == 0:
            num_chunks += 1
        leftover_ms = 0
    else:
        num_chunks = chunks_per_vid
        leftover_ms = duration - chunk_ms * num_chunks
        if leftover_ms < 0:
            # Requested count does not fit: shrink it.  Floor division with a
            # negative numerator acts as a ceiling on the overrun.
            num_chunks = max(1, num_chunks + leftover_ms // chunk_ms)
            leftover_ms = max(0, duration - chunk_ms * num_chunks)

    gap_count = max(1, num_chunks - 1 + (center * 2))
    gap_ms = leftover_ms // gap_count
    offset = gap_ms if center else 0
    starts = [offset + i * (chunk_ms + gap_ms) for i in range(num_chunks)]
    return [(s, min(s + chunk_ms, duration)) for s in starts]
def ratio_chunk(sample, ratios):
    """
    Split a sample's timeline into chunks sized by the given ratios.

    :param sample:
        The datasample to be chunked (only ``sample.frame_reader.duration``
        is read).
    :param ratios:
        Fractions of the total duration, in playback order. A negative
        ratio advances the cursor but its chunk is thrown away.
    :return:
        A list of ``(start_time, end_time)`` pairs; see :func:`even_chunk`.
    :raises ValueError:
        If the absolute ratios sum to more than 1 (small tolerance allowed).
    """
    if sum(abs(r) for r in ratios) > 1 + 1e-2:
        raise ValueError("Ratios add up to more than 1")
    total_ms = sample.frame_reader.duration
    chunk_bounds = []
    cursor = 0
    for ratio in ratios:
        chunk_end = cursor + (abs(ratio) * total_ms)
        # Negative ratios only move the cursor; they contribute no chunk.
        if ratio > 0:
            chunk_bounds.append((round(cursor), round(chunk_end)))
        cursor = chunk_end
    return chunk_bounds
def align_times_to_framerate(time_pairs, fps):
    """Snap every timestamp in ``time_pairs`` onto the frame grid.

    Each time (in ms) is moved to the nearest multiple of the frame
    period (``1000 / fps``) and then rounded back to an integer
    millisecond value.
    """
    frame_ms = 1000 / fps
    return [
        tuple(round(round(t / frame_ms) * frame_ms) for t in pair)
        for pair in time_pairs
    ]
def update_anno(sample: DataSample, new_sample: DataSample, time_pair: (int, int)):
    """Copy the entities that fall inside ``time_pair`` from ``sample``
    into ``new_sample``, shifting their timestamps so the chunk starts
    at time 0.

    Entities are deep-copied via a pickle round-trip so the originals
    stay untouched.  Frame numbers are recomputed from the shifted time,
    which assumes the sample has a constant fps.
    """
    start_time, end_time = time_pair
    for ts, entities in sample.time_entity_dict.items():
        # Keep only entities inside the half-open window [start, end).
        if not (start_time <= ts < end_time):
            continue
        for src_entity in entities:
            entity_copy = pickle.loads(pickle.dumps(src_entity))
            entity_copy.time = src_entity.time - start_time
            # Recompute the frame index from the shifted time (constant fps).
            entity_copy.frame_num = round((entity_copy.time / 1000) * new_sample.fps)
            new_sample.add_entity(entity_copy)
    return new_sample
def add_orig_samples(new_dataset, dataset):
    """Copy every sample of ``dataset``'s "test" split into ``new_dataset``."""
    for _sid, orig_sample in dataset.get_split_samples("test"):
        new_dataset.add_sample(orig_sample)
def write_new_split(dataset, time_pairs):
    """Assign dataset splits from the chunk table and persist them.

    Samples missing from ``time_pairs`` go to TEST; chunks that start at
    time 0 go to TRAIN and all later chunks go to VAL.
    """
    def split_func(sample):
        pair = time_pairs.get(sample.id)
        if pair is None:
            return SplitNames.TEST
        chunk_start, _chunk_end = pair
        return SplitNames.TRAIN if chunk_start == 0 else SplitNames.VAL

    process_dataset_splits(dataset, split_func, save=True)
def main(anno_path:str="./annotation/anno.json", new_name=None,
         chunk_mins=5, chunks_per_vid=1, chunk_func=even_chunk,
         link_unchanged=True, cache_name=None,input_cache=None,
         name_suffix="", fps=None, split=None, new_split=None,
         overwrite=False, part=0, parts=1):
    """Chunk every video of a dataset into clips and write a new annotation set.

    For each sample the video is split in half, the clips are written (or
    symlinked when unchanged) under a cache directory, and the entities that
    fall inside each clip are copied into a new dataset annotation file.

    part/parts allow sharding the work across processes; only the single
    (parts == 1) run adds the untouched test-split samples.
    """
    # "_30r" style suffix encodes the resample rate in the output names.
    fps_suffix = "_{}r".format(fps) if fps is not None else ""
    if name_suffix:
        name_suffix = "_" + name_suffix
    if new_name is None:
        new_name = "anno_chunks_{}m_{}p{}{}.json".format(chunk_mins,
                                                         chunks_per_vid,
                                                         fps_suffix,
                                                         name_suffix)
    if cache_name is None:
        cache_name = "vids_chunked/vid_chunks_{}m_{}p{}{}".format(chunk_mins,
                                                                  chunks_per_vid,
                                                                  fps_suffix,
                                                                  name_suffix)
    dataset = GluonCVMotionDataset(anno_path)
    # The new dataset shares the source root but starts with no annotations.
    new_dataset = GluonCVMotionDataset(new_name,
                                       dataset.root_path,
                                       split_file=new_split,
                                       load_anno=False)
    chunk_cache_dir = Path(dataset.cache_root_path, cache_name)
    chunk_data_link = Path(dataset.data_root_path, cache_name)
    os.makedirs(chunk_cache_dir, exist_ok=True)
    os.makedirs(chunk_data_link.parent, exist_ok=True)
    # Expose the cache directory inside the data root through a symlink.
    try:
        os.symlink(chunk_cache_dir, chunk_data_link)
    except OSError:
        pass  # already exists
    new_sample_times = {}
    samples = dataset.get_split_samples(split)
    # Simple sharding so multiple processes can chunk disjoint subsets.
    samples = samples[part::parts]
    for sample_id, sample in tqdm.tqdm(samples, mininterval=1):
        # NOTE(review): chunk_func / chunk_mins / chunks_per_vid are ignored
        # here; the split is hard-coded to a 50/50 ratio_chunk.  This looks
        # like a leftover override — confirm before relying on those params.
        time_pairs = ratio_chunk(sample, [0.5, 0.5])
        time_pairs = align_times_to_framerate(time_pairs, sample.fps)
        out_suffix = ".mp4"
        if input_cache is not None:
            input_path = sample.get_cache_file(input_cache, out_suffix)
        else:
            input_path = sample.data_path
        output_path = chunk_data_link / sample.data_relative_path
        is_frame_dir = Path(input_path).is_dir()
        if not is_frame_dir:
            output_path = output_path.with_suffix(out_suffix)
        for start_time, end_time in time_pairs:
            # A single chunk covering (almost) the whole video at the same
            # fps can just be symlinked instead of re-encoded.
            if link_unchanged and len(time_pairs) == 1 \
                    and start_time == 0 and abs(sample.frame_reader.duration - end_time) < 50 \
                    and (fps is None or sample.frame_reader.fps == fps):
                if not output_path.exists():
                    os.makedirs(output_path.parent, exist_ok=True)
                    os.symlink(input_path, output_path)
                cropped = False
                full_output_path = output_path
            else:
                if is_frame_dir:
                    # fps for folder frame reader, not output fps
                    addn_args = dict(crop_fn=crop_fn_frame_folder, in_fps=sample.fps)
                else:
                    # output fps
                    addn_args = dict(crop_fn=crop_fn_ffmpeg, fps=fps)
                cropped, full_output_path = crop_video(input_path, str(output_path),
                                                       start_time, end_time,
                                                       overwrite=overwrite, add_output_ts=True,
                                                       make_dirs=True, **addn_args)
            new_data_path = Path(full_output_path).relative_to(dataset.data_root_path)
            new_id = f"{sample_id}_{start_time}-{end_time}"
            new_sample = sample.get_copy_without_entities(new_id=new_id)
            new_sample.data_relative_path = new_data_path
            fr = new_sample.frame_reader
            new_sample.metadata[FieldNames.NUM_FRAMES] = fr.num_frames()
            new_sample.metadata[FieldNames.DURATION] = fr.duration
            new_sample.metadata[FieldNames.ORIG_ID] = sample_id
            # Copy the entities falling inside this chunk into the new sample.
            update_anno(sample, new_sample, (start_time, end_time))
            if cropped:
                _log.info("Done crop for {}".format(new_data_path))
            new_dataset.add_sample(new_sample)
            # NOTE(review): new_sample_times is filled but never consumed in
            # this function (write_new_split is not called here) — confirm.
            new_sample_times[new_sample.id] = (start_time, end_time)
    if parts == 1:
        add_orig_samples(new_dataset, dataset)
    # NOTE(review): source indentation was lost — dump() may originally have
    # been guarded by the parts == 1 branch above; confirm before sharded use.
    new_dataset.dump()
    return True, new_dataset
if __name__ == "__main__":
    # Expose main()'s keyword arguments as a command line via python-fire.
    import fire
    fire.Fire(main)
| StarcoderdataPython |
5118736 | <reponame>chenjyw/python-twitter
#!/usr/bin/env python
from calendar import timegm
import rfc822
from twitter import json, TwitterError
class DirectMessage(object):
    """A class representing the DirectMessage structure used by the twitter API.

    The DirectMessage structure exposes the following properties:

      direct_message.id
      direct_message.created_at
      direct_message.created_at_in_seconds # read only
      direct_message.sender_id
      direct_message.sender_screen_name
      direct_message.recipient_id
      direct_message.recipient_screen_name
      direct_message.text
    """

    def __init__(self,
                 id=None,
                 created_at=None,
                 sender_id=None,
                 sender_screen_name=None,
                 recipient_id=None,
                 recipient_screen_name=None,
                 text=None):
        """An object to hold a Twitter direct message.

        This class is normally instantiated by the twitter.Api class and
        returned in a sequence.

        Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"

        Args:
          id: The unique id of this direct message. [Optional]
          created_at: The time this direct message was posted. [Optional]
          sender_id: The id of the twitter user that sent this message. [Optional]
          sender_screen_name: The name of the twitter user that sent this
            message. [Optional]
          recipient_id: The id of the twitter that received this message. [Optional]
          recipient_screen_name: The name of the twitter that received this
            message. [Optional]
          text: The text of this direct message. [Optional]
        """
        self.id = id
        self.created_at = created_at
        self.sender_id = sender_id
        self.sender_screen_name = sender_screen_name
        self.recipient_id = recipient_id
        self.recipient_screen_name = recipient_screen_name
        self.text = text

    # Fixed: the capitalized accessors below previously read/wrote
    # underscore-prefixed attributes (self._id, self._text, ...) that
    # __init__ never assigned, so every getter raised AttributeError until
    # its setter had been called.  They now delegate to the plain
    # attributes that __init__ actually sets, keeping e.g. dm.text and
    # dm.Text in sync.

    @property
    def RecipientScreenName(self):
        """The recipient screen name of this direct message."""
        return self.recipient_screen_name

    @RecipientScreenName.setter
    def RecipientScreenName(self, recipient_screen_name):
        self.recipient_screen_name = recipient_screen_name

    @property
    def Text(self):
        """The text of this direct message."""
        return self.text

    @Text.setter
    def Text(self, text):
        self.text = text

    @property
    def RecipientId(self):
        """The recipient id of this direct message."""
        return self.recipient_id

    @RecipientId.setter
    def RecipientId(self, recipient_id):
        self.recipient_id = recipient_id

    # Read-only accessors.

    @property
    def Id(self):
        """The unique id of this direct message."""
        return self.id

    @property
    def CreatedAt(self):
        """The time this direct message was posted."""
        return self.created_at

    @property
    def CreatedAtInSeconds(self):
        """The time this direct message was posted, in seconds since the epoch."""
        return timegm(rfc822.parsedate(self.created_at))

    @property
    def SenderScreenName(self):
        """The sender screen name of this direct message."""
        return self.sender_screen_name

    @property
    def SenderId(self):
        """The sender id of this direct message."""
        return self.sender_id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        try:
            return other and \
                self.id == other.id and \
                self.created_at == other.created_at and \
                self.sender_id == other.sender_id and \
                self.sender_screen_name == other.sender_screen_name and \
                self.recipient_id == other.recipient_id and \
                self.recipient_screen_name == other.recipient_screen_name and \
                self.text == other.text
        except AttributeError:
            return False

    def __str__(self):
        """A string representation of this twitter.DirectMessage instance.

        The return value is the same as the JSON string representation.
        """
        return self.AsJsonString()

    def AsJsonString(self):
        """A JSON string representation of this twitter.DirectMessage instance."""
        return json.dumps(self.AsDict(), sort_keys=True)

    def AsDict(self):
        """A dict representation of this twitter.DirectMessage instance.

        Only fields with truthy values are included; keys match the JSON
        representation.
        """
        data = {}
        if self.id:
            data['id'] = self.id
        if self.created_at:
            data['created_at'] = self.created_at
        if self.sender_id:
            data['sender_id'] = self.sender_id
        if self.sender_screen_name:
            data['sender_screen_name'] = self.sender_screen_name
        if self.recipient_id:
            data['recipient_id'] = self.recipient_id
        if self.recipient_screen_name:
            data['recipient_screen_name'] = self.recipient_screen_name
        if self.text:
            data['text'] = self.text
        return data

    @staticmethod
    def NewFromJsonDict(data):
        """Create a new instance based on a JSON dict from the twitter API.

        Args:
          data: A JSON dict, as converted from the JSON in the twitter API

        Returns:
          A twitter.DirectMessage instance
        """
        return DirectMessage(created_at=data.get('created_at', None),
                             recipient_id=data.get('recipient_id', None),
                             sender_id=data.get('sender_id', None),
                             text=data.get('text', None),
                             sender_screen_name=data.get('sender_screen_name', None),
                             id=data.get('id', None),
                             recipient_screen_name=data.get('recipient_screen_name', None))
| StarcoderdataPython |
12857915 | # coding: utf-8
import os
import thriftpy
import json
import logging
from thriftpy.rpc import make_client
from xylose.scielodocument import Article, Journal
# Page size used when iterating identifiers from the remote services.
LIMIT = 1000

logger = logging.getLogger(__name__)

# Load the Thrift IDL files that ship next to this module; each load returns
# a module-like object exposing the service stubs used by the clients below.
ratchet_thrift = thriftpy.load(
    os.path.join(os.path.dirname(__file__))+'/ratchet.thrift')
articlemeta_thrift = thriftpy.load(
    os.path.join(os.path.dirname(__file__))+'/articlemeta.thrift')
citedby_thrift = thriftpy.load(
    os.path.join(os.path.dirname(__file__))+'/citedby.thrift')
accessstats_thrift = thriftpy.load(
    os.path.join(os.path.dirname(__file__))+'/access_stats.thrift')
publication_stats_thrift = thriftpy.load(
    os.path.join(os.path.dirname(__file__))+'/publication_stats.thrift')
class ServerError(Exception):
    """Raised when a remote thrift endpoint call fails."""

    def __init__(self, message=None):
        # Default text preserved verbatim (including the historical typo),
        # since callers/logs may already match on it.
        self.message = message if message else 'thirftclient: ServerError'

    def __str__(self):
        return repr(self.message)
class AccessStats(object):
    """Thrift client wrapper for the SciELO Access Stats service."""

    def __init__(self, address, port):
        """Store the host/port of the Access Stats thrift endpoint."""
        self._address = address
        self._port = port

    @property
    def client(self):
        # A fresh thrift client is built on every access; nothing is cached.
        client = make_client(
            accessstats_thrift.AccessStats,
            self._address,
            self._port
        )
        return client

    def _compute_access_lifetime(self, query_result):
        # Flatten the nested publication_year/access_year aggregation into
        # sorted rows of:
        # [publication_year, access_year, html, abstract, pdf, epdf, total]
        data = []
        for publication_year in query_result['aggregations']['publication_year']['buckets']:
            for access_year in publication_year['access_year']['buckets']:
                data.append([
                    publication_year['key'],
                    access_year['key'],
                    int(access_year['access_html']['value']),
                    int(access_year['access_abstract']['value']),
                    int(access_year['access_pdf']['value']),
                    int(access_year['access_epdf']['value']),
                    int(access_year['access_total']['value'])
                ])
        return sorted(data)

    def access_lifetime(self, issn, collection, raw=False):
        """Yearly access counts for a journal, grouped by publication year.

        :param issn: journal ISSN to filter on.
        :param collection: collection acronym to filter on.
        :param raw: when True return the raw Elasticsearch response instead
            of the flattened rows.
        """
        body = {
            "query": {
                "bool": {
                    "must": [{
                        "match": {
                            "collection": collection
                        }
                    },
                        {
                            "match": {
                                "issn": issn
                            }
                        }
                    ]
                }
            },
            "size": 0,
            "aggs": {
                "publication_year": {
                    "terms": {
                        "field": "publication_year",
                        "size": 0,
                        "order": {
                            "access_total": "desc"
                        }
                    },
                    "aggs": {
                        "access_total": {
                            "sum": {
                                "field": "access_total"
                            }
                        },
                        "access_year": {
                            "terms": {
                                "field": "access_year",
                                "size": 0,
                                "order": {
                                    "access_total": "desc"
                                }
                            },
                            "aggs": {
                                "access_total": {
                                    "sum": {
                                        "field": "access_total"
                                    }
                                },
                                "access_abstract": {
                                    "sum": {
                                        "field": "access_abstract"
                                    }
                                },
                                "access_epdf": {
                                    "sum": {
                                        "field": "access_epdf"
                                    }
                                },
                                "access_html": {
                                    "sum": {
                                        "field": "access_html"
                                    }
                                },
                                "access_pdf": {
                                    "sum": {
                                        "field": "access_pdf"
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        query_parameters = [
            accessstats_thrift.kwargs('size', '0')
        ]

        query_result = json.loads(self.client.search(json.dumps(body), query_parameters))

        # NOTE(review): the flattened rows are computed even when raw=True;
        # harmless but wasted work — confirm before changing.
        computed = self._compute_access_lifetime(query_result)

        return query_result if raw else computed
class PublicationStats(object):
    """Thrift client wrapper for the SciELO PublicationStats service."""

    def __init__(self, address, port):
        """Store the host/port of the PublicationStats thrift endpoint."""
        self._address = address
        self._port = port

    @property
    def client(self):
        # A fresh thrift client is built on every access; nothing is cached.
        client = make_client(
            publication_stats_thrift.PublicationStats,
            self._address,
            self._port
        )
        return client

    def _compute_first_included_document_by_journal(self, query_result):
        # Unwrap the single hit (if any) from the Elasticsearch response.
        if len(query_result.get('hits', {'hits': []}).get('hits', [])) == 0:
            return None

        return query_result['hits']['hits'][0].get('_source', None)

    def first_included_document_by_journal(self, issn, collection):
        """Return the oldest document of a journal, or None when absent.

        Queries the 'article' index sorted by ascending publication_date
        with size=1.
        """
        body = {
            "query": {
                "filtered": {
                    "query": {
                        "bool": {
                            "must": [
                                {
                                    "match": {
                                        "collection": collection
                                    }
                                },
                                {
                                    "match": {
                                        "issn": issn
                                    }
                                }
                            ]
                        }
                    }
                }
            },
            "sort": [
                {
                    "publication_date": {
                        "order": "asc"
                    }
                }
            ]
        }

        query_parameters = [
            publication_stats_thrift.kwargs('size', '1')
        ]

        query_result = json.loads(self.client.search('article', json.dumps(body), query_parameters))

        return self._compute_first_included_document_by_journal(query_result)

    def _compute_last_included_document_by_journal(self, query_result):
        # Unwrap the single hit (if any) from the Elasticsearch response.
        if len(query_result.get('hits', {'hits': []}).get('hits', [])) == 0:
            return None

        return query_result['hits']['hits'][0].get('_source', None)

    def last_included_document_by_journal(self, issn, collection, metaonly=False):
        """Return the newest document of a journal, or None when absent.

        Same query as first_included_document_by_journal, but sorted
        descending and restricted to documents with a publication_date.
        NOTE(review): the metaonly parameter is accepted but never used —
        confirm whether it should be forwarded to the service.
        """
        body = {
            "query": {
                "filtered": {
                    "query": {
                        "bool": {
                            "must": [
                                {
                                    "match": {
                                        "collection": collection
                                    }
                                },
                                {
                                    "match": {
                                        "issn": issn
                                    }
                                }
                            ]
                        }
                    },
                    "filter": {
                        "exists": {
                            "field": "publication_date"
                        }
                    }
                }
            },
            "sort": [
                {
                    "publication_date": {
                        "order": "desc"
                    }
                }
            ]
        }

        query_parameters = [
            publication_stats_thrift.kwargs('size', '1')
        ]

        query_result = json.loads(self.client.search('article', json.dumps(body), query_parameters))

        return self._compute_last_included_document_by_journal(query_result)
class Citedby(object):
    """Thrift client wrapper for the Citedby service."""

    def __init__(self, address, port):
        """Record the Citedby thrift endpoint location."""
        self._address = address
        self._port = port

    @property
    def client(self):
        # A fresh thrift client is built on every access.
        return make_client(
            citedby_thrift.Citedby,
            self._address,
            self._port
        )

    def citedby_pid(self, code, metaonly=False):
        """Return citation data for the document identified by ``code``."""
        return self.client.citedby_pid(code, metaonly)
class Ratchet(object):
    """Thrift client wrapper for the Ratchet usage-stats service."""

    def __init__(self, address, port):
        """Record the Ratchet thrift endpoint location."""
        self._address = address
        self._port = port

    @property
    def client(self):
        # A fresh thrift client is built on every access.
        return make_client(
            ratchet_thrift.RatchetStats,
            self._address,
            self._port
        )

    def document(self, code):
        """Return general usage statistics for the given document code."""
        return self.client.general(code=code)
class ArticleMeta(object):
    """Thrift client wrapper for the SciELO ArticleMeta service.

    Fixes vs. the original:
      * generators used ``raise StopIteration`` to finish, which raises
        RuntimeError on Python 3.7+ (PEP 479) — replaced with ``return``;
      * bare ``except:`` clauses (which also swallow SystemExit and
        KeyboardInterrupt) narrowed to ``except Exception``;
      * collections() read ``self._client``, an attribute that is never
        assigned (the accessor is the ``client`` property) and therefore
        always raised AttributeError;
      * the paging step is now ``LIMIT`` instead of a hard-coded 1000.
    """

    def __init__(self, address, port):
        """Record the ArticleMeta thrift endpoint location."""
        self._address = address
        self._port = port

    @property
    def client(self):
        # A fresh thrift client is built on every access; nothing is cached.
        client = make_client(
            articlemeta_thrift.ArticleMeta,
            self._address,
            self._port
        )
        return client

    def journals(self, collection=None, issn=None):
        """Yield every journal (as a xylose Journal) matching the filters.

        Identifiers are fetched in pages of LIMIT; iteration ends when a
        page comes back empty.
        """
        offset = 0
        while True:
            identifiers = self.client.get_journal_identifiers(
                collection=collection, issn=issn, limit=LIMIT, offset=offset)
            if len(identifiers) == 0:
                # PEP 479: 'raise StopIteration' inside a generator becomes
                # RuntimeError on Python 3.7+; a plain return ends iteration.
                return
            for identifier in identifiers:
                journal = self.client.get_journal(
                    code=identifier.code[0], collection=identifier.collection)
                jjournal = json.loads(journal)
                xjournal = Journal(jjournal)
                logger.info('Journal loaded: %s_%s' % (identifier.collection, identifier.code))
                yield xjournal
            offset += LIMIT

    def exists_article(self, code, collection):
        """Return whether the document exists.

        :raises ServerError: when the RPC fails.
        """
        try:
            return self.client.exists_article(
                code,
                collection
            )
        except Exception:
            msg = 'Error checking if document exists: %s_%s' % (collection, code)
            raise ServerError(msg)

    def set_doaj_id(self, code, collection, doaj_id):
        """Attach a DOAJ id to a document.

        :raises ServerError: when the RPC fails.
        """
        try:
            self.client.set_doaj_id(
                code,
                collection,
                doaj_id
            )
        except Exception:
            msg = 'Error senting doaj id for document: %s_%s' % (collection, code)
            raise ServerError(msg)

    def document(self, code, collection, replace_journal_metadata=True, fmt='xylose'):
        """Fetch one document.

        Returns a xylose Article when fmt='xylose', the raw payload for any
        other fmt, or None when the service returns an empty document.

        :raises ServerError: when the RPC fails or the payload is not JSON.
        """
        try:
            # NOTE(review): replace_journal_metadata is hard-coded to True in
            # the RPC call and the parameter is ignored — confirm intent.
            article = self.client.get_article(
                code=code,
                collection=collection,
                replace_journal_metadata=True,
                fmt=fmt
            )
        except Exception:
            msg = 'Error retrieving document: %s_%s' % (collection, code)
            raise ServerError(msg)

        jarticle = None
        try:
            jarticle = json.loads(article)
        except Exception:
            msg = 'Fail to load JSON when retrienving document: %s_%s' % (collection, code)
            raise ServerError(msg)

        if not jarticle:
            logger.warning('Document not found for : %s_%s' % (collection, code))
            return None

        if fmt == 'xylose':
            xarticle = Article(jarticle)
            logger.info('Document loaded: %s_%s' % (collection, code))
            return xarticle
        else:
            logger.info('Document loaded: %s_%s' % (collection, code))
            return article

    def documents(self, collection=None, issn=None, from_date=None,
                  until_date=None, fmt='xylose'):
        """Yield every document matching the filters, paging LIMIT at a time."""
        offset = 0
        while True:
            identifiers = self.client.get_article_identifiers(
                collection=collection, issn=issn, from_date=from_date,
                until_date=until_date, limit=LIMIT, offset=offset)
            if len(identifiers) == 0:
                return  # see journals(): PEP 479
            for identifier in identifiers:
                document = self.document(
                    code=identifier.code,
                    collection=identifier.collection,
                    replace_journal_metadata=True,
                    fmt=fmt
                )
                yield document
            offset += LIMIT

    def collections(self):
        """Return the list of collection identifiers known to the service."""
        # Fixed: the original read self._client, which is never assigned
        # (the accessor is the 'client' property), so it always raised
        # AttributeError.
        return [i for i in self.client.get_collection_identifiers()]
4985942 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(arr, N, k):
    """Maximum sum of disjoint adjacent pairs among the first N sorted values.

    The array is sorted in place; scanning from index N-1 downward, an
    element is paired with its left neighbour whenever their difference is
    strictly less than k, and both values are added to the sum (both
    elements are then skipped).

    Fix vs. the original: the loop guard is ``i > 0`` instead of ``i >= 0``.
    At ``i == 0`` the original evaluated ``arr[0] - arr[-1]`` (Python wraps
    index -1 to the last element), which is never positive, so ``arr[0]``
    plus the array maximum were spuriously added to the result.
    """
    max_sum = 0
    arr.sort()
    i = N - 1
    while i > 0:
        if arr[i] - arr[i - 1] < k:
            # Take the adjacent pair and skip both of its elements.
            max_sum += arr[i]
            max_sum += arr[i - 1]
            i -= 1
        i -= 1
    return max_sum
#TOFILL
if __name__ == '__main__':
    # Benchmark harness: the evaluation pipeline pastes a candidate
    # implementation named f_filled at the #TOFILL marker above, then runs
    # both the candidate and the reference (f_gold) on each parameter set
    # (array, N, k) and counts how many outputs match.
    param = [
        ([2, 10, 11, 11, 12, 14, 15, 17, 27, 27, 28, 36, 36, 44, 47, 47, 54, 55, 62, 64, 68, 69, 70, 70, 75, 76, 78, 85, 85, 91, 95, 97],26,18,),
        ([-36, 78, 10, 30, -12, -70, -98, -14, -44, -66, -40, -8, 78, 2, -70, 40, 92, 58, 30, 10, -84, -62, -86, -50, 82, 36, -92, -30, -2, -34, 88, 2, -4, -72],26,25,),
        ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],47,26,),
        ([77, 78, 58],1,1,),
        ([-88, -88, -88, -82, -58, -54, -48, -46, -46, -44, -20, -2, 10, 28, 28, 28, 42, 42, 44, 50, 50, 54, 56, 58, 62, 68, 70, 72, 74, 76, 78, 88, 90, 92],21,24,),
        ([0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],41,40,),
        ([5, 7, 10, 11, 15, 17, 20, 20, 29, 29, 32, 37, 38, 39, 40, 41, 45, 51, 60, 64, 64, 68, 68, 70, 71, 71, 71, 75, 76, 82, 84, 87, 88, 88, 95, 98],30,21,),
        ([-46, -32, 76, -28, 44, -14, 94, -4, 60, -88, -52, 32, -66, 28, 94, 76, 86, -4, 74, 52, 64, -36, -98, -40, 70, 18, -22, -20, -16, -74, 12, 60, 94, 98, -28, -24, 4, -34, -60, 56],33,23,),
        ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,41,),
        ([79, 13, 25, 22, 61, 1, 2, 73, 66, 94, 47, 9, 1, 99, 25, 39, 95, 46, 95, 20, 63, 15, 14, 36, 9, 91, 14],19,23,)
    ]
    n_success = 0
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success+=1
    print("#Results: %i, %i" % (n_success, len(param)))
3513771 | """
Ginkgo Base image
Contents:
GNU compilers version set by the user
LLVM/Clang version set by the user
Intel ICC and ICPC version set to the latest available version
OpenMP latest apt version for Clang+OpenMP
Python 2 and 3 (upstream)
cmake (upstream)
build-essential, git, openssh, curl, valgrind latest apt version
jq, graphviz, ghostscript, latest apt version
bison, flex latest apt version, required for doxygen compilation
doxygen: install the latest release
texlive: install the latest release
clang-tidy, iwyu: latest apt version
hwloc, libhwloc-dev, pkg-config latest apt version
papi: adds package libpfm4, and copy precompiled papi headers and files
from a directory called 'papi'
gpg-agent: latest apt version, for adding custom keys
"""
# pylint: disable=invalid-name, undefined-variable, used-before-assignment
import os

# NOTE: this is an hpccm container recipe — Stage0, USERARG and the building
# blocks (python, cmake, apt_get, gnu, shell, copy, environment) are injected
# into the namespace by hpccm at generation time.
Stage0.baseimage('ubuntu:18.04')
release_name = 'bionic'

# Setup extra tools
Stage0 += python()
Stage0 += cmake(eula=True)
Stage0 += apt_get(ospackages=['build-essential', 'git', 'openssh-client', 'curl', 'valgrind'])
Stage0 += apt_get(ospackages=['jq', 'graphviz', 'ghostscript'])
Stage0 += apt_get(ospackages=['clang-tidy', 'iwyu'])
Stage0 += apt_get(ospackages=['hwloc', 'libhwloc-dev', 'pkg-config'])
Stage0 += apt_get(ospackages=['gpg-agent'])
Stage0 += apt_get(ospackages=['ca-certificates'])  # weird github certificates problem
Stage0 += apt_get(ospackages=['bison', 'flex'])  # required to build doxygen

# GNU compilers (version selectable via --userarg gnu=<ver>)
gnu_version = USERARG.get('gnu', '9')
Stage0 += gnu(version=gnu_version, extra_repository=True)

# Clang compilers from the upstream LLVM apt repository
llvm_version = USERARG.get('llvm', '8')
clang_ver = 'clang-{}'.format(llvm_version)
repo_ver = ['deb http://apt.llvm.org/{}/ llvm-toolchain-{}-{} main'.format(release_name, release_name, llvm_version)]
Stage0 += apt_get(ospackages=[clang_ver, 'libomp-dev'], repositories=repo_ver, keys=['https://apt.llvm.org/llvm-snapshot.gpg.key'])
# Register the versioned clang binaries as the default clang/clang++.
clang_update = 'update-alternatives --install /usr/bin/clang clang /usr/bin/clang-{} 90'.format(llvm_version)
clangpp_update = 'update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang-{} 90'.format(llvm_version)
Stage0 += shell(commands=[clang_update, clangpp_update])

# Doxygen: build the pinned release from source, then clean up.
Stage0 += shell(commands=['cd /var/tmp', 'git clone https://github.com/doxygen/doxygen'])
Stage0 += shell(commands=['cd /var/tmp/doxygen', 'git checkout Release_1_8_16',
                          'mkdir build', 'cd build',
                          'cmake ..', 'make -j10', 'make install'])
Stage0 += shell(commands=['cd /var/tmp', 'rm -rf doxygen'])

# Texlive: only installed when a local texlive/ profile directory exists.
if os.path.isdir('texlive/'):
    Stage0 += copy(src='texlive/texlive.profile', dest='/var/tmp')
    Stage0 += shell(commands=['cd /var/tmp', 'wget '
                              'http://mirror.ctan.org/systems/texlive/tlnet/install-tl-unx.tar.gz',
                              'tar -xvf install-tl-unx.tar.gz', 'cd install-tl-2*',
                              './install-tl --profile=../texlive.profile'])
    Stage0 += shell(commands=['cd /var/tmp', 'rm -rf install-tl*'])
    Stage0 += shell(commands=['tlmgr install mathtools float xcolor varwidth '
                              'fancyvrb multirow hanging adjustbox xkeyval '
                              'collectbox stackengine etoolbox listofitems ulem '
                              'wasysym sectsty tocloft newunicodechar caption etoc '
                              'pgf ec helvetic courier wasy letltxmacro'])

# Copy PAPI libs: precompiled headers/libs/binaries from a local papi/ dir,
# enabled with --userarg papi=True.
add_papi = USERARG.get('papi', 'False')
if os.path.isdir('papi/') and add_papi == 'True':
    Stage0 += apt_get(ospackages=['libpfm4'])
    Stage0 += copy(src='papi/include/*', dest='/usr/include/')
    Stage0 += copy(src='papi/lib/*', dest='/usr/lib/')
    Stage0 += copy(src='papi/bin/*', dest='/usr/bin/')

# Intel ICC/ICPC: copied from a local Parallel Studio install when present.
intel_path = 'intel/parallel_studio_xe_2019/compilers_and_libraries/linux/'
if os.path.isdir(intel_path):
    Stage0 += copy(src=intel_path+'bin/intel64/', dest='/opt/intel/bin/')
    Stage0 += copy(src=intel_path+'lib/intel64/', dest='/opt/intel/lib/')
    Stage0 += copy(src=intel_path+'include/', dest='/opt/intel/include/')
    Stage0 += environment(variables={'INTEL_LICENSE_FILE': '<EMAIL>'})
    Stage0 += environment(variables={'PATH': '$PATH:/opt/intel/bin'})
    Stage0 += environment(variables={'LIBRARY_PATH': '$LIBRARY_PATH:/opt/intel/lib'})
    Stage0 += environment(variables={'LD_LIBRARY_PATH': '$LD_LIBRARY_PATH:/opt/intel/lib'})
    Stage0 += environment(variables={'LD_RUN_PATH': '$LD_RUN_PATH:/opt/intel/lib'})
| StarcoderdataPython |
4871611 | <reponame>vdloo/raptiformica
from raptiformica.actions.mesh import attempt_join_meshnet
from tests.testcase import TestCase
class TestAttemptJoinMeshnet(TestCase):
    """Unit tests for raptiformica.actions.mesh.attempt_join_meshnet."""

    def setUp(self):
        # Patch every collaborator so each test only checks call wiring.
        self.log = self.set_up_patch('raptiformica.actions.mesh.log')
        self.update_neighbours_config = self.set_up_patch(
            'raptiformica.actions.mesh.update_neighbours_config'
        )
        self.configure_meshing_services = self.set_up_patch(
            'raptiformica.actions.mesh.configure_meshing_services'
        )
        self.start_meshing_services = self.set_up_patch(
            'raptiformica.actions.mesh.start_meshing_services'
        )
        self.enough_neighbours = self.set_up_patch('raptiformica.actions.mesh.enough_neighbours')
        self.join_meshnet = self.set_up_patch('raptiformica.actions.mesh.join_meshnet')

    def test_attempt_join_meshnet_logs_meshing_machine_message(self):
        attempt_join_meshnet()
        self.assertTrue(self.log.info.called)

    def test_attempt_join_meshnet_updates_neighbour_config_to_ensure_self_is_registered(self):
        attempt_join_meshnet()
        self.update_neighbours_config.assert_called_once_with(remove=False)

    def test_attempt_join_meshnet_configures_meshing_services(self):
        attempt_join_meshnet()
        self.configure_meshing_services.assert_called_once_with()

    def test_attempt_join_meshnet_starts_meshing_services(self):
        attempt_join_meshnet()
        self.start_meshing_services.assert_called_once_with()

    def test_attempt_join_meshnet_checks_if_there_are_enough_neighbours(self):
        attempt_join_meshnet()
        self.enough_neighbours.assert_called_once_with()

    def test_attempt_join_meshnet_joins_meshnet_if_enough_neighbours(self):
        self.enough_neighbours.return_value = True
        attempt_join_meshnet()
        self.join_meshnet.assert_called_once_with()

    def test_attempt_join_meshnet_does_not_join_meshnet_if_not_enough_neighbours(self):
        self.enough_neighbours.return_value = False
        attempt_join_meshnet()
        self.assertFalse(self.join_meshnet.called)
| StarcoderdataPython |
import argparse
import sys, pickle , os

# CLI: where to write checkpoints and the training log.
parser = argparse.ArgumentParser()
parser.add_argument('ckpt_dir' , help="the folder to save checkpoints")
parser.add_argument('log_file' , help="the file path to save log file")
args = parser.parse_args()

# Local packages are resolved relative to the working directory.
sys.path.append('./transformer_xl/')
sys.path.append('./src/')
import numpy as np
import pandas as pd
from glob import glob
from build_vocab import Vocab

# which gpu to use
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from model_aug import TransformerXL

if __name__ == '__main__':
    # load dictionary (generated from build_vocab.py)
    vocab = pickle.load(open('pickles/remi_wstruct_vocab.pkl', 'rb'))
    event2word, word2event = vocab.event2idx, vocab.idx2event
    # load train data
    # training_seqs_final.pkl : all songs' remi format
    training_data_file = "data/training_seqs_struct_new_final.pkl"
    print("loading training data from {}".format(training_data_file))
    training_seqs = pickle.load( open(training_data_file, 'rb') )
    # show size of training data
    print("Training data count: {}".format(len(training_seqs)))
    # declare model
    model = TransformerXL(
        event2word=event2word,
        word2event=word2event,
        checkpoint=None,
        is_training=True,
        training_seqs=training_seqs)
    # train
    model.train_augment(output_checkpoint_folder=args.ckpt_dir, logfile=args.log_file)
    # close
    model.close()
| StarcoderdataPython |
6568373 | <filename>GUI/covid-tracker.py
from covid import Covid
import matplotlib.pyplot as plt

# Fetch per-country COVID stats and plot the case breakdown as a pie chart.
covid=Covid()  # API client from the 'covid' package
name=input("Enter your country name: ")
virusdata=covid.get_status_by_country_name(name)
# Drop the non-numeric metadata fields so only case counters remain.
remove=['id', 'country', 'latitude', 'longitude', 'last_update']
for i in remove:
    virusdata.pop(i)
# 'confirmed' is the total; the remaining counters become the pie slices.
all_val = virusdata.pop('confirmed')
ids = list(virusdata.keys())
value = [str(i) for i in virusdata.values()]
plt.pie(value,labels=ids, colors = ['r','y', 'g', 'b'], autopct='%1.1f%%')
plt.title("Country: "+ name.upper() + "\nTotal Cases: " + str(all_val))
plt.legend()
plt.show()
| StarcoderdataPython |
11280296 | <reponame>ZeroHarbor/blockstack-storage-drivers
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2014-2015 by Halfmoon Labs, Inc.
Copyright (c) 2016 by Blocktatck.org
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# This module lets the blockstack client treat local disk as a storage provider.
# This is useful for doing local testing.
import os
import sys
import traceback
import logging
from common import get_logger, DEBUG
log = get_logger("blockstack-storage-driver-disk")

# Under test (BLOCKSTACK_TEST set) use a throwaway /tmp root; otherwise keep
# driver data under the user's ~/.blockstack directory.
if os.environ.get("BLOCKSTACK_TEST", None) is not None:
    DISK_ROOT = "/tmp/blockstack-disk"
else:
    DISK_ROOT = os.path.expanduser("~/.blockstack/storage-disk")

# Immutable data is stored by hash; mutable data by (escaped) data ID.
IMMUTABLE_STORAGE_ROOT = DISK_ROOT + "/immutable"
MUTABLE_STORAGE_ROOT = DISK_ROOT + "/mutable"

log.setLevel( logging.DEBUG if DEBUG else logging.INFO )
def storage_init(conf):
    """
    Local disk implementation of the storage_init API call.
    One-time global setup: create the root, mutable and immutable
    directories when they are missing.
    Return True on success
    Return False on error
    """
    for storage_dir in (DISK_ROOT, MUTABLE_STORAGE_ROOT, IMMUTABLE_STORAGE_ROOT):
        if not os.path.isdir(storage_dir):
            os.makedirs(storage_dir)
    return True
def handles_url(url):
    """Return True when this driver can serve the given URL (file:// scheme)."""
    return url[:7] == "file://"
def make_mutable_url(data_id):
    """
    Local disk implementation of the make_mutable_url API call.
    Map a data ID to the file:// URL where reads and writes are routed.
    Slashes in the ID are escaped (as a literal backslash-x2f sequence)
    so the ID maps to a single file name rather than a directory path.
    Return a string.
    """
    escaped_id = data_id.replace("/", r"\x2f")
    return "file://{0}/{1}".format(MUTABLE_STORAGE_ROOT, escaped_id)
def get_immutable_handler(key, **kw):
    """
    Local disk implementation of the get_immutable_handler API call.
    Look up immutable data by its hash and return it.
    Return None when the file is missing or cannot be read.
    """
    path = os.path.join(IMMUTABLE_STORAGE_ROOT, key)
    if not os.path.exists(path):
        if DEBUG:
            log.debug("No such file or directory: '%s'" % path)
        return None
    try:
        fd = open(path, "r")
        try:
            return fd.read()
        finally:
            fd.close()
    except Exception:
        if DEBUG:
            traceback.print_exc()
        return None
def get_mutable_handler(url, **kw):
    """
    Local disk implementation of the get_mutable_handler API call.
    Resolve a file:// route URL to the data it points at.
    Return the data when found; None when the URL is not file:// or the
    file cannot be read.
    """
    if not url.startswith("file://"):
        # not a URL this driver understands
        return None
    path = url[len("file://"):]
    if not os.path.exists(path):
        if DEBUG:
            log.debug("No such file or directory: '%s'" % path)
        return None
    try:
        fd = open(path, "r")
        try:
            return fd.read()
        finally:
            fd.close()
    except Exception:
        if DEBUG:
            traceback.print_exc()
        return None
def put_immutable_handler( key, data, txid, **kw ):
    """
    Local disk implementation of the put_immutable_handler API call.
    Given the hash of the data (key), the serialized data itself,
    and the transaction ID in the blockchain that contains the data's hash,
    put the data into the storage system.
    Return True on success; False on failure.
    """
    global IMMUTABLE_STORAGE_ROOT, DEBUG

    path = os.path.join( IMMUTABLE_STORAGE_ROOT, key )
    pathdir = os.path.dirname(path)

    if not os.path.exists(pathdir):
        try:
            # BUG FIX: was os.makedirs(path_dir, 0700) -- 'path_dir' is an
            # undefined name (variable is 'pathdir'), and 0700 is a
            # py2-only octal literal
            os.makedirs(pathdir, 0o700)
        except Exception as e:
            if DEBUG:
                log.exception(e)
            return False

    try:
        with open( path, "w") as f:
            f.write( data )
            f.flush()

        if DEBUG:
            log.debug("Stored to '%s'" % path)

    except Exception as e:
        # FIX: was py2-only 'except Exception, e' syntax
        if DEBUG:
            traceback.print_exc()
        return False

    return True
def put_mutable_handler( data_id, data_bin, **kw ):
    """
    Local disk implementation of the put_mutable_handler API call.
    Store data_bin under MUTABLE_STORAGE_ROOT, using the data_id
    (with path separators escaped) as the filename.
    Return True on success; False on failure.
    """
    global MUTABLE_STORAGE_ROOT, DEBUG

    # replace all /'s with \x2f's so the ID maps to a single filename
    data_id_noslash = data_id.replace( "/", r"\x2f" )
    path = os.path.join( MUTABLE_STORAGE_ROOT, data_id_noslash )
    pathdir = os.path.dirname(path)

    if not os.path.exists(pathdir):
        try:
            # BUG FIX: was os.makedirs(path_dir, 0700) -- 'path_dir' is an
            # undefined name (variable is 'pathdir'), and 0700 is a
            # py2-only octal literal
            os.makedirs(pathdir, 0o700)
        except Exception as e:
            if DEBUG:
                log.exception(e)
            return False

    try:
        with open( path, "w" ) as f:
            f.write( data_bin )
            f.flush()

        if DEBUG:
            log.debug("Stored to '%s'" % path)

    except Exception as e:
        # FIX: was py2-only 'except Exception, e' syntax
        if DEBUG:
            log.exception(e)
        return False

    return True
def delete_immutable_handler( key, txid, sig_key_txid, **kw ):
    """
    Local disk implementation of the delete_immutable_handler API call.
    Given the hash of the data and transaction ID of the update
    that deleted the data, remove data from storage.
    Return True on success; False if not.  (Deletion is best-effort:
    a missing file still counts as success.)
    """
    global IMMUTABLE_STORAGE_ROOT

    path = os.path.join( IMMUTABLE_STORAGE_ROOT, key )
    try:
        os.unlink( path )
    except Exception as e:
        # FIX: was py2-only 'except Exception, e' syntax.
        # Best-effort delete: ignore a missing or unremovable file.
        pass

    return True
def delete_mutable_handler( data_id, signature, **kw ):
    """
    Local disk implementation of the delete_mutable_handler API call.
    Given the unchanging data ID for the data and the writer's
    signature over the hash of the data_id, remove data from storage.
    Return True on success; False if not.  (Deletion is best-effort:
    a missing file still counts as success.)
    """
    global MUTABLE_STORAGE_ROOT

    # same escaping as make_mutable_url / put_mutable_handler
    data_id_noslash = data_id.replace( "/", r"\x2f" )
    path = os.path.join( MUTABLE_STORAGE_ROOT, data_id_noslash )
    try:
        os.unlink( path )
    except Exception as e:
        # FIX: was py2-only 'except Exception, e' syntax.
        # Best-effort delete: ignore a missing or unremovable file.
        pass

    return True
if __name__ == "__main__":
    """
    Unit tests: exercise each storage-driver entry point end-to-end
    (put/get/delete for both immutable and mutable data).
    """
    # FIX: converted py2-only 'print' statements and 'xrange' to forms that
    # are valid in both Python 2 and 3 (single-argument print(), range()).
    import pybitcoin
    import json

    # hack around absolute paths
    current_dir = os.path.abspath(os.path.join( os.path.dirname(__file__), "..") )
    sys.path.insert(0, current_dir)

    from storage import serialize_mutable_data, parse_mutable_data
    from user import make_mutable_data_zonefile

    pk = pybitcoin.BitcoinPrivateKey()
    data_privkey = pk.to_hex()
    data_pubkey = pk.public_key().to_hex()

    # each row: [data_id, data, nonce, signature (unused), url (filled in below)]
    test_data = [
        ["my_first_datum", "hello world", 1, "unused", None],
        ["/my/second/datum", "hello world 2", 2, "unused", None],
        ["user_profile", '{"name":{"formatted":"judecn"},"v":"2"}', 3, "unused", None],
        ["empty_string", "", 4, "unused", None],
    ]

    def hash_data( d ):
        # hex-encoded hash160 of the serialized data; used as the immutable key
        return pybitcoin.hash.hex_hash160( d )

    rc = storage_init()
    if not rc:
        raise Exception("Failed to initialize")

    # put_immutable_handler
    print("put_immutable_handler")
    for i in range(0, len(test_data)):
        d_id, d, n, s, url = test_data[i]

        rc = put_immutable_handler( hash_data( d ), d, "unused" )
        if not rc:
            raise Exception("put_immutable_handler('%s') failed" % d)

    # put_mutable_handler
    print("put_mutable_handler")
    for i in range(0, len(test_data)):
        d_id, d, n, s, url = test_data[i]

        data_url = make_mutable_url( d_id )
        data_zonefile = make_mutable_data_zonefile( d_id, n, [data_url] )
        data_json = serialize_mutable_data( {"id": d_id, "nonce": n, "data": d}, data_privkey )

        rc = put_mutable_handler( d_id, data_json )
        if not rc:
            raise Exception("put_mutable_handler('%s', '%s') failed" % (d_id, d))

        # remember the URL for the get_mutable_handler test below
        test_data[i][4] = data_url

    # get_immutable_handler
    print("get_immutable_handler")
    for i in range(0, len(test_data)):
        d_id, d, n, s, url = test_data[i]

        rd = get_immutable_handler( hash_data( d ) )
        if rd != d:
            # FIX: message previously (mis)named get_mutable_handler
            raise Exception("get_immutable_handler('%s'): '%s' != '%s'" % (hash_data(d), d, rd))

    # get_mutable_handler
    print("get_mutable_handler")
    for i in range(0, len(test_data)):
        d_id, d, n, s, url = test_data[i]

        rd_json = get_mutable_handler( url )
        rd = parse_mutable_data( rd_json, data_pubkey )
        if rd is None:
            raise Exception("Failed to parse mutable data '%s'" % rd_json)

        if rd['id'] != d_id:
            raise Exception("Data ID mismatch: '%s' != '%s'" % (rd['id'], d_id))

        if rd['nonce'] != n:
            raise Exception("Nonce mismatch: '%s' != '%s'" % (rd['nonce'], n))

        if rd['data'] != d:
            raise Exception("Data mismatch: '%s' != '%s'" % (rd['data'], d))

    # delete_immutable_handler
    print("delete_immutable_handler")
    for i in range(0, len(test_data)):
        d_id, d, n, s, url = test_data[i]

        rc = delete_immutable_handler( hash_data(d), "unused", "unused" )
        if not rc:
            raise Exception("delete_immutable_handler('%s' (%s)) failed" % (hash_data(d), d))

    # delete_mutable_handler
    print("delete_mutable_handler")
    for i in range(0, len(test_data)):
        d_id, d, n, s, url = test_data[i]

        rc = delete_mutable_handler( d_id, "unused" )
        if not rc:
            raise Exception("delete_mutable_handler('%s') failed" % d_id)
| StarcoderdataPython |
3355826 | from Agents.Agent import Agent
from ZobristHash import ZobristHash
class HumanAgent(Agent):
    """Agent whose moves are chosen interactively by a human via stdin."""

    def play(self, moves : list, board : list):
        """Show the legal moves as the input prompt and return the one the
        user picks by index (the board argument is unused here)."""
        choice = int(input(moves))
        return moves[choice]
6641268 | <reponame>kallyous/mbla<filename>viper/__init__.py
""" Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. """
SERVER_ADDR = '172.16.17.32'
#SERVER_ADDR = '192.168.25.32'
GAME_HOME = None
SERVER_HOME = None
SCRIPTS_PATH = None
WORLDS = []
SECTOR_SIZE = 32
SECTOR_WIDTH = SECTOR_SIZE
SECTOR_HEIGHT = SECTOR_SIZE
BUFSIZE = 4096
CONF = {
'sec_w':SECTOR_WIDTH,
'sec_h':SECTOR_HEIGHT,
'maker-port':8732,
'player-db':'player.db'
}
DEBUG = False
| StarcoderdataPython |
5170676 | <reponame>mspasiano/uniTicket
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path, re_path
from django.utils.text import slugify
from django.views.generic import RedirectView
from . decorators import is_manager, is_operator, is_the_owner
from . settings import MANAGEMENT_URL_PREFIX
from . views import (datatables, generic, management,
manager, operator, user)
app_name="uni_ticket"
_dashboard_name = 'dashboard'
# System/Generic URLs
ticket = 'ticket/<str:ticket_id>'
urlpatterns = [
path('', RedirectView.as_view(url='/{}/'.format(_dashboard_name))),
# Router url di responsabilità su struttura (manager/operator/user)
re_path(r'^manage/(?:(?P<structure_slug>[-\w]+))?$', generic.manage, name='manage'),
# Attachments download
path('{}/download/attachment/<str:attachment>/'.format(ticket), generic.download_attachment, name='download_attachment'),
path('{}/reply/<str:reply_id>/download/attachment/'.format(ticket), generic.download_message_attachment, name='download_message_attachment'),
path('{}/task/<str:task_id>/download/attachment/'.format(ticket), generic.download_task_attachment, name='download_task_attachment'),
# Delete ticket message
path('messages/delete/<str:ticket_message_id>/', generic.ticket_message_delete, name='message_delete'),
path('email-notify/update/', generic.email_notify_change, name='email_notify_change'),
path('print/ticket/<str:ticket_id>/', generic.ticket_detail_print, name='ticket_detail_print'),
]
# Datatables URLs -- JSON endpoints backing the DataTables widgets.
# Manager/operator endpoints are scoped by the structure slug.
structure = '<str:structure_slug>'

urlpatterns += [
    # User json
    path('user_all_tickets.json', datatables.user_all_tickets, name='user_all_tickets_json'),
    path('user_opened_ticket.json', datatables.user_opened_ticket, name='user_opened_ticket_json'),
    path('user_closed_ticket.json', datatables.user_closed_ticket, name='user_closed_ticket_json'),
    path('user_unassigned_ticket.json', datatables.user_unassigned_ticket, name='user_unassigned_ticket_json'),

    # Manager json
    path('{}/manager_unassigned_ticket.json'.format(structure), datatables.manager_unassigned_ticket, name='manager_unassigned_ticket_json'),
    path('{}/manager_opened_ticket.json'.format(structure), datatables.manager_opened_ticket, name='manager_opened_ticket_json'),
    path('{}/manager_closed_ticket.json'.format(structure), datatables.manager_closed_ticket, name='manager_closed_ticket_json'),
    path('{}/manager_not_closed_ticket.json'.format(structure), datatables.manager_not_closed_ticket, name='manager_not_closed_ticket_json'),

    # Operator json
    path('{}/operator_unassigned_ticket.json'.format(structure), datatables.operator_unassigned_ticket, name='operator_unassigned_ticket_json'),
    path('{}/operator_opened_ticket.json'.format(structure), datatables.operator_opened_ticket, name='operator_opened_ticket_json'),
    path('{}/operator_closed_ticket.json'.format(structure), datatables.operator_closed_ticket, name='operator_closed_ticket_json'),
    path('{}/operator_not_closed_ticket.json'.format(structure), datatables.operator_not_closed_ticket, name='operator_not_closed_ticket_json'),
]
# Management URLs (manager and operator) -- shared "manage" views that
# dispatch on the caller's role.  Each prefix below builds on the previous.
base = 'manage/<str:structure_slug>'
tickets = '{}/tickets'.format(base)
ticket = '{}/ticket'.format(tickets)
ticket_id = '{}/<str:ticket_id>'.format(ticket)
task = '{}/task'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)

urlpatterns += [
    # Ticket
    path('{}/opened/'.format(tickets), management.manage_opened_ticket_url, name='manage_opened_ticket_url'),
    path('{}/unassigned/'.format(tickets), management.manage_unassigned_ticket_url, name='manage_unassigned_ticket_url'),
    path('{}/closed/'.format(tickets), management.manage_closed_ticket_url, name='manage_closed_ticket_url'),
    path('{}/'.format(tickets), management.manage_not_closed_ticket_url, name='manage_not_closed_ticket_url'),
    path('{}/'.format(ticket), management.manage_ticket_url, name='manage_ticket_url'),
    path('{}/'.format(ticket_id), management.manage_ticket_url_detail, name='manage_ticket_url_detail'),
    path('{}/messages/'.format(ticket_id), management.ticket_message_url, name='manage_ticket_message_url'),
    path('{}/competence/add/'.format(ticket_id), management.ticket_competence_add_url, name='add_ticket_competence_url'),
    path('{}/dependence/add/'.format(ticket_id), management.ticket_dependence_add_url, name='add_ticket_dependence_url'),
    path('{}/dependence/remove/<str:master_ticket_id>/'.format(ticket_id), management.ticket_dependence_remove, name='remove_ticket_dependence'),
    path('{}/take/'.format(ticket_id), management.ticket_take, name='prendi_ticket_in_carico'),
    path('{}/close/'.format(ticket_id), management.ticket_close_url, name='close_ticket'),
    path('{}/reopen/'.format(ticket_id), management.ticket_reopen, name='reopen_ticket'),

    # Task
    path('{}/add/'.format(task), management.task_add_new_url, name='add_ticket_task_url'),
    path('{}/'.format(task_id), management.task_detail_url, name='manage_task_detail_url'),
    path('{}/close/'.format(task_id), management.task_close_url, name='close_task'),
    path('{}/delete/'.format(task_id), management.task_remove, name='task_remove'),
    path('{}/riapri/'.format(task_id), management.task_reopen, name='reopen_task'),
    path('{}/edit/remove-attachment/'.format(task_id), management.task_attachment_delete, name='manage_elimina_allegato_task'),
    path('{}/edit/'.format(task_id), management.task_edit_url, name='edit_task'),
]
# Manager URLs -- everything below is wrapped in the is_manager decorator
# (or lives in the manager views module).  NOTE: the prefix variables
# 'base', 'tickets', etc. are intentionally rebound from the management
# section above.
base = '{}/<str:structure_slug>'.format(slugify(MANAGEMENT_URL_PREFIX['manager']))
tickets = '{}/tickets'.format(base)
ticket_id = '{}/ticket/<str:ticket_id>'.format(tickets)
task = '{}/activities'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)
offices = '{}/offices'.format(base)
office = '{}/office'.format(offices)
office_id = '{}/<str:office_slug>'.format(office)
categories = '{}/categories'.format(base)
category = '{}/category'.format(categories)
category_id = '{}/<str:category_slug>'.format(category)
cat_input = '{}/input'.format(category_id)
cat_input_id = '{}/<int:module_id>'.format(cat_input)
condition = '{}/conditions/condition'.format(category_id)
condition_id = '{}/<int:condition_id>'.format(condition)

urlpatterns += [
    path('{}/{}/'.format(base, _dashboard_name), manager.dashboard, name='manager_dashboard'),

    # Ticket
    path('{}/opened/'.format(tickets), is_manager(generic.opened_ticket), name='manager_opened_ticket'),
    path('{}/unassigned/'.format(tickets), is_manager(generic.unassigned_ticket), name='manager_unassigned_ticket'),
    path('{}/closed/'.format(tickets), is_manager(generic.closed_ticket), name='manager_closed_ticket'),
    path('{}/'.format(tickets), is_manager(management.tickets), name='manager_tickets'),
    path('{}/'.format(ticket_id), is_manager(management.ticket_detail), name='manager_manage_ticket'),
    path('{}/messages/'.format(ticket_id), is_manager(management.ticket_message), name='manager_ticket_message'),
    path('{}/competence/add/'.format(ticket_id), is_manager(management.ticket_competence_add_new), name='manager_add_ticket_competence'),
    path('{}/competence/add/<str:str_slug>/'.format(ticket_id), is_manager(management.ticket_competence_add_final), name='manager_add_ticket_competence'),
    path('{}/dependence/add/'.format(ticket_id), is_manager(management.ticket_dependence_add_new), name='manager_add_ticket_dependence'),
    path('{}/close/'.format(ticket_id), is_manager(management.ticket_close), name='manager_close_ticket'),

    # Task
    path('{}/add/'.format(task), is_manager(management.task_add_new), name='manager_add_ticket_task'),
    path('{}/'.format(task_id), is_manager(management.task_detail), name='manager_task_detail'),
    path('{}/close/'.format(task_id), is_manager(management.task_close), name='manager_close_task'),
    path('{}/edit/'.format(task_id), is_manager(management.task_edit), name='manager_edit_task'),

    # Offices
    path('{}/new/'.format(office), manager.office_add_new, name='manager_office_add_new'),
    path('{}/'.format(office_id), manager.office_detail, name='manager_office_detail'),
    path('{}/edit/'.format(office_id), manager.office_edit, name='manager_office_edit'),
    path('{}/remove-operator/<int:employee_id>/'.format(office_id), manager.office_remove_operator, name='manager_remove_office_operator'),
    path('{}/add-category/'.format(office_id), manager.office_add_category, name='manager_add_office_category'),
    path('{}/remove-category/<str:category_slug>/'.format(office_id), manager.office_remove_category, name='manager_remove_office_category'),
    path('{}/disable/'.format(office_id), manager.office_disable, name='manager_disable_office'),
    path('{}/enable/'.format(office_id), manager.office_enable, name='manager_enable_office'),
    path('{}/delete/'.format(office_id), manager.office_delete, name='manager_delete_office'),
    path('{}/'.format(offices), manager.offices, name='manager_offices'),

    # Categories
    path('{}/'.format(categories), manager.categories, name='manager_categories'),
    path('{}/new/'.format(category), manager.category_add_new, name='manager_category_add_new'),
    path('{}/'.format(category_id), manager.category_detail, name='manager_category_detail'),
    path('{}/edit/'.format(category_id), manager.category_edit, name='manager_category_edit'),
    path('{}/disable/'.format(category_id), manager.category_disable, name='manager_disable_category'),
    path('{}/enable/'.format(category_id), manager.category_enable, name='manager_enable_category'),
    path('{}/delete/'.format(category_id), manager.category_delete, name='manager_delete_category'),
    # NOTE(review): the second .format(cat_input) below is a no-op --
    # '{}/new/'.format(category_id) leaves no remaining placeholders.
    # This was likely meant to be '{}/new/'.format(cat_input); confirm the
    # intended URL before changing it, since the current path is live.
    path('{}/new/'.format(category_id).format(cat_input), manager.category_input_module_new, name='manager_category_new_input_module'),

    # Category input modules
    path('{}/'.format(cat_input_id), manager.category_input_module_details, name='manager_category_input_module'),
    path('{}/edit/'.format(cat_input_id), manager.category_input_module_edit, name='manager_category_input_module_edit'),
    path('{}/enable/'.format(cat_input_id), manager.category_input_module_enable, name='manager_category_input_module_enable'),
    path('{}/disable/'.format(cat_input_id), manager.category_input_module_disable, name='manager_category_input_module_disable'),
    path('{}/delete/'.format(cat_input_id), manager.category_input_module_delete, name='manager_category_input_module_delete'),
    path('{}/preview/'.format(cat_input_id), manager.category_input_module_preview, name='manager_category_input_module_preview'),
    path('{}/field/<int:field_id>/delete/'.format(cat_input_id), manager.category_input_field_delete, name='manager_category_input_field_delete'),
    path('{}/field/<int:field_id>/edit/'.format(cat_input_id), manager.category_input_field_edit, name='manager_category_input_field_edit'),

    # Category conditions
    path('{}/new/'.format(condition), manager.category_condition_new, name='manager_category_condition_new'),
    path('{}/edit/'.format(condition_id), manager.category_condition_edit, name='manager_category_condition_edit'),
    path('{}/delete/'.format(condition_id), manager.category_condition_delete, name='manager_category_condition_delete'),
    path('{}/disable/'.format(condition_id), manager.category_condition_disable, name='manager_category_condition_disable'),
    path('{}/enable/'.format(condition_id), manager.category_condition_enable, name='manager_category_condition_enable'),
    path('{}/'.format(condition_id), manager.category_condition_detail, name='manager_category_condition_detail'),

    path('{}/remove-office/<str:office_slug>/'.format(category_id), manager.category_remove_office, name='manager_remove_category_office'),

    path('{}/settings/'.format(base), is_manager(generic.user_settings), name='manager_user_settings'),
    path('{}/messages/'.format(base), is_manager(generic.ticket_messages), name='manager_messages'),
]
# Operator URLs -- mirrors the manager section above, wrapped in is_operator.
# The prefix variables are rebound again for the operator URL namespace.
base = '{}/<str:structure_slug>'.format(slugify(MANAGEMENT_URL_PREFIX['operator']))
tickets = '{}/tickets'.format(base)
ticket_id = '{}/ticket/<str:ticket_id>'.format(tickets)
task = '{}/activities'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)

urlpatterns += [
    path('{}/{}/'.format(base, _dashboard_name), operator.dashboard, name='operator_dashboard'),

    # Ticket
    path('{}/opened/'.format(tickets), is_operator(generic.opened_ticket), name='operator_opened_ticket'),
    path('{}/unassigned/'.format(tickets), is_operator(generic.unassigned_ticket), name='operator_unassigned_ticket'),
    path('{}/closed/'.format(tickets), is_operator(generic.closed_ticket), name='operator_closed_ticket'),
    path('{}/'.format(tickets), is_operator(management.tickets), name='operator_tickets'),
    path('{}/'.format(ticket_id), is_operator(management.ticket_detail), name='operator_manage_ticket'),
    path('{}/messages/'.format(ticket_id), is_operator(management.ticket_message), name='operator_ticket_message'),
    path('{}/competence/add/'.format(ticket_id), is_operator(management.ticket_competence_add_new), name='operator_add_ticket_competence'),
    path('{}/competence/add/<str:str_slug>/'.format(ticket_id), is_operator(management.ticket_competence_add_final), name='operator_add_ticket_competence'),
    path('{}/dependence/add/'.format(ticket_id), is_operator(management.ticket_dependence_add_new), name='operator_add_ticket_dependence'),
    path('{}/close/'.format(ticket_id), is_operator(management.ticket_close), name='operator_close_ticket'),

    # Task
    path('{}/add/'.format(task), is_operator(management.task_add_new), name='operator_add_ticket_task'),
    path('{}/'.format(task_id), is_operator(management.task_detail), name='operator_task_detail'),
    path('{}/close/'.format(task_id), is_operator(management.task_close), name='operator_close_task'),
    path('{}/edit/'.format(task_id), is_operator(management.task_edit), name='operator_edit_task'),

    path('{}/settings/'.format(base), is_operator(generic.user_settings), name='operator_user_settings'),
    path('{}/messages/'.format(base), is_operator(generic.ticket_messages), name='operator_messages'),
]
# User URLs -- end-user ticket management (no structure prefix).
tickets = 'tickets'
ticket = '{}/ticket'.format(tickets)
ticket_id = '{}/<str:ticket_id>'.format(ticket)

urlpatterns += [
    path('{}/'.format(_dashboard_name), user.dashboard, name='user_dashboard'),

    path('{}/opened/'.format(tickets), generic.opened_ticket, name='user_opened_ticket'),
    path('{}/unassigned/'.format(tickets), generic.unassigned_ticket, name='user_unassigned_ticket'),
    path('{}/closed/'.format(tickets), generic.closed_ticket, name='user_closed_ticket'),

    path('{}/'.format(ticket), user.ticket_url, name='user_ticket_url'),
    # ticket creation flow: pick structure, then category, then fill the form
    path('{}/new/'.format(ticket), user.ticket_new_preload, name='new_ticket_preload'),
    path('{}/new/<str:struttura_slug>/'.format(ticket), user.ticket_new_preload, name='new_ticket_preload'),
    path('{}/new/<str:struttura_slug>/<str:categoria_slug>/'.format(ticket), user.ticket_add_new, name='add_new_ticket'),
    path('{}/messages/'.format(ticket_id), user.ticket_message, name='ticket_message'),
    path('{}/edit/'.format(ticket_id), user.ticket_edit, name='ticket_edit'),
    path('{}/edit/remove-attachment/<str:attachment>/'.format(ticket_id), user.delete_my_attachment, name='delete_my_attachment'),
    path('{}/delete/'.format(ticket_id), user.ticket_delete, name='elimina_ticket'),
    path('{}/close/'.format(ticket_id), user.ticket_close, name='user_close_ticket'),
    path('{}/activity/<str:task_id>/'.format(ticket_id), user.task_detail, name='task_detail'),
    # owner-only ticket detail (guarded by the is_the_owner decorator)
    path('{}/'.format(ticket_id), is_the_owner(user.ticket_detail), name='ticket_detail'),

    path('settings/', generic.user_settings, name='user_settings'),
    path('messages/', generic.ticket_messages, name='messages'),
]
| StarcoderdataPython |
254524 | <gh_stars>0
#!/usr/bin/env python
import pickle
import rospkg
rospack = rospkg.RosPack()
RACECAR_PKG_PATH = rospack.get_path('racecar')
PLANNER_PKG_PATH = rospack.get_path('planning_utils')
CURRENT_PKG_PATH = rospack.get_path('final')
BLUE_FILTER_TOPIC = '/cv_node/blue_data'
RED_FILTER_TOPIC = '/cv_node/red_data'
import collections
import math
import time
import rospy
import numpy as np
import matplotlib.pyplot as plt
from geometry_msgs.msg import PoseArray, PoseStamped, PoseWithCovarianceStamped, PointStamped
from ackermann_msgs.msg import AckermannDriveStamped
from std_msgs.msg import Float64
import utils
# The topic to publish control commands to
PUB_TOPIC = '/vesc/high_level/ackermann_cmd_mux/input/nav_0'
PUB_TOPIC_2 = '/plan_lookahead_follower/pose' # to publish plan lookahead follower to assist with troubleshooting
WINDOW_WIDTH = 5
INIT_POSE_TOPIC = "/initialpose"
'''
Follows a given plan using constant velocity and PID control of the steering angle
'''
class LineFollower:
"""
Initializes the line follower
plan: A list of length T that represents the path that the robot should follow
Each element of the list is a 3-element numpy array of the form [x,y,theta]
pose_topic: The topic that provides the current pose of the robot as a PoseStamped msg
plan_lookahead: If the robot is currently closest to the i-th pose in the plan,
then it should navigate towards the (i+plan_lookahead)-th pose in the plan
translation_weight: How much the error in translation should be weighted in relation
to the error in rotation
rotation_weight: How much the error in rotation should be weighted in relation
to the error in translation
kp: The proportional PID parameter
ki: The integral PID parameter
kd: The derivative PID parameter
error_buff_length: The length of the buffer that is storing past error values
speed: The speed at which the robot should travel
"""
    def __init__(self, plan, pose_topic, plan_lookahead, translation_weight,
                 rotation_weight, kp, ki, kd, error_buff_length, speed):
        """
        Set up PID state, ROS publishers/subscribers, and publish the plan's
        first pose as the initial pose estimate.  See the class docstring
        above for the meaning of each parameter.
        """
        # Store the passed parameters
        self.plan = plan
        self.plan_lookahead = plan_lookahead
        # Normalize translation and rotation weights so they sum to 1
        self.translation_weight = translation_weight / (translation_weight + rotation_weight)
        self.rotation_weight = rotation_weight / (translation_weight + rotation_weight)
        self.kp = kp
        self.ki = ki
        self.kd = kd
        # The error buff stores the error_buff_length most recent errors and the
        # times at which they were received. That is, each element is of the form
        # [time_stamp (seconds), error]. For more info about the data struct itself, visit
        # https://docs.python.org/2/library/collections.html#collections.deque
        self.error_buff = collections.deque(maxlen=error_buff_length)
        self.speed = speed
        self.found_closest_point = False
        # accumulated errors, kept for post-run analysis/plotting
        self.total_error_list = []
        # latest blue-filter angle from the CV node; None until first callback
        self.angle_from_computer_vision = None
        # min/max steering deltas seen so far (sentinel initial values)
        self.min_delta = 99.99
        self.max_delta = -99.99

        # Drive command publisher plus several debug/visualization publishers
        self.cmd_pub = rospy.Publisher(PUB_TOPIC, AckermannDriveStamped,
                                       queue_size=10)  # Create a publisher to PUB_TOPIC
        self.goal_pub = rospy.Publisher(PUB_TOPIC_2, PoseStamped,
                                        queue_size=10)  # plan lookahead pose, for troubleshooting
        self.delete_pose_pub = rospy.Publisher("MaybeDelete", PoseStamped,
                                               queue_size=10)  # candidate poses for pruning (debug)
        self.robot = rospy.Publisher("Robot", PoseStamped,
                                     queue_size=10)  # current robot pose (debug)
        self.deleted = rospy.Publisher("Deleted", PoseStamped,
                                       queue_size=10)  # pruned plan poses (debug)
        self.float_pub = rospy.Publisher("angle_from_line_follower", Float64, queue_size=1)
        self.selected_pub = rospy.Publisher("Selected", PoseStamped,
                                            queue_size=1)  # pose from the selected rollout (debug)
        self.line_follower_angle_pub = rospy.Publisher("LineFollowerAngle", PoseStamped,
                                                       queue_size=1)  # steering angle visualization (debug)

        # CV-node color-filter subscriptions (red callback is currently a stub)
        self.float_blue_sub = rospy.Subscriber(BLUE_FILTER_TOPIC, Float64, self.float_cb_blue)
        self.float_red_sub = rospy.Subscriber(RED_FILTER_TOPIC, Float64, self.float_cb_red)
        self.error = 0.0

        # NOTE(review): this log file handle is never closed; best-effort open
        # (missing directory is silently tolerated).
        try:
            self.f = open('/home/nvidia/line_follower.log', 'w')
        except IOError:
            pass

        # Create a publisher to publish the initial pose, seeded from the
        # first pose of the plan
        init_pose_pub = rospy.Publisher(INIT_POSE_TOPIC, PoseWithCovarianceStamped,
                                        queue_size=1)  # to publish init position x=2500, y=640
        PWCS = PoseWithCovarianceStamped()  # create a PoseWithCovarianceStamped() msg
        PWCS.header.stamp = rospy.Time.now()  # set header timestamp value
        PWCS.header.frame_id = "map"  # set header frame id value
        PWCS.pose.pose.position.x = plan[0][0]
        PWCS.pose.pose.position.y = plan[0][1]
        PWCS.pose.pose.position.z = 0
        PWCS.pose.pose.orientation = utils.angle_to_quaternion(plan[0][2])
        # single publish after a short delay (gives the publisher time to
        # connect); the loop runs exactly once
        for i in range(0, 1):
            rospy.sleep(0.5)
            init_pose_pub.publish(
                PWCS)  # publish initial pose, now you can add a PoseWithCovariance with topic of "/initialpose" in rviz

        # Create a subscriber to pose_topic, with callback 'self.pose_cb'
        self.pose_sub = rospy.Subscriber(pose_topic, PoseStamped, self.pose_cb)

        # This part is not used anymore. Old code.
        self.new_init_pos = rospy.Subscriber(INIT_POSE_TOPIC, PoseWithCovarianceStamped, self.new_init_pose_cb)
    def float_cb_red(self, msg):
        # Red-filter CV angle callback; currently a stub (message discarded).
        pass
def float_cb_blue(self, msg):
# print "BLUE cb", msg.data
self.angle_from_computer_vision = msg.data
    def new_init_pose_cb(self, msg):
        """
        Callback for a new initial-pose estimate: prune leading plan poses
        that are behind the robot (per the comment in __init__, this path is
        considered old/unused code).

        NOTE(review): self.curr_pose is not set in __init__; this assumes
        another callback (presumably pose_cb) has already assigned it --
        confirm, otherwise this raises AttributeError.
        """
        if len(self.plan) > 0:
            # rotate world-frame offsets into the robot frame
            rot_mat = utils.rotation_matrix(-1 * self.curr_pose[2])
            while len(self.plan) > 0:
                distance = np.sqrt(np.square(self.curr_pose[0] - self.plan[0][0]) + np.square(self.curr_pose[1] - self.plan[0][1]))

                # Figure out if self.plan[0] is in front or behind car
                offset = rot_mat * ((self.plan[0][0:2] - self.curr_pose[0:2]).reshape(2, 1))
                # NOTE(review): flatten() returns a new array; this call's
                # result is discarded, so the line has no effect.
                offset.flatten()
                # keep the pose if it is ahead of the robot or far away;
                # otherwise drop it and examine the next one
                if offset[0] > 0.0 or distance > 1.0:
                    break

                self.plan.pop(0)
'''
Computes the error based on the current pose of the car
cur_pose: The current pose of the car, represented as a numpy array [x,y,theta]
Returns: (False, 0.0) if the end of the plan has been reached. Otherwise, returns
(True, E) - where E is the computed error
'''
def compute_error(self, cur_pose):
    """Compute the control error for the current pose of the car.

    cur_pose: numpy array [x, y, theta] representing the current pose.
    Returns (False, 0.0) once the end of the plan has been reached;
    otherwise (True, error) where error is the weighted sum of the
    translation and rotation errors w.r.t. the look-ahead waypoint.
    """
    # Drop every waypoint that is already behind the car so that
    # self.plan[0] is the first configuration still ahead of us.
    if len(self.plan) > 0:
        # Rotation into the car frame: positive x offset == in front.
        rot_mat = utils.rotation_matrix(-1 * cur_pose[2])
        while len(self.plan) > 0:
            distance = np.sqrt(np.square(cur_pose[0] - self.plan[0][0]) +
                               np.square(cur_pose[1] - self.plan[0][1]))
            # Express self.plan[0] in the car's frame of reference.
            # (A discarded offset.flatten() call was removed as a no-op.)
            offset = rot_mat * ((self.plan[0][0:2] - cur_pose[0:2]).reshape(2, 1))
            # Keep the point if it is ahead of the car, or far enough away
            # that discarding it would be unsafe.
            if offset[0] > 0.0 or distance > 1.0:
                break
            self.plan.pop(0)

    # Publish the current look-ahead goal so it can be visualised in rviz.
    if len(self.plan) > 0:
        PS = PoseStamped()
        PS.header.stamp = rospy.Time.now()
        PS.header.frame_id = "map"
        # Look this many waypoints ahead, clamped to the end of the plan.
        goal_idx = min(0 + self.plan_lookahead,
                       len(self.plan) - 1)
        PS.pose.position.x = self.plan[goal_idx][0]
        PS.pose.position.y = self.plan[goal_idx][1]
        PS.pose.position.z = 0  # the robot drives on the ground plane
        PS.pose.orientation = utils.angle_to_quaternion(self.plan[goal_idx][2])
        self.goal_pub.publish(PS)

    # An empty plan means the goal has been reached.
    if len(self.plan) == 0:
        return False, 0.0

    # Element 0 is now the first configuration in front of the robot; head
    # towards the configuration plan_lookahead indices further along.
    goal_idx = min(0 + self.plan_lookahead, len(self.plan) - 1)

    # Transform the look-ahead point into the car frame: translate the car
    # to the origin, then rotate so the car's heading is the +x axis.
    look_ahead_position = np.array([self.plan[goal_idx][0], self.plan[goal_idx][1]]).reshape([2, 1])
    translation_robot_to_origin = np.array([-cur_pose[0], -cur_pose[1]]).reshape([2, 1])
    look_ahead_position_translated = look_ahead_position + translation_robot_to_origin
    rotation_matrix_robot_to_x_axis = utils.rotation_matrix(-cur_pose[2])
    look_ahead_position_translated_and_rotated = rotation_matrix_robot_to_x_axis * look_ahead_position_translated
    # x_error: distance the robot is behind the look-ahead point, parallel
    # to the path.  y_error: perpendicular distance from the path.
    x_error = float(look_ahead_position_translated_and_rotated[0][0])
    y_error = float(look_ahead_position_translated_and_rotated[1][0])
    # Steer to cancel the perpendicular offset (sign flips the direction).
    translation_error = -y_error

    # Rotation error: yaw difference between robot and goal configuration,
    # wrapped into [-pi, pi] so the car turns the short way round.
    rotation_error = cur_pose[2] - self.plan[goal_idx][2]
    if rotation_error > np.pi:
        rotation_error -= np.pi * 2.0
    elif rotation_error < -np.pi:
        rotation_error += np.pi * 2.0
    # ToDo: Fix rotation error when moving right to left, it only
    # calculates correctly left to right.

    error = self.translation_weight * translation_error + self.rotation_weight * rotation_error
    self.total_error_list.append(error)  # kept for post-run error plots
    return True, error
'''
Uses a PID control policy to generate a steering angle from the passed error
error: The current error
Returns: The steering angle that should be executed
'''
def compute_steering_angle(self, error):
    """Map the current error to a steering angle using a PID policy.

    error: the most recent combined translation/rotation error.
    Returns the steering angle to command (note the sign flip: a positive
    error steers in the negative direction).
    """
    now = rospy.Time.now().to_sec()  # current time in seconds

    # Derivative term: slope between the newest buffered sample and now.
    deriv_error = 0  # stays 0 on the first call (empty buffer)
    if len(self.error_buff) > 0:
        # error_buff holds (error, timestamp) pairs; [-1] is the newest.
        time_delta = now - self.error_buff[-1][1]
        error_delta = error - self.error_buff[-1][0]
        # Guard against a zero dt (duplicate timestamps), which previously
        # raised ZeroDivisionError.
        if time_delta > 0:
            deriv_error = error_delta / time_delta

    # Remember the current sample for future derivative/integral updates.
    self.error_buff.append((error, now))

    # Integral term: numerical integration over the buffered errors.
    # NOTE(review): np.trapz integrates over the sample index, not the
    # actual timestamps -- confirm this is the intended behaviour.
    integ_error = np.trapz([err[0] for err in self.error_buff])

    # PID output (negated so a positive error steers negative).
    return -(self.kp * error + self.ki * integ_error + self.kd * deriv_error)
'''
Callback for the current pose of the car
msg: A PoseStamped representing the current pose of the car
This is the exact callback that we used in our solution, but feel free to change it
'''
def pose_cb(self, msg):
# print "inside line_follower ,pose_cb"
time.sleep(0)
# # print "Callback received current pose. "
cur_pose = np.array([msg.pose.position.x,
msg.pose.position.y,
utils.quaternion_to_angle(msg.pose.orientation)])
# print "Current pose: ", cur_pose
# # # # print "plan[:,[0,1]]", type(np.array(self.plan)), np.array(self.plan)[:,[0,1]]
# # find closest point and delete all points before it in the plan
# # only done once at the start of following the plan
# if self.found_closest_point == False:
# min_path_distance = np.Infinity # to find closest path point and delete all points before it
# for count, position in enumerate(np.array(self.plan)[:, [0, 1]]):
# distance = np.sqrt(np.square(cur_pose[0] - position[0]) + np.square(cur_pose[1] - position[1]))
# if distance < min_path_distance:
# self.found_closest_point = True
# min_path_distance = distance
# if count > 0:
# self.plan.pop(0)
success, error = self.compute_error(cur_pose)
self.error = error
# print "Success, Error: ", success, error
if not success:
# We have reached our goal
self.pose_sub = None # Kill the subscriber
self.speed = 0.0 # Set speed to zero so car stops
if False: # show error plot?
# plot the error here
title_string = "Error plot with kp=%.2f, kd=%.2f, ki=%.2f t_w=%.2f r_w=%.2f" % \
(self.kp, self.kd, self.ki, self.translation_weight, self.rotation_weight)
fig = plt.figure()
ax = fig.add_subplot(111) #
ax.plot(self.total_error_list)
plt.title(title_string)
plt.text(0.5, 0.85, 'Total error = %.2f' % np.trapz(abs(np.array(self.total_error_list))),
horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
plt.xlabel('Iterations')
plt.ylabel('Error')
plt.show()
np.savetxt("/home/joe/Desktop/Error_1.csv", np.array(self.total_error_list), delimiter=",")
return 0
f = None
# if computer vision angle is published then use that angle
pid_angle = self.compute_steering_angle(error)
# if self.angle_from_computer_vision is not None and self.angle_from_computer_vision > -98.0 and self.error < 2:
if False:
delta = self.angle_from_computer_vision
print "CV ANGLE chosen: ", delta
else: # if computer vision angle is not published then use pid controller angle
delta = pid_angle
print "PID ANGLE chosen: ", delta
try:
self.f.write("CV ANGLE: " + str(delta) + "\tPID ANGLE" + str(pid_angle))
except (IOError, AttributeError):
pass
if delta < self.min_delta:
self.min_delta = delta
if delta > self.max_delta:
self.max_delta = delta
print 'min=%f and max=%f' % (self.min_delta, self.max_delta)
#
if True: # not using laser_wanderer_robot.launch
# Setup the control message
ads = AckermannDriveStamped()
ads.header.frame_id = '/map'
ads.header.stamp = rospy.Time.now()
ads.drive.steering_angle = delta
ads.drive.speed = 2.0
self.cmd_pub.publish(ads)
# Send the control message to laser_wanderer_robot.launch
else:
float_msg = Float32()
float_msg.data = delta
self.float_pub.publish(float_msg)
def main():
    """Entry point: load parameters and a plan, then start the line follower."""
    rospy.init_node('line_follower', anonymous=True)  # Initialize the node

    # Controller parameters are loaded from the launch file.
    plan_topic = rospy.get_param('~plan_topic')  # Default val: '/planner_node/car_plan'
    pose_topic = rospy.get_param('~pose_topic')  # Default val: '/sim_car_pose/pose'
    if True:  # toggle: running on the robot (else in rviz simulation)
        pose_topic = "/pf/viz/inferred_pose"
    plan_lookahead = rospy.get_param('~plan_lookahead')  # Starting val: 5
    translation_weight = rospy.get_param('~translation_weight')  # Starting val: 1.0
    rotation_weight = rospy.get_param('~rotation_weight')  # Starting val: 0.0
    kp = rospy.get_param('~kp')  # Starting val: 1.0
    ki = rospy.get_param('~ki')  # Starting val: 0.0
    kd = rospy.get_param('~kd')  # Starting val: 0.0
    error_buff_length = rospy.get_param('~error_buff_length')  # Starting val: 10
    speed = 2.0  # rospy.get_param('~speed')  # Default val: 1.0

    # Set to True to use a pre-existing plan instead of creating one in RVIZ.
    loadFinalPlan = True
    if not loadFinalPlan:
        # Make a new plan in RVIZ by setting the initial and goal poses.
        raw_input("Press Enter to when plan available...")  # Waits for ENTER key press
        # Convert the plan msg to a list of [x, y, theta] numpy arrays.
        raw_plan = rospy.wait_for_message(plan_topic, PoseArray)
        plan_array = []
        for pose in raw_plan.poses:
            plan_array.append(np.array([pose.position.x, pose.position.y, utils.quaternion_to_angle(pose.orientation)]))
    else:
        # Use a plan saved by plan_creator.launch and plan_cleanup.launch.
        plan_relative_path = "/saved_plans/plan_12_9_2018"
        # The pickle holds (plan_array, raw_plan PoseArray).  Use a context
        # manager so the file handle is closed after loading (the original
        # leaked the handle).
        # NOTE(review): text mode "r" works for this pickle on Linux/Py2,
        # but binary mode would be more portable -- confirm before changing.
        with open(CURRENT_PKG_PATH + plan_relative_path, "r") as plan_file:
            loaded_vars = pickle.load(plan_file)
        plan_array = loaded_vars[0]
        raw_plan = loaded_vars[1]
        # Re-publish the loaded plan a few times so it shows up in rviz.
        PA_pub = rospy.Publisher("/LoadedPlan", PoseArray, queue_size=1)
        for i in range(0, 5):
            rospy.sleep(0.5)
            PA_pub.publish(raw_plan)

    # Sanity check that a plan was obtained before starting the follower.
    try:
        if raw_plan:
            pass
    except rospy.ROSException:
        exit(1)

    lf = LineFollower(plan_array, pose_topic, plan_lookahead, translation_weight,
                      rotation_weight, kp, ki, kd, error_buff_length, speed)
    rospy.spin()  # Prevents node from shutting down
if __name__ == '__main__':
main()
| StarcoderdataPython |
4817352 | <gh_stars>1-10
# Generated by Django 3.1.6 on 2021-05-20 06:53
import django.contrib.auth.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``SystemUser`` proxy model and a ``systemuser`` flag on ``user``."""

    dependencies = [
        ('authome', '0019_auto_20210406_1517'),
    ]

    operations = [
        # Proxy model: reuses the authome.user table, only changing the
        # admin presentation (verbose names) and the default manager.
        migrations.CreateModel(
            name='SystemUser',
            fields=[
            ],
            options={
                'verbose_name': 'System User',
                'verbose_name_plural': 'System Users',
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('authome.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # Non-editable boolean flag marking a user record as a system user.
        migrations.AddField(
            model_name='user',
            name='systemuser',
            field=models.BooleanField(default=False, editable=False),
        ),
    ]
| StarcoderdataPython |
1827365 | <filename>tests/schema/test_transactions.py<gh_stars>1-10
"""Tests for privacy.schema.transaction"""
import pytest
# from privacy.schema import transaction
@pytest.mark.skip(reason="Not Implemented")
def test_transaction(mock_transaction_payload):
    """Placeholder for transaction-schema tests; skipped until implemented."""
    ...
| StarcoderdataPython |
# Read the user's full name and report whether it contains "Silva".
nome = str(input('Digite seu nome completo...')).strip()
# Bug fix: the original printed "Seu nome tem <bool>", an incomplete
# sentence; include what is being checked in the output.
print('Seu nome tem Silva? {}'.format('Silva' in nome))
187467 | import math
import numpy as np
from scipy.spatial.transform import Rotation
"""
The rotations can be of two types:
1. In a global frame of reference (also known as rotation w.r.t. a fixed or extrinsic frame)
2. In a body-centred frame of reference (also known as rotation with respect to the current frame of reference.
It is also referred to as rotation w.r.t. the intrinsic frame).
For more details on intrinsic and extrinsic frames refer: https://en.wikipedia.org/wiki/Euler_angles#Definition_by_intrinsic_rotations
Euler angles as ROLL-PITCH-YAW refer the following links:
* [Tait–Bryan angles](https://en.wikipedia.org/wiki/Euler_angles#Tait–Bryan_angles#Conventions)
* [Euler angls as YAW-PITCH-ROLL](https://en.wikipedia.org/wiki/Euler_angles#Conventions_2)
* [Rotation using Euler Angles](https://adipandas.github.io/posts/2020/02/euler-rotation/)
* [scipy: ``from_euler``](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_euler.html#scipy.spatial.transform.Rotation.from_euler)
* [scipy: ``as_euler``](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_euler.html#scipy.spatial.transform.Rotation.as_euler)
To get the angles as yaw-pitch-roll we calculate rotation with intrinsic frame of reference.
1. In intrinsic frame we start with `yaw` to go from inertial frame `0` to frame `1`.
2. Than do `pitch` in frame `1` to go from frame `1` to frame `2`.
3. Than do `roll` in frame `2` to go from frame `2` to body frame `3`.
"""
INTRINSIC_ROTATION = "ZYX"
EXTRINSIC_ROTATION = "xyz"
def add_gaussian_noise(vector, noise_mag):
    """Return *vector* with zero-mean Gaussian noise of magnitude *noise_mag* added.

    :param vector: vector of n-dimensions
    :type vector: numpy.ndarray
    :param noise_mag: standard-deviation scale of the added noise
    :type noise_mag: float
    :return: noisy vector with the same shape as the input
    :rtype: numpy.ndarray
    """
    noise = np.random.randn(*vector.shape) * float(noise_mag)
    return vector + noise
def euler2quat_raw(rpy):
    """Convert (roll, pitch, yaw) Euler angles in radians to a quaternion.

    :param rpy: vector of (roll, pitch, yaw) with shape (3,)
    :type rpy: numpy.ndarray
    :return: quaternion in scalar-first format (w, x, y, z) with shape (4,)
    :rtype: numpy.ndarray
    """
    # Precompute the half-angle sines and cosines used by every component.
    cr, sr = math.cos(rpy[0] * 0.5), math.sin(rpy[0] * 0.5)
    cp, sp = math.cos(rpy[1] * 0.5), math.sin(rpy[1] * 0.5)
    cy, sy = math.cos(rpy[2] * 0.5), math.sin(rpy[2] * 0.5)

    return np.array([
        cr * cp * cy + sr * sp * sy,  # w
        sr * cp * cy - cr * sp * sy,  # x
        cr * sp * cy + sr * cp * sy,  # y
        cr * cp * sy - sr * sp * cy,  # z
    ])
def quat2euler_raw(quat):
    """Convert a scalar-first quaternion to (roll, pitch, yaw) Euler angles.

    :param quat: quaternion as (w, x, y, z) with shape (4,)
    :type quat: numpy.ndarray
    :return: vector of (roll, pitch, yaw) in radians with shape (3,)
    :rtype: numpy.ndarray
    """
    w, x, y, z = quat

    # Roll: rotation about the x axis.
    roll = np.arctan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y * y))

    # Pitch: rotation about the y axis; clamp to +/-90 degrees at the
    # gimbal limit where arcsin would be undefined.
    sinp = 2.0 * (w * y - z * x)
    pitch = np.copysign(math.pi * 0.5, sinp) if abs(sinp) >= 1.0 else np.arcsin(sinp)

    # Yaw: rotation about the z axis.
    yaw = np.arctan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

    return np.array([roll, pitch, yaw])
def quat2euler(quat, noise_mag=0):
    """Convert a scalar-first quaternion to (roll, pitch, yaw) Euler angles.

    :param quat: quaternion in scalar-first format (w, x, y, z)
    :type quat: numpy.ndarray
    :param noise_mag: magnitude of Gaussian noise added per axis in radians
    :type noise_mag: float
    :return: numpy array of (roll, pitch, yaw) in radians
    :rtype: numpy.ndarray
    """
    scalar_last = np.roll(quat, -1)  # scipy expects (x, y, z, w)
    ypr = Rotation.from_quat(scalar_last).as_euler(INTRINSIC_ROTATION)  # yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    return ypr[::-1]  # reverse to roll-pitch-yaw
def euler2quat(euler, noise_mag=0):
    """Convert (roll, pitch, yaw) Euler angles to a scalar-first quaternion.

    :param euler: vector of (roll, pitch, yaw) in radians with shape (3,)
    :type euler: numpy.ndarray
    :param noise_mag: magnitude of Gaussian noise added per axis in radians
    :type noise_mag: float
    :return: quaternion in scalar-first format (w, x, y, z) with shape (4,)
    :rtype: numpy.ndarray
    """
    ypr = np.asarray(euler)[::-1]  # reorder to yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    scalar_last = Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_quat()
    return np.roll(scalar_last, 1)  # convert to scalar-first (w, x, y, z)
def quat2rot(quat, noise_mag=0):
    """Convert a scalar-first quaternion to a 3x3 direction-cosine matrix.

    :param quat: quaternion in scalar-first format (w, x, y, z)
    :type quat: numpy.ndarray
    :param noise_mag: magnitude of Gaussian noise added per axis in radians
    :type noise_mag: float
    :return: rotation matrix SO(3) of shape (3, 3)
    :rtype: numpy.ndarray
    """
    # Extract yaw-pitch-roll so that noise can be injected per Euler axis.
    ypr = Rotation.from_quat(np.roll(quat, -1)).as_euler(INTRINSIC_ROTATION)
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    # Rebuild the rotation from the (possibly noisy) angles.
    return Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_matrix()
def rot2quat(rot_mat, noise_mag=0):
    """Convert a rotation matrix (SO3) to a scalar-first quaternion.

    :param rot_mat: direction-cosine matrix of shape (3, 3)
    :type rot_mat: numpy.ndarray
    :param noise_mag: magnitude of Gaussian noise added per axis in radians
    :type noise_mag: float
    :return: quaternion in scalar-first format (w, x, y, z) with shape (4,)
    :rtype: numpy.ndarray
    """
    # Extract yaw-pitch-roll so that noise can be injected per Euler axis.
    ypr = Rotation.from_matrix(rot_mat).as_euler(INTRINSIC_ROTATION)
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    scalar_last = Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_quat()
    return np.roll(scalar_last, 1)  # convert to scalar-first (w, x, y, z)
def euler2rot(euler, noise_mag=0):
    """Convert (roll, pitch, yaw) Euler angles to a rotation matrix.

    :param euler: vector of (roll, pitch, yaw) in radians with shape (3,)
    :type euler: numpy.ndarray
    :param noise_mag: magnitude of Gaussian noise added per axis in radians
    :type noise_mag: float
    :return: rotation matrix of shape (3, 3)
    :rtype: numpy.ndarray
    """
    ypr = np.asarray(euler)[::-1]  # reorder to yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    return Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_matrix()
def rot2euler(rot_mat, noise_mag=0):
    """Convert a rotation matrix (SO3) to (roll, pitch, yaw) Euler angles.

    :param rot_mat: rotation matrix of shape (3, 3)
    :type rot_mat: numpy.ndarray
    :param noise_mag: magnitude of Gaussian noise added per axis in radians
    :type noise_mag: float
    :return: Euler angles (roll, pitch, yaw) with shape (3,)
    :rtype: numpy.ndarray
    """
    ypr = Rotation.from_matrix(rot_mat).as_euler(INTRINSIC_ROTATION)  # yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    return ypr[::-1]  # reverse to roll-pitch-yaw
def quat2euler_scipy(quat):
    """Convert a scalar-first quaternion to extrinsic-XYZ (roll, pitch, yaw)."""
    scalar_last = np.roll(quat, -1)  # scipy expects (x, y, z, w)
    return Rotation.from_quat(scalar_last).as_euler('xyz')
def euler2quat_scipy(rpy):
    """Convert extrinsic-XYZ (roll, pitch, yaw) to a scalar-first quaternion."""
    scalar_last = Rotation.from_euler('xyz', rpy).as_quat()
    return np.roll(scalar_last, 1)  # convert to (w, x, y, z)
def rotmat_world2body_scipy(rpy):
    """Rotation matrix for extrinsic-XYZ Euler angles (roll, pitch, yaw)."""
    return Rotation.from_euler('xyz', rpy).as_matrix()
def rotmat_pqr2euler_rate(rpy):
    """Matrix mapping body angular rates (p, q, r) to Euler-angle rates.

    With roll phi = rpy[0] and pitch theta = rpy[1]:

        [phi_dot, theta_dot, psi_dot]^T = E(phi, theta) . [p, q, r]^T

    :param rpy: (roll, pitch, yaw) in radians; yaw is not used.
    :type rpy: numpy.ndarray
    :return: 3x3 transform matrix E
    :rtype: numpy.ndarray
    """
    phi, theta = rpy[0], rpy[1]
    return np.array([
        [1, np.sin(phi) * np.tan(theta), np.cos(phi) * np.tan(theta)],
        # Bug fix: the (1, 2) entry is -sin(roll); it previously used
        # -sin(pitch) (rpy[1]) by mistake.
        [0, np.cos(phi), -np.sin(phi)],
        [0, np.sin(phi) / np.cos(theta), np.cos(phi) / np.cos(theta)],
    ])
def cross(a, b):
    """Cross product a x b computed via the skew-symmetric matrix of *a*.

    Bug fix: the skew-matrix rows were previously passed to ``np.array`` as
    three separate positional arguments, so the second row was interpreted
    as a ``dtype`` and the call raised ``TypeError``.  The rows are now
    wrapped in a single list.

    :param a: 3-vector
    :param b: 3-vector
    :return: 3-vector a x b
    :rtype: numpy.ndarray
    """
    a_skew = np.array([
        [0, -a[2], a[1]],
        [a[2], 0, -a[0]],
        [-a[1], a[0], 0],
    ])
    return np.dot(a_skew, b)
| StarcoderdataPython |
6564716 | from os import path
from setuptools import setup, find_packages
project_directory = path.abspath(path.dirname(__file__))
def load_from(file_name):
    """Return the UTF-8 text of *file_name*, relative to the project directory."""
    full_path = path.join(project_directory, file_name)
    with open(full_path, encoding="utf-8") as handle:
        return handle.read()
# Package metadata for dogebuild-tex, the TeX plugin for dogebuild.
setup(
    name="dogebuild-tex",
    # The version string is stored in a data file inside the package.
    version=load_from("dogebuild_tex/dogebuild_tex.version").strip(),
    description="Tex dogebuild plugin",
    long_description=load_from("README.md"),
    long_description_content_type="text/markdown",
    author="<NAME>",
    author_email="<EMAIL>",
    license="mit",
    url="https://github.com/dogebuild/dogebuild-tex",
    packages=find_packages(
        include=[
            "dogebuild*",
        ]
    ),
    # Ship the *.version data file alongside the Python modules.
    package_data={
        "": [
            "*.version",
        ]
    },
    test_suite="tests",
    install_requires=[
        "dogebuild",
    ],
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Topic :: Software Development",
    ],
    keywords="dogebuild builder tex latex",
)
| StarcoderdataPython |
62798 | <reponame>coderextreme/x3dpython
from django.apps import AppConfig
class X3DConfig(AppConfig):
    """Django application configuration for the ``x3d`` app."""
    name = 'x3d'
| StarcoderdataPython |
6593781 | <reponame>ATrain951/01.python-com_Qproject
import io
import unittest
from contextlib import redirect_stdout
import solution
class TestQ(unittest.TestCase):
    """Verify the stdout produced by solution.symmetric_difference."""

    def _captured_output(self, first, second):
        """Run symmetric_difference on the two sets and return what it printed."""
        buffer = io.StringIO()
        with redirect_stdout(buffer):
            solution.symmetric_difference(first, second)
        return buffer.getvalue()

    def test_case_0(self):
        self.assertEqual(self._captured_output({2, 4, 5, 9}, {2, 4, 11, 12}),
                         '5\n9\n11\n12\n')

    def test_case_1(self):
        self.assertEqual(self._captured_output({8, -10}, {5, 6, 7}),
                         '-10\n5\n6\n7\n8\n')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3529032 | <filename>conrad/test/units/adapter/test_base_adapter.py
from conrad.adapter import Base
class TestBase(object):
    """Unit tests for the ``Base`` adapter interface."""

    def test_methods(self):
        # The adapter interface must expose all CRUD-style entry points.
        for method in ['connect', 'find', 'update', 'create',
                       'delete', 'result_dict']:
            assert hasattr(Base, method), 'Base is missing method {}'.format(
                method)

    def test_result_dict(self):
        # result_dict should turn a tuple of (key, value) pairs into a dict.
        test_tuple = (('one', 111), ('two', 222), ('three', 333))
        d = Base.result_dict(test_tuple)
        for k, v in test_tuple:
            # Idiom fix: use the ``in`` operator instead of dict.has_key(),
            # which was removed in Python 3.
            assert k in d, 'result is missing key "{}"'.format(k)
            assert d[k] == v, 'result[{}] is not {}'.format(k, v)
132784 | <filename>tests/test_data.py
import numpy as np
import pytest
import warnings
from nexusformat.nexus import *
# Module-level NeXus fixtures shared by the tests below.
x = NXfield(2 * np.linspace(0.0, 10.0, 11, dtype=np.float64), name="x")  # 11-point axis
y = NXfield(3 * np.linspace(0.0, 5.0, 6, dtype=np.float64), name="y")    # 6-point axis
z = NXfield(4 * np.linspace(0.0, 2.0, 3, dtype=np.float64), name="z")    # 3-point axis
v = NXfield(np.linspace(0, 99, num=100, dtype=np.float64), name="v")     # signal values 0..99
v.resize((2, 5, 10))  # reshape the signal to rank 3, used with axes (z, y, x)
im = NXfield(np.ones(shape=(10, 10, 4), dtype=np.float32), name='image')  # image-like field
def test_data_creation():
    """NXdata built from a signal and axes exposes the expected metadata."""
    data = NXdata(v, (z, y, x), title="Title")
    # The group records the signal/axes selection in its attributes.
    assert "signal" in data.attrs
    assert "axes" in data.attrs
    assert len(data.attrs["axes"]) == 3
    assert data.ndim == 3
    assert data.shape == (2, 5, 10)
    # Signal properties are forwarded from the stored field.
    assert data.nxsignal.nxname == "v"
    assert data.nxsignal.ndim == 3
    assert data.nxsignal.shape == (2, 5, 10)
    assert data.nxsignal.any()
    assert not data.nxsignal.all()  # v starts at 0, so not every value is truthy
    # Axes keep their order, names, ranks, and shapes.
    assert [axis.nxname for axis in data.nxaxes] == ["z", "y", "x"]
    assert [axis.ndim for axis in data.nxaxes] == [1, 1, 1]
    assert [axis.shape for axis in data.nxaxes] == [(3,), (6,), (11,)]
    assert data.nxtitle == "Title"
def test_default_data():
    """set_default() controls which group get_default() returns at each level."""
    data = NXdata(v, (z, y, x), title="Title")
    root = NXroot(NXentry(data))
    root["entry/data"].set_default()
    # The default is visible from the root, the entry, and the group itself.
    assert root.get_default() is root["entry/data"]
    assert root["entry"].get_default() is root["entry/data"]
    assert root["entry/data"].get_default() is root["entry/data"]
    assert root.plottable_data is root["entry/data"]
    root["entry/subentry"] = NXsubentry(data)
    root["entry/subentry/data"].set_default()
    # Without over=True, the subentry default does not override the root's.
    assert root.get_default() is root["entry/data"]
    assert root["entry/subentry"].get_default() is root["entry/subentry/data"]
    assert root["entry/subentry"].plottable_data is root["entry/subentry/data"]
    root["entry/subentry/data"].set_default(over=True)
    # With over=True the subentry default overrides the higher-level default.
    assert root.get_default() is root["entry/subentry/data"]
    assert root["entry"].get_default() is root["entry/subentry/data"]
    assert root["entry/data"].get_default() is root["entry/data"]
    assert root.plottable_data is root["entry/subentry/data"]
def test_plottable_data():
    """Plot rank and shape drop size-1 leading dimensions of the signal."""
    data = NXdata(v, (z, y, x), title="Title")
    assert data.is_plottable()
    assert data.plottable_data is data
    assert data.plot_rank == 3
    assert data.plot_rank == data.nxsignal.ndim
    assert data.plot_axes == data.nxaxes
    assert data.nxsignal.valid_axes(data.nxaxes)
    v2 = v[0]
    v2.resize((1, 5, 10))
    data2 = NXdata(v2)
    assert data2.shape == (1, 5, 10)
    # The size-1 leading dimension is excluded from the plottable shape/rank.
    assert data2.plot_shape == (5, 10)
    assert data2.plot_rank == 2
    assert data2.plot_rank == data2.nxsignal.ndim - 1
def test_signal_selection():
    """Signal and axes can be assigned as field objects or by member name."""
    data = NXdata()
    data.nxsignal = v
    data.nxaxes = (z, y, x)
    assert data.nxsignal.nxname == "v"
    assert [axis.nxname for axis in data.nxaxes] == ["z", "y", "x"]
    assert np.array_equal(data.nxsignal, v)
    assert np.array_equal(data.nxaxes[0], z)
    # The same selection works with string names referring to group members.
    data = NXdata()
    data["v"] = v
    data["x"] = x
    data["y"] = y
    data["z"] = z
    data.nxsignal = "v"
    data.nxaxes = ("z", "y", "x")
    assert data.nxsignal.nxname == "v"
    assert [axis.nxname for axis in data.nxaxes] == ["z", "y", "x"]
    assert np.array_equal(data.nxsignal, v)
    assert np.array_equal(data.nxaxes[0], z)
def test_rename():
    """Renaming fields keeps the group's signal/axes attributes in sync."""
    data = NXdata()
    data.nxsignal = v
    data.nxaxes = (z, y, x)
    data["x"].rename("xx")
    data["y"].rename("yy")
    data["z"].rename("zz")
    data["v"].rename("vv")
    # The signal and axes properties reflect the new member names.
    assert data.nxsignal.nxname == "vv"
    assert [axis.nxname for axis in data.nxaxes] == ["zz", "yy", "xx"]
def test_size_one_axis():
    """A scalar (size-1) axis is excluded from the plottable shape."""
    y1 = np.array((1), dtype=np.float64)  # 0-d array used as a size-1 axis
    v1 = NXfield(np.linspace(0, 10*1*2, num=10*1*2, dtype=np.int64), name="v")
    v1.resize((2, 1, 10))  # rank-3 signal with a size-1 middle dimension
    data = NXdata(v1, (z, y1, x))
    assert data.is_plottable()
    assert data.plottable_data is data
    # The size-1 dimension is dropped when plotting.
    assert data.plot_rank == 2
    assert data.plot_rank == data.nxsignal.ndim - 1
    assert len(data.plot_axes) == 2
    assert data.plot_shape == (2, 10)
    assert data.nxsignal.valid_axes(data.plot_axes)
def test_data_operations():
    """Arithmetic on NXdata applies to the signal while preserving the axes."""
    data = NXdata(v, (z, y, x))
    new_data = data + 1
    assert np.array_equal(new_data.nxsignal.nxvalue, v + 1)
    assert new_data.nxaxes == data.nxaxes
    assert new_data.nxsignal.nxname == "v"
    assert [axis.nxname for axis in data.nxaxes] == ["z", "y", "x"]
    new_data = data - 1
    assert np.array_equal(new_data.nxsignal, v - 1)
    new_data = data * 2
    assert np.array_equal(new_data.nxsignal, v * 2)
    new_data = 2 * data
    assert np.array_equal(new_data.nxsignal, v * 2)
    new_data = data / 2
    assert np.array_equal(new_data.nxsignal, v / 2)
    new_data = 2 * data - data
    assert np.array_equal(new_data.nxsignal, v)
def test_data_errors():
    """Errors are attached as <signal>_errors and propagate in quadrature."""
    y1 = NXfield(np.linspace(1, 10, 10), name="y")
    v1 = NXfield(y1**2, name="v")
    e1 = NXfield(np.sqrt(v1))
    data = NXdata(v1, (y1), errors=e1)
    assert data.nxerrors is not None
    assert data.nxerrors.nxname == "v_errors"
    assert np.array_equal(data.nxerrors, e1)
    data = NXdata(v1, (y1))
    data.nxerrors = e1
    new_data = 2 * data
    assert np.array_equal(new_data.nxerrors, 2 * e1)
    # quadrature: sqrt((2e)^2 + e^2) == e * sqrt(5)
    new_data = 2 * data - data
    assert np.array_equal(new_data.nxerrors, e1 * np.sqrt(5))
    new_data = data - data / 2
    assert np.array_equal(new_data.nxerrors, e1 * np.sqrt(1.25))
def test_data_weights():
    """Weights attach as <signal>_weights, divide in weighted_data(), and scale with arithmetic."""
    y1 = NXfield(np.linspace(1, 10, 10), name="y")
    v1 = NXfield(y1**2, name="v")
    w1 = NXfield(np.sqrt(v1))
    data = NXdata(v1, (y1), errors=y1, weights=w1)
    assert data.nxweights is not None
    assert data.nxweights.nxname == "v_weights"
    assert np.array_equal(data.nxweights, w1)
    weighted_data = data.weighted_data()
    assert np.array_equal(weighted_data.nxsignal, v1 / w1)
    assert np.array_equal(weighted_data.nxerrors, y1 / w1)
    assert weighted_data.nxaxes == data.nxaxes
    data = NXdata(v1, (y1))
    data.nxweights = w1
    new_data = 2 * data
    assert np.array_equal(new_data.nxweights, 2 * w1)
    new_data = 2 * data - data
    assert np.array_equal(new_data.nxweights, w1)
    new_data = data - data / 2
    assert np.array_equal(new_data.nxweights, w1/2)
def test_data_slabs():
    """Integer and real-valued (axis-coordinate) slicing produce consistent slabs."""
    data = NXdata(v, (z, y, x), title="Title")
    slab = data[0,:,:]
    assert np.array_equal(slab.nxsignal, v[0])
    assert slab.plot_rank == 2
    assert slab.plot_shape == v[0].shape
    assert slab.nxaxes == [y, x]
    # Float slice limits select on axis values rather than index positions.
    slab = data[0, 3.:12., 2.:18.]
    assert slab.plot_shape == (v.shape[1]-2, v.shape[2]-2)
    assert slab.plot_axes == [y[1:-1], x[1:-1]]
    slab = data[0, 3.5:11.5, 2.5:17.5]
    assert slab.shape == (v.shape[1]-2, v.shape[2]-2)
    assert slab.plot_shape == (v.shape[1]-2, v.shape[2]-2)
    assert slab.plot_axes == [y[1:-1], x[1:-1]]
    slab1 = data[0:0, 3.5:11.5, 2.5:17.5]
    slab2 = data[0:1, 3.5:11.5, 2.5:17.5]
    assert slab1.shape == slab.shape
    assert slab2.shape == slab.shape
def test_data_projections():
    """Projections reorder axes, honor limits, and either sum or average over axes."""
    d1 = NXdata(v[0], (y, x))
    assert d1.nxaxes == [d1["y"], d1["x"]]
    p1 = d1.project((1, 0))
    p2 = d1.project((0, 1), limits=((3., 9.), (4., 16.)))
    assert p1.nxaxes == [p1["x"], p1["y"]]
    assert np.array_equal(p1["x"].nxvalue, d1["x"])
    assert p2.nxaxes == [p2["y"], p2["x"]]
    assert np.array_equal(p2["x"].nxvalue, d1["x"][4.:16.])
    assert np.array_equal(p2["x"].nxvalue, d1["x"][2:9])
    d2 = NXdata(v, (z, y, x))
    p3 = d2.project((0,1),((0.,8.),(3.,9.),(4.,16.)))
    assert p3.nxaxes == [p3["z"], p3["y"]]
    assert np.array_equal(p3["y"].nxvalue, d2["y"][3.:9.])
    assert np.array_equal(p3["y"].nxvalue, d2["y"][1:4])
    # The projected-out axis collapses to its midpoint and keeps limit metadata.
    assert p3["x"] == 10.
    assert p3["x"].attrs["minimum"] == 4.
    assert p3["x"].attrs["maximum"] == 16.
    assert p3["x"].attrs["summed_bins"] == 7
    assert p3["v"].sum() == d2.v[:,1:3,2:8].sum()
    # summed=False averages over the collapsed bins instead of summing.
    p4 = d2.project((0,1),((0.,8.),(3.,9.),(4.,16.)), summed=False)
    assert p4["v"].sum() == d2.v[:,1:3,2:8].sum() / p4["x"].attrs["summed_bins"]
def test_data_smoothing():
    """Smoothing interpolates onto a finer grid while pinning the endpoints."""
    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
    data = NXdata(np.sin(x), (x))
    smooth_data = data.smooth(n=101, xmin=x.min(), xmax=x.max())
    assert smooth_data.nxsignal.shape == (101,)
    assert smooth_data.nxaxes[0].shape == (101,)
    assert smooth_data.nxsignal[0] == np.sin(x)[0]
    assert smooth_data.nxsignal[-1] == np.sin(x)[-1]
    # factor=4 subdivides each original interval into 4.
    smooth_data = data.smooth(factor=4)
    assert smooth_data.nxsignal.shape == (41,)
    assert smooth_data.nxaxes[0].shape == (41,)
    assert smooth_data.nxsignal[0] == np.sin(x)[0]
    assert smooth_data.nxsignal[4] == np.sin(x)[1]
    assert smooth_data.nxsignal[-1] == np.sin(x)[-1]
def test_data_selection():
    """select() keeps points at multiples of the divisor, with offset/symmetric options."""
    xx = np.linspace(0, 20.0, 21, dtype=float)
    yy = np.ones(shape=xx.shape, dtype=float)
    yy[np.where(np.remainder(xx, 4) == 0.0)] = 2.0
    data = NXdata(yy, xx)
    selected_data = data.select(4.0)
    assert selected_data.shape == (6,)
    assert np.all(selected_data.nxsignal==2.0)
    yy[(np.array((1,3,5,7,9,11,13,15,17,19)),)] = 1.5
    data = NXdata(yy, xx)
    selected_data = data.select(4.0, offset=1.0)
    assert selected_data.shape == (5,)
    assert np.all(selected_data.nxsignal==1.5)
    # symmetric=True also keeps the points mirrored about each multiple.
    selected_data = data.select(4.0, offset=1.0, symmetric=True)
    assert selected_data.shape == (10,)
    assert np.all(selected_data.nxsignal==1.5)
def test_image_data():
    """Image signals are detected as images; ordinary 3-D data is not."""
    root = NXroot(NXentry(NXdata(im)))
    root["entry"].attrs["default"] = "data"
    root["entry/other_data"] = NXdata(v, (z, y, x), title="Title")
    assert root["entry/data/image"].is_image()
    assert root["entry/data"].is_image()
    assert root.plottable_data.is_image()
    assert root["entry"].plottable_data.is_image()
    assert not root["entry/other_data"].is_image()
def test_smart_indices():
    """Fancy/boolean indexing of NXfields matches the underlying NumPy arrays."""
    ind = [1,3,5]
    assert all(x[ind].nxvalue == x.nxvalue[ind])
    assert all(v[v>50].nxvalue == v.nxvalue[v.nxvalue>50])
    assert all(v[1,0,ind].nxvalue == v.nxvalue[1,0,ind])
    # NOTE(review): this mutates the module-level fixture ``x`` in place.
    x[ind] = 0
    assert x.any() and not x[ind].any()
    ind = np.array([[3, 7],[4, 5]])
    assert np.all(x[ind].nxvalue == x.nxvalue[ind])
    row = np.array([0, 1, 2])
    col = np.array([2, 1, 3])
    assert all(v[0][row,col].nxvalue == v[0].nxvalue[row,col])
    assert np.all(v[0][row[:,np.newaxis],col].nxvalue ==
                  v[0].nxvalue[row[:,np.newaxis],col])
| StarcoderdataPython |
3293816 | <filename>sdk/cwl/arvados_cwl/pathmapper.py<gh_stars>1-10
import re
import logging
import uuid
import os
import arvados.commands.run
import arvados.collection
from cwltool.pathmapper import PathMapper, MapperEnt, abspath, adjustFileObjs, adjustDirObjs
from cwltool.workflow import WorkflowException
logger = logging.getLogger('arvados.cwl-runner')
class ArvPathMapper(PathMapper):
    """Convert container-local paths to and from Keep collection ids."""

    # Matches "keep:<portable data hash>/<path>" file references.
    pdh_path = re.compile(r'^keep:[0-9a-f]{32}\+\d+/.+$')
    # Same, but the trailing path component is optional (bare collection).
    pdh_dirpath = re.compile(r'^keep:[0-9a-f]{32}\+\d+(/.+)?$')

    def __init__(self, arvrunner, referenced_files, input_basedir,
                 collection_pattern, file_pattern, name=None, **kwargs):
        # arvrunner supplies the API client, keep client and upload cache.
        self.arvrunner = arvrunner
        self.input_basedir = input_basedir
        # %-patterns used to build container-side paths from keep references.
        self.collection_pattern = collection_pattern
        self.file_pattern = file_pattern
        self.name = name
        super(ArvPathMapper, self).__init__(referenced_files, input_basedir, None)

    def visit(self, srcobj, uploadfiles):
        """Record the mapping for one File/Directory object, adding local
        files that still need uploading to *uploadfiles*."""
        src = srcobj["location"]
        if srcobj["class"] == "File":
            if "#" in src:
                # Strip any fragment identifier before resolving.
                src = src[:src.index("#")]
            if isinstance(src, basestring) and ArvPathMapper.pdh_path.match(src):
                self._pathmap[src] = MapperEnt(src, self.collection_pattern % src[5:], "File")
            if src not in self._pathmap:
                # Local FS ref, may need to be uploaded or may be on keep
                # mount.
                ab = abspath(src, self.input_basedir)
                st = arvados.commands.run.statfile("", ab, fnPattern="keep:%s/%s")
                if isinstance(st, arvados.commands.run.UploadFile):
                    # Defer the upload; setup() batches all of these.
                    uploadfiles.add((src, ab, st))
                elif isinstance(st, arvados.commands.run.ArvFile):
                    # Already resident in Keep (e.g. via a keep mount).
                    self._pathmap[src] = MapperEnt(st.fn, self.collection_pattern % st.fn[5:], "File")
                elif src.startswith("_:"):
                    if "contents" in srcobj:
                        # File literal; materialized later in setup().
                        pass
                    else:
                        raise WorkflowException("File literal '%s' is missing contents" % src)
                else:
                    raise WorkflowException("Input file path '%s' is invalid" % st)
            if "secondaryFiles" in srcobj:
                for l in srcobj["secondaryFiles"]:
                    self.visit(l, uploadfiles)
        elif srcobj["class"] == "Directory":
            if isinstance(src, basestring) and ArvPathMapper.pdh_dirpath.match(src):
                self._pathmap[src] = MapperEnt(src, self.collection_pattern % src[5:], "Directory")
            for l in srcobj.get("listing", []):
                self.visit(l, uploadfiles)

    def addentry(self, obj, c, path, subdirs):
        """Copy or create *obj* inside collection *c* under *path*,
        recording every directory location in *subdirs*."""
        if obj["location"] in self._pathmap:
            src, srcpath = self.arvrunner.fs_access.get_collection(self._pathmap[obj["location"]].resolved)
            if srcpath == "":
                srcpath = "."
            c.copy(srcpath, path + "/" + obj["basename"], source_collection=src, overwrite=True)
            for l in obj.get("secondaryFiles", []):
                self.addentry(l, c, path, subdirs)
        elif obj["class"] == "Directory":
            for l in obj["listing"]:
                self.addentry(l, c, path + "/" + obj["basename"], subdirs)
            subdirs.append((obj["location"], path + "/" + obj["basename"]))
        elif obj["location"].startswith("_:") and "contents" in obj:
            # File literal: write its contents directly into the collection.
            with c.open(path + "/" + obj["basename"], "w") as f:
                f.write(obj["contents"].encode("utf-8"))
        else:
            raise WorkflowException("Don't know what to do with '%s'" % obj["location"])

    def setup(self, referenced_files, basedir):
        # type: (List[Any], unicode) -> None
        """Build the full path map: batch-upload pending local files, then
        materialize directories and file literals as new collections."""
        uploadfiles = set()
        # Seed the map from the runner's cache of previously uploaded files.
        for k,v in self.arvrunner.get_uploaded().iteritems():
            self._pathmap[k] = MapperEnt(v.resolved, self.collection_pattern % v.resolved[5:], "File")
        for srcobj in referenced_files:
            self.visit(srcobj, uploadfiles)
        if uploadfiles:
            arvados.commands.run.uploadfiles([u[2] for u in uploadfiles],
                                             self.arvrunner.api,
                                             dry_run=False,
                                             num_retries=self.arvrunner.num_retries,
                                             fnPattern="keep:%s/%s",
                                             name=self.name,
                                             project=self.arvrunner.project_uuid)
        for src, ab, st in uploadfiles:
            self._pathmap[src] = MapperEnt(st.fn, self.collection_pattern % st.fn[5:], "File")
            self.arvrunner.add_uploaded(src, self._pathmap[src])
        for srcobj in referenced_files:
            if srcobj["class"] == "Directory":
                if srcobj["location"] not in self._pathmap:
                    # Build a new collection from the directory listing.
                    c = arvados.collection.Collection(api_client=self.arvrunner.api,
                                                      keep_client=self.arvrunner.keep_client,
                                                      num_retries=self.arvrunner.num_retries)
                    subdirs = []
                    for l in srcobj["listing"]:
                        self.addentry(l, c, ".", subdirs)
                    # Only save if an identical collection does not already exist.
                    check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
                    if not check["items"]:
                        c.save_new(owner_uuid=self.arvrunner.project_uuid)
                    ab = self.collection_pattern % c.portable_data_hash()
                    self._pathmap[srcobj["location"]] = MapperEnt(ab, ab, "Directory")
                    for loc, sub in subdirs:
                        ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
                        self._pathmap[loc] = MapperEnt(ab, ab, "Directory")
            elif srcobj["class"] == "File" and (srcobj.get("secondaryFiles") or
                                                (srcobj["location"].startswith("_:") and "contents" in srcobj)):
                # Files with secondaries or literal contents also get their
                # own collection so everything stays together.
                c = arvados.collection.Collection(api_client=self.arvrunner.api,
                                                  keep_client=self.arvrunner.keep_client,
                                                  num_retries=self.arvrunner.num_retries )
                subdirs = []
                self.addentry(srcobj, c, ".", subdirs)
                check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
                if not check["items"]:
                    c.save_new(owner_uuid=self.arvrunner.project_uuid)
                ab = self.file_pattern % (c.portable_data_hash(), srcobj["basename"])
                self._pathmap[srcobj["location"]] = MapperEnt(ab, ab, "File")
                if srcobj.get("secondaryFiles"):
                    ab = self.collection_pattern % c.portable_data_hash()
                    self._pathmap["_:" + unicode(uuid.uuid4())] = MapperEnt(ab, ab, "Directory")
                for loc, sub in subdirs:
                    ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
                    self._pathmap[loc] = MapperEnt(ab, ab, "Directory")
        self.keepdir = None

    def reversemap(self, target):
        """Map a container path back to a keep: reference where possible."""
        if target.startswith("keep:"):
            return (target, target)
        elif self.keepdir and target.startswith(self.keepdir):
            return (target, "keep:" + target[len(self.keepdir)+1:])
        else:
            return super(ArvPathMapper, self).reversemap(target)
class InitialWorkDirPathMapper(PathMapper):
    """Path mapper for staging a job's InitialWorkDir requirement."""

    def visit(self, obj, stagedir, basedir, copy=False):
        # type: (Dict[unicode, Any], unicode, unicode, bool) -> None
        loc = obj["location"]
        if obj["class"] == "Directory":
            self._pathmap[loc] = MapperEnt(obj["location"], stagedir, "Directory")
            self.visitlisting(obj.get("listing", []), stagedir, basedir)
        elif obj["class"] == "File":
            if loc in self._pathmap:
                return
            tgt = os.path.join(stagedir, obj["basename"])
            if "contents" in obj and obj["location"].startswith("_:"):
                # File literal: staged by writing its contents.
                self._pathmap[loc] = MapperEnt(obj["contents"], tgt, "CreateFile")
            else:
                if copy:
                    self._pathmap[loc] = MapperEnt(obj["path"], tgt, "WritableFile")
                else:
                    self._pathmap[loc] = MapperEnt(obj["path"], tgt, "File")
            self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir)

    def setup(self, referenced_files, basedir):
        # type: (List[Any], unicode) -> None
        # Go through each file and set the target to its own directory along
        # with any secondary files.
        self.visitlisting(referenced_files, self.stagedir, basedir)
        # Rewrite keep: references so they resolve under the task's keep mount.
        for path, (ab, tgt, type) in self._pathmap.items():
            if type in ("File", "Directory") and ab.startswith("keep:"):
                self._pathmap[path] = MapperEnt("$(task.keep)/%s" % ab[5:], tgt, type)
class FinalOutputPathMapper(PathMapper):
    """Maps output File/Directory objects to their final staging locations."""

    def visit(self, obj, stagedir, basedir, copy=False):
        # type: (Dict[unicode, Any], unicode, unicode, bool) -> None
        location = obj["location"]
        kind = obj["class"]
        if kind == "Directory":
            self._pathmap[location] = MapperEnt(location, stagedir, "Directory")
            return
        # Only unmapped File objects are staged; anything else is ignored.
        if kind != "File" or location in self._pathmap:
            return
        target = os.path.join(stagedir, obj["basename"])
        self._pathmap[location] = MapperEnt(location, target, "File")
        self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir)

    def setup(self, referenced_files, basedir):
        # type: (List[Any], unicode) -> None
        self.visitlisting(referenced_files, self.stagedir, basedir)
| StarcoderdataPython |
1671584 | <filename>ros/catch_mouse/scripts/button.py
#!/home/ros/.pyenv/shims/python3
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import String
from getch import _Getch
def 发布指令():
    """Read WASD keys from the terminal and publish servo commands.

    Publishes a String message on /舵机控制/指令 for each direction key
    (w/a/s/d -> 上/左/下/右) at up to 50 Hz; 'q' exits the loop.
    """
    shell输入 = _Getch()
    rospy.init_node('keyboard', anonymous=True)
    指令发送 = rospy.Publisher('/舵机控制/指令', String, queue_size=10)
    循环频率 = rospy.Rate(50)
    # Dispatch table replaces the repeated if/elif publish blocks.
    按键指令 = {'w': "上", 'a': "左", 's': "下", 'd': "右"}
    while not rospy.is_shutdown():
        输入 = shell输入()
        if 输入 == 'q':
            rospy.loginfo(f"退出节点")
            break
        if 输入 in 按键指令:
            数据 = String()
            数据.data = 按键指令[输入]
            指令发送.publish(数据)
            rospy.loginfo(f"发送指令:{数据.data}")
        循环频率.sleep()
# Script entry point: run the keyboard publisher until 'q' or Ctrl-C.
if __name__ == '__main__':
    try:
        发布指令()
    except KeyboardInterrupt:
        # Log a clean shutdown message when interrupted.
        rospy.loginfo(f"退出节点")
8020012 | <filename>nudging/simulation/utils.py
import numpy as np
from numpy import fabs
from scipy.optimize import bisect
from scipy import stats
from nudging.dataset.matrix import MatrixData
class Bounds():
    """An interval to sample from, holding either float or integer bounds.

    A scalar value produces a degenerate interval (integers get width 1 so
    that ``randint`` can still draw the value itself).
    """

    def __init__(self, value, int_val=False):
        # int_val selects integer semantics for both storage and sampling.
        self.int_val = int_val
        if isinstance(value, (list, tuple, np.ndarray)):
            # Sequence input is taken as explicit [lower, upper] bounds.
            self.value = np.array(value, dtype=int) if int_val else np.array(value)
        else:
            # Scalar input collapses to a point (ints: half-open width 1).
            self.value = (np.array([value, value + 1], dtype=int) if int_val
                          else np.array([value, value], dtype=float))

    def rand(self):
        """Draw one sample from the interval."""
        lower, upper = self.value[0], self.value[1]
        if self.int_val:
            return np.random.randint(lower, upper)
        return lower + np.random.rand() * (upper - lower)

    def max(self):
        """Return the upper bound."""
        return self.value[1]

    def min(self):
        """Return the lower bound."""
        return self.value[0]
def mixed_features(n_features_uncorrelated=10, n_features_correlated=10,
                   eigen_power=3, **kwargs):
    """Simulate a dataset from a freshly drawn random correlation matrix.

    Convenience wrapper: draws a correlation structure with the requested
    uncorrelated/correlated feature counts, then generates samples from it.
    Extra keyword arguments are forwarded to ``features_from_cmatrix``.
    """
    matrix = create_corr_matrix(n_features_uncorrelated,
                                n_features_correlated, eigen_power)
    return features_from_cmatrix(matrix, **kwargs)
def find_smallest_alpha(M_zero, M_one):
    """Find the largest alpha in (0, 1] keeping the blend PSD.

    Considers ``(1 - alpha) * M_zero + alpha * M_one`` and returns the alpha
    at which its smallest eigenvalue crosses zero: 1 if the full blend is
    already PSD, 0 if even a tiny step goes indefinite, otherwise the root
    found by bisection.
    """
    def min_eigval(alpha):
        blend = M_zero * (1 - alpha) + M_one * alpha
        return np.min(np.linalg.eigvalsh(blend))

    at_one = min_eigval(1)
    if at_one > 0 or fabs(at_one) < 1e-12:
        return 1
    if min_eigval(1e-8) < 0:
        return 0
    try:
        return bisect(min_eigval, 1e-8, 1)
    except ValueError as e:
        # Dump the bracket values before re-raising to aid debugging.
        print(min_eigval(0), min_eigval(1e-8), min_eigval(1))
        raise e
def create_corr_matrix(n_features_uncorrelated=10, n_features_correlated=10,
                       eigen_power=3, rng=None):
    """Draw a random correlation matrix with a leading uncorrelated block.

    The matrix covers ``n_features_uncorrelated + n_features_correlated``
    features plus two hidden outcome columns.  The leading uncorrelated
    rows/columns are forced to the identity, and the result is the largest
    admissible blend toward the fully correlated matrix that stays
    positive semi-definite.

    Parameters
    ----------
    n_features_uncorrelated : int
        Number of mutually uncorrelated features (identity block).
    n_features_correlated : int
        Number of correlated features.
    eigen_power : float
        Exponent shaping the eigenvalue spectrum; larger values make the
        spectrum more uneven.
    rng : numpy random Generator, optional
        When given, the whole draw is reproducible.  (Fix: previously only
        the eigenvalues used ``rng`` while the correlation draw still used
        the global NumPy state.)

    Returns
    -------
    numpy.ndarray
        A square correlation matrix of size ``n_tot_features``.
    """
    n_tot_features = 2 + n_features_uncorrelated + n_features_correlated
    n_iid = n_features_uncorrelated
    if rng is None:
        eigen_vals = np.random.rand(n_tot_features)**eigen_power
    else:
        eigen_vals = rng.random(n_tot_features)**eigen_power
    # Eigenvalues of a correlation matrix must sum to its dimension.
    eigen_vals *= len(eigen_vals)/np.sum(eigen_vals)
    # Thread rng through so the correlation draw is reproducible as well.
    base_corr_matrix = stats.random_correlation.rvs(eigen_vals, random_state=rng)
    # M_zero: leading block fully uncorrelated, rest as drawn.
    M_zero = np.zeros_like(base_corr_matrix)
    M_zero[n_iid:, n_iid:] = base_corr_matrix[n_iid:, n_iid:]
    M_zero[:n_iid, :n_iid] = np.identity(n_iid)
    # M_one: drawn matrix with only the leading block set to identity.
    M_one = np.copy(base_corr_matrix)
    M_one[:n_iid, :n_iid] = np.identity(n_iid)
    # Back off slightly from the PSD boundary for numerical safety.
    alpha = find_smallest_alpha(M_zero, M_one) - 1e-10
    return M_zero*(1-alpha) + M_one*alpha
def _transform_outcome(outcome, a, powers=np.array([1, 0.5, 0.1])):
ret_outcome = np.zeros_like(outcome)
a *= powers
for i in range(len(a)):
ret_outcome += (a[i]-powers[i]/2)*outcome**(i+1)
return ret_outcome
def features_from_cmatrix(
        corr_matrix, n_samples=500, nudge_avg=0.1,
        noise_frac=0.8, control_unique=0.5,
        control_precision=0.5, linear=True, **kwargs):
    """Simulate a nudge experiment dataset from a correlation matrix.

    The last two columns of the drawn data act as hidden intrinsic
    outcomes for the control and nudge conditions; the rest become the
    observed features.  Half of the samples are randomly assigned to the
    nudge group, Gaussian noise is added to the observed outcome, and the
    true CATE (nudge minus control outcome) is stored in ``truth``.
    Returns a ``MatrixData`` built from (features, nudge, outcome).
    """
    n_features = corr_matrix.shape[0]-2
    # Draw correlated samples via the Cholesky factor of the matrix.
    L = np.linalg.cholesky(corr_matrix)
    X = np.dot(L, np.random.randn(n_features+2, n_samples)).T
    # Randomly assign exactly half the samples to the nudge group.
    nudge = np.zeros(n_samples, dtype=int)
    nudge[np.random.choice(n_samples, n_samples//2, replace=False)] = 1
    # The last two latent columns drive the two potential outcomes.
    control_intrinsic = X[:, -1]
    nudge_intrinsic = X[:, -2]
    true_outcome_control = (control_intrinsic*control_unique
                            + (1-control_unique)*nudge_intrinsic)
    true_outcome_control *= control_precision
    true_outcome_nudge = nudge_intrinsic + nudge_avg
    if not linear:
        # Pass outcomes and features through random cubic transforms.
        # NOTE(review): both outcomes share the same coefficient array
        # ``a``; confirm against _transform_outcome how it is scaled.
        a = np.random.rand(3)
        true_outcome_control = _transform_outcome(true_outcome_control, a)
        true_outcome_nudge = _transform_outcome(true_outcome_nudge, a)
        for i_col in range(n_features):
            a = np.random.rand(3)
            X[:, i_col] = _transform_outcome(X[:, i_col], a)
    # Conditional average treatment effect per sample.
    cate = true_outcome_nudge - true_outcome_control
    outcome = (true_outcome_control*(1-nudge)
               + true_outcome_nudge*nudge)
    outcome += (noise_frac/(1-noise_frac))*np.random.randn(n_samples)
    # Drop the hidden outcome columns from the observed features.
    X = X[:, :-2]
    truth = {
        "cate": cate, "n_samples": n_samples, "nudge_avg": nudge_avg,
        "noise_frac": noise_frac, "control_unique": control_unique,
        "control_precision": control_precision, "linear": linear,
    }
    matrix = MatrixData.from_data((X, nudge, outcome), **kwargs, truth=truth)
    return matrix
| StarcoderdataPython |
3307880 | <gh_stars>1-10
from peewee import *
# Single shared SQLite connection used by every model in this module.
conn = SqliteDatabase("./core_elements/data_controller/data.db")
class BaseModel(Model):
    """Base model binding all tables to the shared SQLite database."""
    class Meta:
        database = conn
class UserInfo(BaseModel):
    """Per-user state: economy, activity, moderation flags ('user' table)."""
    id = AutoField(column_name='id')
    balance = IntegerField(column_name='balance', default=0)
    experience = IntegerField(column_name='experience', default=0)
    voice_activity = IntegerField(column_name='voice_activity', default=0)
    biography = TextField(column_name='biography', null=True)
    bonus_taked_on_day = IntegerField(column_name='bonus_taked_on_day', default=0)
    mute_end_at = IntegerField(column_name='mute_end_at', null=True)
    warn = IntegerField(column_name='warn', default=0)
    on_server = BooleanField(column_name='on_server', default=True)
    class Meta:
        table_name = 'user'
class PersonalVoice(BaseModel):
    """A user's personal voice channel and its purchased limits."""
    id = AutoField(column_name='id')
    user = ForeignKeyField(UserInfo,
                           column_name='user',
                           backref="user_personal_voice")
    voice_id = IntegerField(column_name='voice_id')
    slots = IntegerField(column_name='slots')
    max_bitrate = IntegerField(column_name='max_bitrate')
    class Meta:
        table_name = 'user_personal_voice'
class UserRoles(BaseModel):
    """Many-to-many link between users and role ids."""
    id = AutoField(column_name='id')
    user = ForeignKeyField(UserInfo, column_name='user', backref="user_roles")
    role_id = IntegerField(column_name='role_id')
    class Meta:
        table_name = 'user_roles'
class Relationship(BaseModel):
    """Marriage record between two users, with the time it started."""
    id = AutoField(column_name='id')
    user = ForeignKeyField(UserInfo,
                           column_name='user',
                           backref="relationship")
    soul_mate = ForeignKeyField(UserInfo,
                                column_name='soul_mate',
                                backref="relationship")
    married_time = IntegerField(column_name='married_time')
    class Meta:
        table_name = 'relationship'
class Likes(BaseModel):
    """A like/reaction from one user toward another (typed, optional)."""
    id = AutoField(column_name='id')
    user = ForeignKeyField(UserInfo, column_name='user', backref="likes")
    to_user = IntegerField(column_name='to_user')
    type = IntegerField(column_name='type', null=True)
    class Meta:
        table_name = 'likes'
class ModLog(BaseModel):
    """One moderation action: who did what, why, for how long, and when."""
    id = AutoField(column_name='id')
    moderator = IntegerField(column_name='moderator')
    action = TextField(column_name='action')
    reason = TextField(column_name='reason')
    duration = IntegerField(column_name='duration', null=True)
    creation_time = IntegerField(column_name='creation_time')
    class Meta:
        table_name = 'mod_log'
class ModLogTarget(BaseModel):
    """Target(s) of a moderation action (one row per affected user id)."""
    id = AutoField(column_name='id')
    mod_log = ForeignKeyField(ModLog,
                              column_name='mod_log',
                              backref="mod_log_target")
    target = IntegerField(column_name='target')
    class Meta:
        table_name = 'mod_log_target'
class ShopRoles(BaseModel):
    """Purchasable role and its price in the shop."""
    id = AutoField(column_name='id')
    role_id = IntegerField(column_name='role_id')
    price = IntegerField(column_name='price')
    class Meta:
        table_name = 'shop_roles'
class Suggestions(BaseModel):
    """User suggestion keyed by the message id it was posted with."""
    message_id = PrimaryKeyField(column_name='message_id')
    text = TextField(column_name='text')
    url = TextField(column_name='url', null=True)
    author = IntegerField(column_name='author')
    class Meta:
        table_name = 'suggestions'
class Codes(BaseModel):
    """Stored code snippet with an optional name and group."""
    id = PrimaryKeyField(column_name='id')
    code = TextField(column_name='code')
    name = TextField(column_name='name', null=True)
    group = IntegerField(column_name='group', null=True)
    class Meta:
        table_name = 'codes'
def close_connection():
    """Close the shared SQLite connection."""
    conn.close()
6493007 | from .tool.func import *
def topic_admin_2(conn, name, sub, num):
    """Render the state/admin panel for one comment (id *num*) of a topic.

    Builds an HTML fragment: a state list (author, written time), an
    admin-only tool section (ban author, hide, pin) when admin_check(3)
    passes, and a general tool section (raw view) for everyone.
    """
    curs = conn.cursor()
    # Fetch the comment's hide flag, author (ip or user name) and date.
    curs.execute("select block, ip, date from topic where title = ? and sub = ? and id = ?", [name, sub, str(num)])
    data = curs.fetchall()
    if not data:
        # Unknown comment: bounce back to the discussion thread.
        return redirect('/topic/' + url_pas(name) + '/sub/' + url_pas(sub))
    ban = ''
    if admin_check(3) == 1:
        # Admin-only tool section (hide/pin the comment, ban the author).
        ban += '''
            </ul>
            <br>
            <h2>''' + load_lang('admin_tool') + '''</h2>
            <ul>
        '''
        is_ban = '<li><a href="/topic/' + url_pas(name) + '/sub/' + url_pas(sub) + '/b/' + str(num) + '">'
        # Toggle label depending on the current hide flag.
        if data[0][0] == 'O':
            is_ban += load_lang('hide_release')
        else:
            is_ban += load_lang('hide')
        is_ban += '''
            </a>
            </li>
            <li>
                <a href="/topic/''' + url_pas(name) + '/sub/' + url_pas(sub) + '/notice/' + str(num) + '''">
        '''
        # Toggle label depending on whether the comment is pinned (top = 'O').
        curs.execute("select id from topic where title = ? and sub = ? and id = ? and top = 'O'", [name, sub, str(num)])
        if curs.fetchall():
            is_ban += load_lang('pinned_release')
        else:
            is_ban += load_lang('pinned') + ''
        is_ban += '</a></li></ul>'
        ban += '<li><a href="/ban/' + url_pas(data[0][1]) + '">'
        # Toggle label depending on whether the author is currently banned.
        curs.execute("select end from ban where block = ?", [data[0][1]])
        if curs.fetchall():
            ban += load_lang('ban_release')
        else:
            ban += load_lang('ban')
        ban += '</a></li>' + is_ban
    # General tool section (raw view), shown to everyone.
    ban += '''
        </ul>
        <br>
        <h2>''' + load_lang('other_tool') + '''</h2>
        <ul>
            <li>
                <a href="/topic/''' + url_pas(name) + '/sub/' + url_pas(sub) + '/raw/' + str(num) + '''">''' + load_lang('raw') + '''</a>
            </li>
    '''
    # Prepend the state list entries (written time, then author).
    ban = '<li>' + load_lang('time') + ' : ' + data[0][2] + '</li>' + ban
    if ip_or_user(data[0][1]) == 1:
        # Anonymous author: show the IP with a contribution-record link.
        ban = '<li>' + load_lang('writer') + ' : ' + data[0][1] + ' <a href="/record/' + url_pas(data[0][1]) + '">(' + load_lang('record') + ')</a></li>' + ban
    else:
        # Registered author: link both the user page and the record page.
        ban = '''
            <li>
                ''' + load_lang('writer') + ' : <a href="/w/user:' + data[0][1] + '">' + data[0][1] + '</a> <a href="/record/' + url_pas(data[0][1]) + '">(' + load_lang('record') + ''')</a>
            </li>
        ''' + ban
    ban = '<h2>' + load_lang('state') + '</h2><ul>' + ban
    return easy_minify(flask.render_template(skin_check(),
                                             imp = [load_lang('discussion_tool'), wiki_set(), custom(), other2([' (#' + str(num) + ')', 0])],
                                             data = ban,
                                             menu = [['topic/' + url_pas(name) + '/sub/' + url_pas(sub) + '#' + str(num), load_lang('return')]]
                                             ))
| StarcoderdataPython |
6465195 | #!/usr/bin/env python3
#
# Copyright 2022 Graviti. Licensed under MIT License.
#
"""Column Series module."""
| StarcoderdataPython |
1950583 | import logging
import re
from functools import lru_cache
import json
import pandas as pd
from bioservices import KEGG, UniProt
class KeggProteinInteractionsExtractor:
    """Extracts protein-protein interactions (PPIs) from KEGG pathway KGML.

    Uses bioservices KEGG/UniProt clients (lazily constructed) to map KGML
    relation entries through KO -> HSA -> UniProt numbers and gene names.
    """

    def __init__(self, kegg=None, uniprot =None):
        self._logger = logging.getLogger(__name__)
        # Assigning via the property setters below; clients are created
        # lazily on first access when None is passed.
        self.kegg = kegg
        self.uniprot = uniprot
        # Cache of KGML entry id -> uniprot numbers.  Entry ids are only
        # unique within a single KGML file.
        self._cache_kegg_entry_uniprots = {}

    @property
    def uniprot(self):
        # Lazily build the UniProt client on first access.
        self.__uniprot = self.__uniprot or UniProt(verbose=False)
        return self.__uniprot

    @uniprot.setter
    def uniprot(self, uniprot):
        self.__uniprot = uniprot

    @property
    def kegg(self):
        # Lazily build the KEGG client on first access.
        self.__kegg = self.__kegg or KEGG()
        return self.__kegg

    @kegg.setter
    def kegg(self, kegg):
        self.__kegg = kegg

    def extract_protein_interaction(self, kegg_pathway_id):
        """Download the KGML for *kegg_pathway_id* and extract its PPIs."""
        self._logger.info("Extracting PPIs for kegg pathway id {} ".format(kegg_pathway_id))
        kgml = self.kegg.get(kegg_pathway_id, "kgml")
        result_df = self.extract_protein_interactions_kgml(kgml)
        # result in a dataframe
        self._logger.info("Completed PPIs extraction for kegg pathway id {} ".format(kegg_pathway_id))
        return result_df

    def extract_protein_interactions_kgml(self, kgml_string):
        """Parse a KGML string and return its PPrel relations as a DataFrame."""
        self._logger.info("Parsing kgml")
        self._logger.debug(kgml_string)
        kgml_parser = self.kegg.parse_kgml_pathway(pathwayId='', res=kgml_string)
        result = []
        # Get PPRel (Protein protein relations) type from relation entries
        protein_relations = list(filter(lambda d: d['link'] in ['PPrel'], kgml_parser['relations']))
        for rel in protein_relations:
            self._logger.debug("Parsing relation for entry {}".format(rel))
            # Get the uniprot numbers corresponding to the 2 entries in the relation
            d_uniprot_numbers = self._cached_get_uniprot_numbers(rel['entry2'], kgml_parser)
            s_uniprot_numbers = self._cached_get_uniprot_numbers(rel['entry1'], kgml_parser)
            # Each source entry may map to multiple uniprot numbers, so loop through and get the relationships
            for s_uniprot in s_uniprot_numbers:
                # Same applies for the target entry in the relationship
                for d_uniprot in d_uniprot_numbers:
                    s_gene_name = self._get_gene_names(s_uniprot)
                    d_gene_name = self._get_gene_names(d_uniprot)
                    # set up key as the combination of the 2 interacting protein uniprot names in order
                    key = "#".join(sorted([s_uniprot, d_uniprot]))
                    # Add to result
                    rel_dict = {"key": key, "s_uniprot": s_uniprot, "s_gene_name": s_gene_name,
                                "interaction": rel['name'],
                                "d_uniprot": d_uniprot, "d_genename": d_gene_name}
                    self._logger.debug("** Relation extracted {}".format(json.dumps(rel_dict)))
                    result.append(rel_dict)
        result_df = pd.DataFrame(result)
        self._logger.info("Extracted {} ppi relations".format(len(result_df)))
        return result_df

    def _cached_get_uniprot_numbers(self, entry_id, kgml_parser):
        """Cached wrapper for _get_uniprot_numbers keyed by KGML entry id."""
        if entry_id not in self._cache_kegg_entry_uniprots:
            # Uniprot numbers associated with the kegg entryid not in cache..
            # Note : The entry id is only unique within a KGML file!!
            self._cache_kegg_entry_uniprots[entry_id] = self._get_uniprot_numbers(entry_id, kgml_parser)
        return self._cache_kegg_entry_uniprots[entry_id]

    def _get_uniprot_numbers(self, entry_id, kgml_parser):
        """Resolve one KGML entry id to a list of UniProt accession numbers."""
        self._logger.debug("Converting kegg Hsa numbers to uniprot for entry id {}".format(entry_id))
        kegg_entries = kgml_parser['entries']
        hsa_uniprot_numbers_map = {}
        # Get the entry corresponding to the entry id
        # E.g entry id="49" name="ko:K00922 ko:K02649" type="ortholog" ...
        matching_entries = list(filter(lambda d: d['id'] == entry_id, kegg_entries))
        if len(matching_entries) != 1:
            raise Exception("The number of entries for entry id {} should be 1, but is {}".format(entry_id, len(
                matching_entries)))
        entry = matching_entries[0]
        # Multiple KO numbers are separated by space, but the link query recognises that and returns corresponding HSA numbers
        # E.g name="ko:K00922 ko:K02649"
        ko_numbers_sep_space = entry['name']
        hsa_number_list = self.get_hsa_numbers(ko_numbers_sep_space)
        # Check if there are any HSA numbers associated with the KO numbers
        if len(hsa_number_list) > 0:
            hsa_number = "+".join(hsa_number_list)
            # Convert HSA to UniProt
            hsa_uniprot_numbers_map = self.kegg.conv("uniprot", hsa_number)
            json_hsa_uniprot_map = json.dumps(hsa_uniprot_numbers_map)
            self._logger.debug("HSA to Uniprot number map {}".format(json_hsa_uniprot_map))
            # Handle case where the uniprot numbers cannot be obtained for a HSA
            if json_hsa_uniprot_map.strip() == "" or json_hsa_uniprot_map.strip() == '"\\n"':
                self._logger.debug("Could not map to has to Uniprot number map {}".format(json_hsa_uniprot_map))
                return []
        kegg_uniprot_numbers = list(hsa_uniprot_numbers_map.values())
        # Remove the up: prefix from the uniprot numbers, as they look like 'up:B0LPE5', 'up:P31751', 'up:Q9Y243'
        result = list(map(lambda x: str(re.findall(r"(?:up:)(.+)", x)[0]), kegg_uniprot_numbers))
        self._logger.debug("Uniprot numbers {}".format(result))
        return result

    def get_hsa_numbers(self, ko_numbers_sep_space):
        """Return the HSA numbers for a space-separated KO (or HSA) name string."""
        # Assume the Numbers are all Ko number or all HSA.
        self._logger.debug("Obtained KO numbers \n{}".format(ko_numbers_sep_space))
        # Undefined..
        if ko_numbers_sep_space.strip().lower() == "undefined":
            return []
        # If HSA then return as is..
        if ko_numbers_sep_space.strip().lower().startswith('hsa:'):
            return ko_numbers_sep_space.split(' ')
        # Check if ko.. else raise exception
        if not ko_numbers_sep_space.strip().lower().startswith('ko:'):
            raise ValueError("Expecting Ko or hsa numbers only, but found {}".format(ko_numbers_sep_space))
        # Get the HSA numbers (Homosapien proteins only for the KO)
        ko_number_map_sep_tab_sep_nl = self.kegg.link('hsa', ko_numbers_sep_space)
        # Extract just the HSA numbers from the multiline string individual maps
        # E.g
        # ko:K00922 hsa:5293
        # ko:K00922 hsa:5291
        # ko:K02649 hsa:5295
        self._logger.debug("HSA numbers for the KO numbers \n{}".format(ko_number_map_sep_tab_sep_nl))
        regex_hsa = r"(?:\t)(.+)"
        hsa_number_list = re.findall(regex_hsa, str(ko_number_map_sep_tab_sep_nl))
        return hsa_number_list

    # NOTE(review): lru_cache on an instance method keys on ``self`` and
    # keeps every instance alive for the cache's lifetime; consider a
    # per-instance cache instead.
    @lru_cache(maxsize=100)
    def _get_gene_names(self, uniprot_number):
        """Return a comma-separated string of gene names for a UniProt id."""
        # Get the gene names associated with the uniprot number
        self._logger.debug("Retrieving gene names for uniprotid {}".format(uniprot_number))
        gene_names_dict = self.uniprot.mapping(fr="ACC,ID", to="GENENAME", query=uniprot_number)
        self._logger.debug("Gene names map : {}".format(json.dumps(gene_names_dict)))
        return ",".join(map(lambda x: ",".join(x), gene_names_dict.values()))
| StarcoderdataPython |
6699482 |
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Layer
class ArcFace(Layer):
    """Additive angular margin (ArcFace) output layer.

    Takes (features, one-hot labels) as input and returns class
    probabilities, with margin *m* added to the target-class angle and the
    logits rescaled by *s* before softmax.
    """

    def __init__(self, n_classes=10, s=30.0, m=0.50, regularizer=None, **kwargs):
        super(ArcFace, self).__init__(**kwargs)
        self.n_classes = n_classes  # number of output classes
        self.s = s                  # logit re-scale factor
        self.m = m                  # additive angular margin (radians)
        self.regularizer = regularizers.get(regularizer)

    def build(self, input_shape):
        super(ArcFace, self).build(input_shape[0])
        # One weight column per class, compared against normalized features.
        self.W = self.add_weight(name='W',
                                 shape=(input_shape[0][-1], self.n_classes),
                                 initializer='glorot_uniform',
                                 trainable=True,
                                 regularizer=self.regularizer)

    def call(self, inputs):
        x, y = inputs
        # NOTE(review): ``c`` is computed but never used.
        c = K.shape(x)[-1]
        # normalize feature
        x = tf.nn.l2_normalize(x, axis=1)
        # normalize weights
        W = tf.nn.l2_normalize(self.W, axis=0)
        # dot product of unit vectors: logits are cos(theta) per class
        logits = x @ W
        # add margin
        # clip logits to prevent zero division when backward
        theta = tf.acos(K.clip(logits, -1.0 + K.epsilon(), 1.0 - K.epsilon()))
        target_logits = tf.cos(theta + self.m)
        # sin = tf.sqrt(1 - logits**2)
        # cos_m = tf.cos(logits)
        # sin_m = tf.sin(logits)
        # target_logits = logits * cos_m - sin * sin_m
        #
        # Apply the margin only to the true-class logits (y is one-hot).
        logits = logits * (1 - y) + target_logits * y
        # feature re-scale
        logits *= self.s
        out = tf.nn.softmax(logits)
        return out

    def compute_output_shape(self, input_shape):
        return (None, self.n_classes)
| StarcoderdataPython |
96330 | from scipy.signal import butter
from helper import ULogHelper
class DiagnoseFailure:
    """Decodes estimator/failure bit-flag time series from a ULog into
    human-readable change events."""

    def __init__(self, ulog):
        data_parser = ULogHelper(ulog)
        data_parser.extractRequiredMessages(['estimator_status', 'vehicle_status'])

    def change_diagnose(self, timestamps, flags, flag_type):
        """Dispatch to the decoder matching *flag_type*; returns a generator
        of (timestamp, decoded-names) change events."""
        if flag_type == 'innovation_check_flags':
            return self.innovation_check(timestamps, flags)
        elif flag_type == 'failure_detector_status':
            return self.failure_detector(timestamps, flags)
        elif flag_type == 'gps_check_fail_flags':
            return self.gps_check_fail(timestamps, flags)

    # NOTE: class-body decorator helper, not an instance method.  It wraps a
    # per-sample rule so the resulting method only yields when the flag
    # value changes from the previous sample (starting from 0).
    def detect_change(rule_check):
        def routine(self, timestamps, flags):
            prvsflag = 0
            for time, flag in zip(timestamps, flags):
                if flag == prvsflag:
                    continue
                else:
                    prvsflag = flag
                    yield rule_check(time, flag)
        return routine

    @detect_change
    def innovation_check(time, flag):
        """Decode estimator innovation-check flag bits into names."""
        outcome = []
        if flag & 0x01:
            outcome.append("vel")
        if flag>>1 & 0x01:
            outcome.append("hpos")
        if flag>>2 & 0x01:
            outcome.append("vpos")
        if flag>>3 & 0x07:
            outcome.append("mag")
        if flag>>6 & 0x01:
            outcome.append("yaw")
        if flag>>7 & 0x01:
            outcome.append("airspeed")
        if flag>>8 & 0x01:
            outcome.append("syn sideslip")
        if flag>>9 & 0x01:
            outcome.append("height above ground")
        if flag>>10 & 0x03:
            outcome.append("OF")
        return (time, outcome)

    @detect_change
    def failure_detector(time, flag):
        """Decode failure-detector status bits into names."""
        outcome = []
        if flag & 0x01:
            outcome.append("roll")
        if flag>>1 & 0x01:
            outcome.append("pitch")
        if flag>>2 & 0x01:
            outcome.append("yaw")
        if flag>>3 & 0x07:
            outcome.append("ext")
        return (time, outcome)

    @detect_change
    def gps_check_fail(time, flag):
        """Decode GPS pre-flight check failure bits into names."""
        outcome = []
        if flag & 0x01:
            outcome.append("gps_fix")
        if flag>>1 & 0x01:
            outcome.append("min_sat_count")
        if flag>>2 & 0x01:
            outcome.append("min_pdop")
        if flag>>3 & 0x01:
            outcome.append("max_horz_err")
        if flag>>4 & 0x01:
            outcome.append("max_vert_err")
        if flag>>5 & 0x01:
            outcome.append("max_spd_err")
        if flag>>6 & 0x01:
            outcome.append("max_horz_drift")
        if flag>>7 & 0x01:
            outcome.append("max_vert_drift")
        if flag>>8 & 0x01:
            outcome.append("max_horz_spd_err")
        if flag>>9 & 0x01:
            outcome.append("max_vert_spd_err")
        return (time, outcome)

    def failsafe(self, ulog):
        """Yield (timestamp, 'trigger') for each time the failsafe flag turns on."""
        vehicle_status = ulog.get_dataset('vehicle_status')
        failsafe_status = vehicle_status.list_value_changes('failsafe')
        for element in failsafe_status:
            time, status = element
            if status:
                yield (time, 'trigger')
class Vibration:
    """Extracts vibration-related time series from a ULog file.

    Reads the EKF vibration metrics from ``estimator_status`` and the raw
    accelerometer axes from ``sensor_combined``.
    """

    def __init__(self, ulog):
        data_parser = ULogHelper(ulog)
        data_parser.extractRequiredMessages(['estimator_status', 'sensor_combined'])
        self.time = data_parser.getTimeSeries('estimator_status')
        self.normalized_time = data_parser.getTimeSeries('estimator_status', start_from_zero=True)
        # EKF vibration metrics: delta-angle coning, gyro HF, accel HF.
        self.gyro_delta_angle_coning = data_parser.getMessage('estimator_status', 'vibe[0]')
        self.gyro_high_freq = data_parser.getMessage('estimator_status', 'vibe[1]')
        self.accel_high_freq = data_parser.getMessage('estimator_status', 'vibe[2]')
        self.sensor_time = data_parser.getTimeSeries('sensor_combined')
        # BUG FIX: was ``data.parser.getMessage('accelerometer_m_s2[0]')`` --
        # ``data.parser`` is an undefined name (NameError at runtime) and the
        # message-topic argument was missing.  Mirror the two-argument form
        # used above; the accelerometer fields come from 'sensor_combined'
        # (already requested in extractRequiredMessages).
        self.raw_accel_x = data_parser.getMessage('sensor_combined', 'accelerometer_m_s2[0]')
        self.raw_accel_y = data_parser.getMessage('sensor_combined', 'accelerometer_m_s2[1]')
        self.raw_accel_z = data_parser.getMessage('sensor_combined', 'accelerometer_m_s2[2]')

    def calcIMUClipping(self):
        # BUG FIX: added the missing ``self`` parameter so the method is
        # callable on instances.  Not implemented yet; see the reference
        # C++ algorithm quoted below this class.
        pass

    def calcVibration(self):
        # BUG FIX: added the missing ``self`` parameter.  Not implemented yet.
        pass
'''
// calculate vibration levels and check for accelerometer clipping (called by a backends)
void AP_InertialSensor::calc_vibration_and_clipping(uint8_t instance, const Vector3f &accel, float dt)
{
// check for clipping
if (_backends[instance] == nullptr) {
return;
}
if (fabsf(accel.x) > _backends[instance]->get_clip_limit() ||
fabsf(accel.y) > _backends[instance]->get_clip_limit() ||
fabsf(accel.z) > _backends[instance]->get_clip_limit()) {
_accel_clip_count[instance]++;
}
// calculate vibration levels
if (instance < INS_VIBRATION_CHECK_INSTANCES) {
// filter accel at 5hz
Vector3f accel_filt = _accel_vibe_floor_filter[instance].apply(accel, dt);
// calc difference from this sample and 5hz filtered value, square and filter at 2hz
Vector3f accel_diff = (accel - accel_filt);
accel_diff.x *= accel_diff.x;
accel_diff.y *= accel_diff.y;
accel_diff.z *= accel_diff.z;
_accel_vibe_filter[instance].apply(accel_diff, dt);
}
}
The algorithm for calculating the vibration levels can be seen in the AP_InertialSensor.cpp’s calc_vibration_and_clipping() method
but in short it involves calculating the standard deviation of the accelerometer readings like this:
Capture the raw x, y and z accelerometer values from the primary IMU
High-pass filter the raw values at 5hz to remove the vehicle’s movement and create a “accel_vibe_floor” for x,y and z axis.
Calculate the difference between the latest accel values and the accel_vibe_floor.
Square the above differences, filter at 2hz and then calculate the square root (for x, y and z).
These final three values are what appear in the VIBE msg’s VibeX, Y and Z fields.
'''
| StarcoderdataPython |
9642417 | <reponame>jmdecastel/GEOTADMIN<gh_stars>0
# -*- encoding: utf-8 -*-
import os
from django.test import TestCase
from geotrek.common.tasks import import_datas
from geotrek.common.models import FileType
class TasksTest(TestCase):
    """Tests for the ``import_datas`` celery task."""
    def setUp(self):
        # AttachmentParser needs an existing FileType to attach imports to.
        self.filetype = FileType.objects.create(type=u"Photographie")
    def test_import_exceptions(self):
        # An unknown parser module must raise ImportError.
        self.assertRaises(
            ImportError, import_datas, filename='bombadil', class_name='haricot', module_name='toto')
    def test_import_message_exception(self):
        # The ImportError message names both the class and the module.
        self.assertRaisesMessage(
            ImportError,
            "Failed to import parser class 'haricot' from module 'toto'",
            import_datas,
            filename='bombadil',
            class_name='haricot',
            module_name='toto'
        )
    def test_import_return(self):
        # A successful run reports parser, filename, 100% progress and task name.
        filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
        task = import_datas.delay('AttachmentParser', filename, 'geotrek.common.tests.test_parsers')
        self.assertEqual(task.status, 'SUCCESS')
        self.assertEqual(task.result['parser'], 'AttachmentParser')
        self.assertEqual(task.result['filename'], 'organism.xls')
        self.assertEqual(task.result['current'], 100)
        self.assertEqual(task.result['total'], 100)
        self.assertEqual(task.result['name'], 'geotrek.common.import-file')
| StarcoderdataPython |
3251449 | <reponame>trisadmeslek/V-Sekai-Blender-tools
"""
Copyright (C) 2021 Adobe.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Substance 3D in Blender Main Thread
# 4/21/2021
import bpy
import queue
import threading
# Callbacks posted here are drained on the main thread by ExecuteQueuedFunction.
mainThreadExecutionQueue = queue.Queue()


def RunOnMainThread(function):
    """Schedule *function* to be invoked later on the main thread."""
    mainThreadExecutionQueue.put(function)


def ExecuteQueuedFunction():
    """Invoke every queued function, then return the re-poll interval (seconds)."""
    while True:
        try:
            queued = mainThreadExecutionQueue.get_nowait()
        except queue.Empty:
            break
        queued()
    return 0.33
# on windows cursor changes must always be processed on the main thread
cusorQueue = queue.Queue()  # retained for backward compatibility; no longer used
cursorQueued = False  # retained for backward compatibility; no longer used

# Stack of every cursor name currently pushed; the top entry is the active
# modal cursor.  BUG FIX: the previous FIFO-queue/flag implementation never
# recorded the first pushed cursor and restored cursors in the wrong order
# for nesting deeper than two (push A, B, C then pop twice ended on C, not A).
cursorStack = []


def PushCursor(cursorName):
    """Push a new modal cursor (safe to call from any thread)."""
    if threading.current_thread() is threading.main_thread():
        cursorStack.append(cursorName)
        bpy.context.window.cursor_modal_set(cursorName)
    else:
        # defer to the main thread; Blender cursor calls are not thread-safe
        RunOnMainThread(lambda: PushCursor(cursorName))


def PopCursor():
    """Pop the current cursor and restore the previous one (or the default)."""
    if threading.current_thread() is threading.main_thread():
        if cursorStack:
            cursorStack.pop()
        if cursorStack:
            # re-activate the cursor that was pushed before the popped one
            bpy.context.window.cursor_modal_set(cursorStack[-1])
        elif bpy.context and bpy.context.window:
            bpy.context.window.cursor_modal_restore()
    else:
        RunOnMainThread(PopCursor)
| StarcoderdataPython |
8079809 | import torch
import random
import os
from pathlib import Path
from flask import Flask, request, jsonify
from flask_cors import CORS
from captchami.loaders import CaptchaDataset
from captchami.neural_net import NeuralNet
from captchami.vision import *
captcha_service = Flask(__name__)
# Allow cross-origin requests from any host on every route.
cors = CORS(captcha_service, resources={r"/*": {"origins": "*"}})
# Maps neural-net class indices to their meaning: 0/1 are the '+'/'-'
# operators, 2..8 are the digit values 1..7.
mapper = {0: "+", 1: "-", 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7}
# Model/temp paths below are relative, so pin the working directory to the
# project root (import-time side effect).
os.chdir("/home/fabio/CaptchAmI")
bin_net = Path("./bin_net.pt")  # binary net: arithmetic expression vs. stars
number_net = Path("./numbers_net.pt")  # multiclass net: digits and operators
temp_file = Path("./temp.png")  # scratch file for the posted image
@captcha_service.route("/classify/", methods=['POST'])
def classify():
    """
    This function takes as input a JSON file with a field called "base64_img" and elaborates it in order to find if
    there is an operation or a bunch of stars.
    It loads the datasets containing the stars and the number to get the right sizes of the image and perform two
    different classification: one to determine whether the image contains stars or not (binary classification) and then
    it chooses the correct neural network to use to classify the file.
    Returns: a number which is either the result of the operation or the sum of all the stars
    """
    # Datasets are (re)loaded per request only to obtain class lists.
    bin_loader = CaptchaDataset(Path("./datasets/binary"))
    num_loader = CaptchaDataset(Path("./datasets/multiclass/numbers"))
    content = request.json
    base64_img = content["base64_img"]
    # Decode the posted image into the shared scratch file.
    base64_to_img(base64_img, temp_file)
    # Binary classifier: class 0 = arithmetic expression, otherwise stars.
    nn = NeuralNet(l_i=6400, classes=bin_loader.get_classes(), loaders=bin_loader)
    nn.load(bin_net)
    classed = nn.classify_file(temp_file)
    if classed == 0:
        captcha_service.logger.info("Received a number")
        # classed == 0 means that we have a number to elaborate
        nn = NeuralNet(l_i=720, classes=num_loader.get_classes(), loaders=num_loader)
        nn.load(number_net)
        try:
            # Segment the captcha into glyphs (expected: digit, operator, digit).
            elements = elaborate_numbers(temp_file)
        except ValueError:
            # Segmentation failed; fall back to a random guess in 1..8.
            captcha_service.logger.error("Value error on matching elements array. Guessing ...")
            result = random.randint(1, 8)
            return jsonify(result=str(result))
        parsed = []
        for e in elements:
            # e[1] is a boolean glyph mask; scale to 0/255 grayscale for the net.
            e = np.asarray(e[1]).astype(int) * 255
            e = torch.Tensor(e)
            # We have to classify each image, which could be a number or an operator
            result = nn.classify_img(e)
            parsed.append(str(result))
        # ``mapper`` turns class indices into operators ('+'/'-') or digit values.
        e1 = mapper.get(int(parsed[0]))
        e2 = mapper.get(int(parsed[2]))
        if mapper.get(int(parsed[1])) == "+":
            try:
                result = e1 + e2
            except TypeError:
                # A digit slot mapped to an operator string; guess instead.
                # store_misclassified(temp_file)
                captcha_service.logger.error("Type error occurred on + operator, guessing...")
                result = random.randint(1, 8)
        else:
            try:
                result = e1 - e2
            except TypeError:
                # store_misclassified(temp_file)
                captcha_service.logger.error("Type error occurred on - operator, guessing...")
                result = random.randint(1, 8)
    else:
        captcha_service.logger.info("Received some stars")
        # Use CV to classify and get the numbers
        result = elaborate_stars(temp_file)
        if int(result) <= 0:
            # A non-positive star count is impossible; guess instead.
            captcha_service.logger.error("<= 0 error, guessing...")
            # store_misclassified(temp_file)
            result = random.randint(1, 8)
    captcha_service.logger.info("New classification: " + str(result))
    return jsonify(result=str(result))
| StarcoderdataPython |
189019 | """
Created on Thu Apr 9
@author: nrw
This plots residuals,
And also takes shelved torque data, adds in torque estimate and residual data
And writes it all to a CSV
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.offline as po
import plotly.graph_objs as go
from plotly import tools
from sklearn import linear_model
from sklearn.linear_model import Ridge
from sklearn import metrics
import shelve
# Load the arrays produced by the earlier processing step.
with shelve.open('calculated_data', 'r') as shelf:
    BigTheta = shelf['BigTheta']
    BigTorque = shelf['BigTorque']
    BigForce = shelf['BigForce']
    BigPosition = shelf['BigPosition']
#path = "~/Documents/projects_Spring2018/howe299r/Experiments/03April2018/WIP/"
IMUCols = ['timeSysCal', 'XYZ','X', 'Y', 'Z']
#===============================================
#### DECLARE CONSTANTS ####
#===============================================
print('number of datapoints', BigTheta.shape)
#### CALCULATE K ####
#===============================================
#### FIT TO ESTIMATE K ####
#===============================================
## Note: For the IMU, orientation.Y is pitch; X is roll; Z is yaw
torq_names = ['x', 'y', 'z']
dim = 1
#torq_1d = BigTorque[:,dim]
torq = BigTorque
theta = BigTheta
print('torq shape', torq.shape)
myX = BigTheta#theta_1Dreshape(-1,1)
myy = torq
# Fit torque = f(theta) two ways: ridge (regularized) and plain least squares.
regr= Ridge(fit_intercept=True, alpha=1.0, random_state=0, normalize=True)
regr2 = linear_model.LinearRegression()
regr.fit(myX, myy)
regr2.fit(myX, myy)
K = regr.coef_
K2 = regr2.coef_
yPred= regr.predict(myX)
yPred2= regr2.predict(myX)
print('\n======================')
# NOTE(review): lstsq below solves BigTorque @ matK = BigTheta, i.e. the
# inverse mapping of the regressions above -- confirm this is intentional.
matK = np.linalg.lstsq(BigTorque, BigTheta, rcond=None)[0]
print(matK.shape)
print('Numpy linalg.lstsq() K coefficients:\n', matK)
print('LinReg K Coefficients: \n', K2)
print('Ridge K Coefficients: \n', K)
print('\n======================')
# Torque estimate from the unregularized coefficients (no intercept applied).
torq_est = np.dot(K2, theta.T).T #n.3
resid = torq - yPred
mse = (resid ** 2).mean(axis=0)
print('resid shape', resid.shape)
print('RMSE Per Torque Dim', np.sqrt(mse))
#print('Variance score (ideal 1): %.2f' % r2_score(thetaY))
print('\n======= SkLearn Metrics====')
print('\n---- Using LinReg K dot theta. This has worse error as we have no intercept term. ===')
print('Mean Absolute Error: %0.02f' % metrics.mean_absolute_error(torq, torq_est))
print('Mean Squared Error: %0.02f' % metrics.mean_squared_error(torq, torq_est) )
print('Root Mean Squared Error %0.02f' % np.sqrt(metrics.mean_squared_error(torq, torq_est)))
print('\n---- Using sklearn LinearRegression.pred(theta). ========')
print('Mean Absolute Error: %0.02f:' % metrics.mean_absolute_error(torq, yPred2)),
print('Mean Squared Error: %0.02f' % metrics.mean_squared_error(torq, yPred2) )
print('Root Mean Squared Error: %0.02f' % np.sqrt(metrics.mean_squared_error(torq, yPred2)))
print('\n---- Using sklearn Ridge.pred(theta). ========')
print('Mean Absolute Error: %0.02f' % metrics.mean_absolute_error(torq, yPred))
print('Mean Squared Error: %0.02f' % metrics.mean_squared_error(torq, yPred) )
print('Root Mean Squared Error: %0.02f' % np.sqrt(metrics.mean_squared_error(torq, yPred)))
print('\n --- LinRegr has the best fit ----')
print('\nNote: torques about y axis: Min', myy.min(), '; Max', myy.max(), 'grams * cm')
print('\n======================')
'''
#===============================================
#### PLOT: Residuals (of Y torque_est - torque) vs Torque_est (either Y or X axis)
#===============================================
print(resid)
print(resid.shape)
names = ['X', 'Y', 'Z']
param = 'Torque'
dim = 1
xplot = torq_est[:,dim]
xplot2 = torq[:,dim]
print(xplot.shape)
yplot = resid[:,dim]
print(yplot.shape)
trace0 = go.Scatter( x = xplot, y = yplot, mode = 'markers',
name = '%s-axis %s estimated'%(names[dim], param))
trace1 = go.Scatter( x = xplot2, y = yplot, mode = 'markers',
name = '%s-axis %s calculated from data'%(names[dim], param))
data = [trace0]
layout = go.Layout(
title='%s-axis %s: Resid vs Estimate (with 3x3 K, using SkLearn LinReg) (IMU data)' % (names[dim], param),
yaxis=dict(title= 'resid (g cm)'),
xaxis=dict(title='%s (g cm)' % param),
legend=dict(x=.5, y=0.1) )
fig = tools.make_subplots(rows=2, cols=1, subplot_titles=(trace1.name, trace0.name))
fig.append_trace(trace0, 1,1)
fig.append_trace(trace1, 2,1)
fig['layout'].update(title = layout.title)
fig['layout']['xaxis2'].update(title=layout.xaxis['title'])
fig['layout']['yaxis1'].update(title = layout.yaxis['title'])
fig['layout']['yaxis2'].update(title = layout.yaxis['title'])
#fig = go.Figure(data=data, layout=layout)
po.plot(fig)
'''
#===============================================
#### PLOT: Residuals (of Y torque_est - torque) vs Force (Z only)
#===============================================
# Build two scatter traces of the residual against (a) the torque estimate
# and (b) the applied Z force, then persist the combined data table.
# NOTE(review): dim is 0 here while the trace names say "resid_torqY" --
# confirm which torque axis is intended.
print(resid.shape)
names = ['X', 'Y', 'Z']
param = 'Torque'
x2param = 'Force'
dim = 0
xplot = torq_est[:,dim]
xplot2 = BigForce[:,2]
yplot = resid[:,dim]
trace0 = go.Scatter( x = xplot, y = yplot, mode = 'markers',
    name = 'resid_torqY vs %s-axis %s estimated'%(names[dim], param))
trace1 = go.Scatter( x = xplot2, y = yplot, mode = 'markers',
    name = 'resid_torqY vs Resid vs Z-axis Force, as applied')
#data = [trace0]
overall_title='%s-axis %s: Resid vs Force applied (with 3x3 K, using SkLearn LinReg) (IMU data)' % \
        (names[dim], param) + '<br>K: ' + np.array_str(K, precision=2) + '<br>'
yaxistitle= 'resid (g cm)'
xaxistitle= 'force (g)'
layout = go.Layout(
    title = overall_title,
    legend=dict(x=.5, y=0.1) )
fig = tools.make_subplots(rows=2, cols=1, subplot_titles=(trace0.name, trace1.name))
fig.append_trace(trace0, 1,1)
fig.append_trace(trace1, 2,1)
fig['layout'].update(title=overall_title, showlegend=False)
fig['layout']['xaxis1'].update(title='%s torque est (g cm)' % (names[dim]))
fig['layout']['xaxis2'].update(title=xaxistitle)
fig['layout']['yaxis1'].update(title=yaxistitle)
fig['layout']['yaxis2'].update(title=yaxistitle)
#fig = go.Figure(data=data, layout=layout)
#po.plot(fig)
# Concatenate position, force, angles, torques, estimates and residuals
# column-wise and write them out for later analysis.
full_data = np.hstack((BigPosition, BigForce, BigTheta, BigTorque))
full_data = np.hstack((full_data, torq_est, resid))
print(torq_est.shape)
print(resid.shape)
np.savetxt("full_calculated_data.csv", full_data, delimiter=",", fmt='%0.02f')
with shelve.open('calculated_data2', 'c') as shelf:
    shelf['torq_est'] = torq_est
    shelf['resid'] = resid
    shelf['K'] = K
| StarcoderdataPython |
3537613 | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
    """
    Given two box-types, find the one that takes priority
    """
    # Priority order mirrors pandas upcasting: DataFrame > Series > Index.
    for candidate in (DataFrame, Series, pd.Index):
        if box is candidate or isinstance(vector, candidate):
            return candidate
    return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
    # Comparison tests for timedelta64[ns] vectors fully parametrized over
    # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
    # tests will eventually end up here.
    def test_compare_timedelta64_zerodim(self, box_with_array):
        # GH#26689 should unbox when comparing with zerodim array
        box = box_with_array
        # comparisons on an Index return a plain ndarray, not an Index
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        tdi = pd.timedelta_range('2H', periods=4)
        other = np.array(tdi.to_numpy()[0])  # zero-dim timedelta64 array
        tdi = tm.box_expected(tdi, box)
        res = tdi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)
        with pytest.raises(TypeError):
            # zero-dim of wrong dtype should still raise
            tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
    """Comparison behavior of timedelta64 arrays, including NaT handling."""
    # TODO: All of these need to be parametrized over box
    def test_compare_timedelta_series(self):
        # regression test for GH#5963
        s = pd.Series([timedelta(days=1), timedelta(days=2)])
        actual = s > timedelta(days=1)
        expected = pd.Series([False, True])
        tm.assert_series_equal(actual, expected)
    def test_tdi_cmp_str_invalid(self, box_with_array):
        # GH#13624
        # ordering comparisons against a plain string must raise, while
        # ==/!= evaluate to all-False/all-True
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        tdi = TimedeltaIndex(['1 day', '2 days'])
        tdarr = tm.box_expected(tdi, box_with_array)
        for left, right in [(tdarr, 'a'), ('a', tdarr)]:
            with pytest.raises(TypeError):
                left > right
            with pytest.raises(TypeError):
                left >= right
            with pytest.raises(TypeError):
                left < right
            with pytest.raises(TypeError):
                left <= right
            result = left == right
            expected = np.array([False, False], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)
            result = left != right
            expected = np.array([True, True], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)
    @pytest.mark.parametrize('dtype', [None, object])
    def test_comp_nat(self, dtype):
        # comparisons involving NaT are False except for !=
        left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
                                  pd.Timedelta('3 days')])
        right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)
        result = rhs == lhs
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = rhs != lhs
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
    def test_comparisons_nat(self):
        tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
                                    '1 day 00:00:01', '5 day 00:00:03'])
        tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
                                    '1 day 00:00:02', '5 days 00:00:03'])
        tdarr = np.array([np.timedelta64(2, 'D'),
                          np.timedelta64(2, 'D'), np.timedelta64('nat'),
                          np.timedelta64('nat'),
                          np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
                          np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
        cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handles as the same as np.nan
        for idx1, idx2 in cases:
            result = idx1 < idx2
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)
            result = idx2 > idx1
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)
            result = idx1 <= idx2
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)
            result = idx2 >= idx1
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)
            result = idx1 == idx2
            expected = np.array([False, False, False, False, False, True])
            tm.assert_numpy_array_equal(result, expected)
            result = idx1 != idx2
            expected = np.array([True, True, True, True, True, False])
            tm.assert_numpy_array_equal(result, expected)
    # TODO: better name
    def test_comparisons_coverage(self):
        rng = timedelta_range('1 days', periods=10)
        result = rng < rng[3]
        expected = np.array([True, True, True] + [False] * 7)
        tm.assert_numpy_array_equal(result, expected)
        # raise TypeError for now
        with pytest.raises(TypeError):
            rng < rng[3].value
        result = rng == list(rng)
        exp = rng == rng
        tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
    """Assorted timedelta64 arithmetic tests (ufuncs, +/- with scalars/indexes)."""
    # Tests moved from type-specific test files but not
    # yet sorted/parametrized/de-duplicated
    def test_ufunc_coercions(self):
        # normal ops are also tested in tseries/test_timedeltas.py
        # operator form and numpy-ufunc form must agree, including freq.
        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')
        for result in [idx * 2, np.multiply(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
                                 freq='4H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '4H'
        for result in [idx / 2, np.divide(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
                                 freq='H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'H'
        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')
        for result in [-idx, np.negative(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
                                 freq='-2H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '-2H'
        idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
                             freq='H', name='x')
        for result in [abs(idx), np.absolute(idx)]:
            assert isinstance(result, TimedeltaIndex)
            # abs() is not monotonic-preserving, so freq is dropped
            exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
                                 freq=None, name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq is None
    def test_subtraction_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = pd.date_range('20130101', periods=3, name='bar')
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dt
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
               " but received a 'Timedelta'")
        with pytest.raises(TypeError, match=msg):
            td - dt
        msg = "bad operand type for unary -: 'DatetimeArray'"
        with pytest.raises(TypeError, match=msg):
            td - dti
        result = dt - dti
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = dti - dt
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = tdi - td
        expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = td - tdi
        expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dti - td
        expected = DatetimeIndex(
            ['20121231', '20130101', '20130102'], name='bar')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dt - tdi
        expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
        tm.assert_index_equal(result, expected)
    def test_subtraction_ops_with_tz(self):
        # check that dt/dti subtraction ops with tz are validated
        dti = pd.date_range('20130101', periods=3)
        ts = Timestamp('20130101')
        dt = ts.to_pydatetime()
        dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
        ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
        ts_tz2 = Timestamp('20130101').tz_localize('CET')
        dt_tz = ts_tz.to_pydatetime()
        td = Timedelta('1 days')
        def _check(result, expected):
            # subtraction of two datetimelikes must yield a Timedelta scalar
            assert result == expected
            assert isinstance(result, Timedelta)
        # scalars
        result = ts - ts
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dt_tz - ts_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        result = ts_tz - dt_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        # tz mismatches
        msg = ("Timestamp subtraction must have the same timezones or no"
               " timezones")
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt_tz - dt
        msg = ("Timestamp subtraction must have the same timezones or no"
               " timezones")
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts_tz2
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt - dt_tz
        msg = ("Timestamp subtraction must have the same timezones or no"
               " timezones")
        with pytest.raises(TypeError, match=msg):
            ts - dt_tz
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - ts
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - dt
        with pytest.raises(TypeError, match=msg):
            ts_tz - ts_tz2
        # with dti
        with pytest.raises(TypeError, match=msg):
            dti - ts_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts_tz2
        result = dti_tz - dt_tz
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
        tm.assert_index_equal(result, expected)
        result = dt_tz - dti_tz
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
        tm.assert_index_equal(result, expected)
        result = dti_tz - ts_tz
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
        tm.assert_index_equal(result, expected)
        result = ts_tz - dti_tz
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
        tm.assert_index_equal(result, expected)
        result = td - td
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dti_tz - td
        expected = DatetimeIndex(
            ['20121231', '20130101', '20130102'], tz='US/Eastern')
        tm.assert_index_equal(result, expected)
    def test_dti_tdi_numeric_ops(self):
        # These are normally union/diff set-like ops
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = pd.date_range('20130101', periods=3, name='bar')
        # TODO(wesm): unused?
        # td = Timedelta('1 days')
        # dt = Timestamp('20130101')
        result = tdi - tdi
        expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = tdi + tdi
        expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = dti - tdi  # name will be reset
        expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
        tm.assert_index_equal(result, expected)
    def test_addition_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = pd.date_range('20130101', periods=3, name='bar')
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        result = tdi + dt
        expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
        tm.assert_index_equal(result, expected)
        result = dt + tdi
        expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
        tm.assert_index_equal(result, expected)
        result = td + tdi
        expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = tdi + td
        expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
        tm.assert_index_equal(result, expected)
        # unequal length
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            tdi + dti[0:1]
        with pytest.raises(ValueError, match=msg):
            tdi[0:1] + dti
        # random indexes
        with pytest.raises(NullFrequencyError):
            tdi + pd.Int64Index([1, 2, 3])
        # this is a union!
        # pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
        result = tdi + dti  # name will be reset
        expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
        tm.assert_index_equal(result, expected)
        result = dti + tdi  # name will be reset
        expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
        tm.assert_index_equal(result, expected)
        result = dt + td
        expected = Timestamp('20130102')
        assert result == expected
        result = td + dt
        expected = Timestamp('20130102')
        assert result == expected
    # TODO: Needs more informative name, probably split up into
    # more targeted tests
    @pytest.mark.parametrize('freq', ['D', 'B'])
    def test_timedelta(self, freq):
        index = pd.date_range('1/1/2000', periods=50, freq=freq)
        # shifting forward then back must round-trip exactly
        shifted = index + timedelta(1)
        back = shifted + timedelta(-1)
        tm.assert_index_equal(index, back)
        if freq == 'D':
            expected = pd.tseries.offsets.Day(1)
            assert index.freq == expected
            assert shifted.freq == expected
            assert back.freq == expected
        else:  # freq == 'B'
            # shifting a business-day index by a calendar day drops the freq
            assert index.freq == pd.tseries.offsets.BusinessDay(1)
            assert shifted.freq is None
            assert back.freq == pd.tseries.offsets.BusinessDay(1)
        result = index - timedelta(1)
        expected = index + timedelta(-1)
        tm.assert_index_equal(result, expected)
        # GH#4134, buggy with timedeltas
        rng = pd.date_range('2013', '2014')
        s = Series(rng)
        result1 = rng - pd.offsets.Hour(1)
        result2 = DatetimeIndex(s - np.timedelta64(100000000))
        result3 = rng - np.timedelta64(100000000)
        result4 = DatetimeIndex(s - pd.offsets.Hour(1))
        tm.assert_index_equal(result1, result4)
        tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
    """Overflow near the int64 bounds must be masked where NaT is present."""
    # TODO: parametrize over boxes
    def test_tdi_add_timestamp_nat_masking(self):
        # GH#17991 checking for overflow-masking with NaT
        tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
        tsneg = Timestamp('1950-01-01')
        # same instants expressed as Timestamp, datetime and datetime64 variants
        ts_neg_variants = [tsneg,
                           tsneg.to_pydatetime(),
                           tsneg.to_datetime64().astype('datetime64[ns]'),
                           tsneg.to_datetime64().astype('datetime64[D]')]
        tspos = Timestamp('1980-01-01')
        ts_pos_variants = [tspos,
                           tspos.to_pydatetime(),
                           tspos.to_datetime64().astype('datetime64[ns]'),
                           tspos.to_datetime64().astype('datetime64[D]')]
        for variant in ts_neg_variants + ts_pos_variants:
            res = tdinat + variant
            # the NaT slot must remain NaT rather than trip the overflow check
            assert res[1] is pd.NaT
    def test_tdi_add_overflow(self):
        # See GH#14068
        # preliminary test scalar analogue of vectorized tests below
        with pytest.raises(OutOfBoundsDatetime):
            pd.to_timedelta(106580, 'D') + Timestamp('2000')
        with pytest.raises(OutOfBoundsDatetime):
            Timestamp('2000') + pd.to_timedelta(106580, 'D')
        _NaT = int(pd.NaT) + 1
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([106580], 'D') + Timestamp('2000')
        with pytest.raises(OverflowError, match=msg):
            Timestamp('2000') + pd.to_timedelta([106580], 'D')
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([_NaT]) - Timedelta('1 days')
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
        with pytest.raises(OverflowError, match=msg):
            (pd.to_timedelta([_NaT, '5 days', '1 hours']) -
             pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
        # These should not overflow!
        exp = TimedeltaIndex([pd.NaT])
        result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex(['4 days', pd.NaT])
        result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
        result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
                  pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
        tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
    def test_timedelta_ops_with_missing_values(self):
        """+/- between td64 scalars, Series and DataFrames with NaT present;
        NaT must propagate through every combination, and np.nan operands
        must raise TypeError."""
        # setup
        s1 = pd.to_timedelta(Series(['00:00:01']))
        s2 = pd.to_timedelta(Series(['00:00:02']))
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Passing datetime64-dtype data to TimedeltaIndex is deprecated
            sn = pd.to_timedelta(Series([pd.NaT]))
        df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
        df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Passing datetime64-dtype data to TimedeltaIndex is deprecated
            dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
        scalar1 = pd.to_timedelta('00:00:01')
        scalar2 = pd.to_timedelta('00:00:02')
        timedelta_NaT = pd.to_timedelta('NaT')
        # scalar <-> scalar
        actual = scalar1 + scalar1
        assert actual == scalar2
        actual = scalar2 - scalar1
        assert actual == scalar1
        # Series <-> Series
        actual = s1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - s1
        tm.assert_series_equal(actual, s1)
        # Series <-> scalar, both orders
        actual = s1 + scalar1
        tm.assert_series_equal(actual, s2)
        actual = scalar1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - scalar1
        tm.assert_series_equal(actual, s1)
        actual = -scalar1 + s2
        tm.assert_series_equal(actual, s1)
        # a NaT timedelta scalar propagates to all-NaT
        actual = s1 + timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)
        actual = s1 - timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = -timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)
        # np.nan is not a valid timedelta operand
        with pytest.raises(TypeError):
            s1 + np.nan
        with pytest.raises(TypeError):
            np.nan + s1
        with pytest.raises(TypeError):
            s1 - np.nan
        with pytest.raises(TypeError):
            -np.nan + s1
        actual = s1 + pd.NaT
        tm.assert_series_equal(actual, sn)
        actual = s2 - pd.NaT
        tm.assert_series_equal(actual, sn)
        # DataFrame combinations
        actual = s1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = s2 - df1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + s1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - s1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - df1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + scalar1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - scalar1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + timedelta_NaT
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - timedelta_NaT
        tm.assert_frame_equal(actual, dfn)
        with pytest.raises(TypeError):
            df1 + np.nan
        with pytest.raises(TypeError):
            df1 - np.nan
        actual = df1 + pd.NaT  # NaT is datetime, not timedelta
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - pd.NaT
        tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
    def test_operators_timedelta64(self):
        """Mixed datetime64/timedelta64 arithmetic on Series and DataFrame
        columns: resulting dtypes ('m8[ns]' vs 'M8[ns]') and round trips."""
        # series ops
        v1 = pd.date_range('2012-1-1', periods=3, freq='D')
        v2 = pd.date_range('2012-1-2', periods=3, freq='D')
        rs = Series(v2) - Series(v1)
        # one day in nanoseconds, as a td64 Series
        xp = Series(1e9 * 3600 * 24,
                    rs.index).astype('int64').astype('timedelta64[ns]')
        tm.assert_series_equal(rs, xp)
        assert rs.dtype == 'timedelta64[ns]'
        df = DataFrame(dict(A=v1))
        td = Series([timedelta(days=i) for i in range(3)])
        assert td.dtype == 'timedelta64[ns]'
        # series on the rhs
        result = df['A'] - df['A'].shift()
        assert result.dtype == 'timedelta64[ns]'
        result = df['A'] + td
        assert result.dtype == 'M8[ns]'
        # scalar Timestamp on rhs
        maxa = df['A'].max()
        assert isinstance(maxa, Timestamp)
        resultb = df['A'] - df['A'].max()
        assert resultb.dtype == 'timedelta64[ns]'
        # timestamp on lhs
        result = resultb + df['A']
        values = [Timestamp('20111230'), Timestamp('20120101'),
                  Timestamp('20120103')]
        expected = Series(values, name='A')
        tm.assert_series_equal(result, expected)
        # datetimes on rhs
        result = df['A'] - datetime(2001, 1, 1)
        expected = Series(
            [timedelta(days=4017 + i) for i in range(3)], name='A')
        tm.assert_series_equal(result, expected)
        assert result.dtype == 'm8[ns]'
        d = datetime(2001, 1, 1, 3, 4)
        resulta = df['A'] - d
        assert resulta.dtype == 'm8[ns]'
        # roundtrip
        resultb = resulta + d
        tm.assert_series_equal(df['A'], resultb)
        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df['A'] + td
        resultb = resulta - td
        tm.assert_series_equal(resultb, df['A'])
        assert resultb.dtype == 'M8[ns]'
        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df['A'] + td
        resultb = resulta - td
        tm.assert_series_equal(df['A'], resultb)
        assert resultb.dtype == 'M8[ns]'
        # inplace
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        assert rs[2] == value
    def test_timedelta64_ops_nat(self):
        """GH#11349: NaT propagation for td64 Series under +, -, * and /."""
        # GH 11349
        timedelta_series = Series([NaT, Timedelta('1s')])
        nat_series_dtype_timedelta = Series([NaT, NaT],
                                            dtype='timedelta64[ns]')
        single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
        # subtraction
        tm.assert_series_equal(timedelta_series - NaT,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(-NaT + timedelta_series,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
                               nat_series_dtype_timedelta)
        # addition
        tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(nat_series_dtype_timedelta +
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(single_nat_dtype_timedelta +
                               nat_series_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(timedelta_series + NaT,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(NaT + timedelta_series,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(nat_series_dtype_timedelta +
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(single_nat_dtype_timedelta +
                               nat_series_dtype_timedelta,
                               nat_series_dtype_timedelta)
        # multiplication
        tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(timedelta_series * 1, timedelta_series)
        tm.assert_series_equal(1 * timedelta_series, timedelta_series)
        tm.assert_series_equal(timedelta_series * 1.5,
                               Series([NaT, Timedelta('1.5s')]))
        tm.assert_series_equal(1.5 * timedelta_series,
                               Series([NaT, Timedelta('1.5s')]))
        # multiplying by np.nan yields NaT, unlike addition which raises
        tm.assert_series_equal(timedelta_series * np.nan,
                               nat_series_dtype_timedelta)
        tm.assert_series_equal(np.nan * timedelta_series,
                               nat_series_dtype_timedelta)
        # division
        tm.assert_series_equal(timedelta_series / 2,
                               Series([NaT, Timedelta('0.5s')]))
        tm.assert_series_equal(timedelta_series / 2.0,
                               Series([NaT, Timedelta('0.5s')]))
        tm.assert_series_equal(timedelta_series / np.nan,
                               nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
    def test_timedelta64_operations_with_timedeltas(self):
        """td64 Series +/- timedelta-like scalar, first with
        datetime.timedelta then with pd.to_timedelta; each includes a
        round-trip check."""
        # td operate with td
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td2 = timedelta(minutes=5, seconds=4)
        result = td1 - td2
        # -1 second, expressed as a difference of td64 Series
        expected = (Series([timedelta(seconds=0)] * 3) -
                    Series([timedelta(seconds=1)] * 3))
        assert result.dtype == 'm8[ns]'
        tm.assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) -
                    Series([timedelta(seconds=0)] * 3))
        tm.assert_series_equal(result2, expected)
        # roundtrip
        tm.assert_series_equal(result + td2, td1)
        # Now again, using pd.to_timedelta, which should build
        # a Series or a scalar, depending on input.
        td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
        td2 = pd.to_timedelta('00:05:04')
        result = td1 - td2
        expected = (Series([timedelta(seconds=0)] * 3) -
                    Series([timedelta(seconds=1)] * 3))
        assert result.dtype == 'm8[ns]'
        tm.assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) -
                    Series([timedelta(seconds=0)] * 3))
        tm.assert_series_equal(result2, expected)
        # roundtrip
        tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('Egon', 'Venkman', None),
                                       ('NCC1701D', 'NCC1701D', 'NCC1701D')])
    def test_td64arr_add_sub_tdi(self, box, names):
        """TimedeltaIndex +/- td64 Series: result dtype stays td64 and
        names propagate per the (left, right, expected) triple."""
        # GH#17250 make sure result dtype is correct
        # GH#19043 make sure names are propagated correctly
        if box is pd.DataFrame and names[1] == 'Venkman':
            pytest.skip("Name propagation for DataFrame does not behave like "
                        "it does for Index/Series")
        tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
        ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
        expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
                          name=names[2])
        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)
        result = tdi + ser
        tm.assert_equal(result, expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
        result = ser + tdi
        tm.assert_equal(result, expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
        # subtraction: negated expected values
        expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
                          name=names[2])
        expected = tm.box_expected(expected, box)
        result = tdi - ser
        tm.assert_equal(result, expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
        result = ser - tdi
        tm.assert_equal(result, -expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
    def test_timedelta64_operations_with_DateOffset(self):
        """GH#10699: td64 Series +/- DateOffset scalars and object Series
        of offsets (the latter warns about the object-dtype path)."""
        # GH#10699
        td = Series([timedelta(minutes=5, seconds=3)] * 3)
        result = td + pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=6, seconds=3)] * 3)
        tm.assert_series_equal(result, expected)
        result = td - pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=4, seconds=3)] * 3)
        tm.assert_series_equal(result, expected)
        # elementwise offsets go through the slow object path -> warning
        with tm.assert_produces_warning(PerformanceWarning):
            result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
                                  pd.offsets.Hour(2)])
        expected = Series([timedelta(minutes=6, seconds=3),
                           timedelta(minutes=5, seconds=6),
                           timedelta(hours=2, minutes=5, seconds=3)])
        tm.assert_series_equal(result, expected)
        result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
        expected = Series([timedelta(minutes=6, seconds=15)] * 3)
        tm.assert_series_equal(result, expected)
        # valid DateOffsets
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            td + op(5)
            op(5) + td
            td - op(5)
            op(5) - td
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_td64arr_add_offset_index(self, names, box):
        """td64 box + Index of DateOffsets applies elementwise with name
        propagation; the object path issues a PerformanceWarning."""
        # GH#18849, GH#19744
        if box is pd.DataFrame and names[1] == 'bar':
            pytest.skip("Name propagation for DataFrame does not behave like "
                        "it does for Index/Series")
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
                             name=names[0])
        other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
                         name=names[1])
        expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
                                  freq='infer', name=names[2])
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        # The DataFrame operation is transposed and so operates as separate
        # scalar operations, which do not issue a PerformanceWarning
        warn = PerformanceWarning if box is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            res = tdi + other
        tm.assert_equal(res, expected)
        with tm.assert_produces_warning(warn):
            res2 = other + tdi
        tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_td64arr_sub_offset_index(self, names, box):
        """td64 box - Index of DateOffsets applies elementwise with name
        propagation; the object path issues a PerformanceWarning."""
        # GH#18824, GH#19744
        if box is pd.DataFrame and names[1] == 'bar':
            pytest.skip("Name propagation for DataFrame does not behave like "
                        "it does for Index/Series")
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
                             name=names[0])
        other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
                         name=names[1])
        expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
                                  freq='infer', name=names[2])
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        # The DataFrame operation is transposed and so operates as separate
        # scalar operations, which do not issue a PerformanceWarning
        warn = PerformanceWarning if box is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            res = tdi - other
        tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_td64arr_with_offset_series(self, names, box_df_fail):
        """td64 box +/- Series of DateOffsets applies elementwise with name
        propagation; Index/array boxes upcast the result to Series."""
        # GH#18849
        box = box_df_fail
        # result of a box<->Series op is a Series for Index/array boxes
        box2 = Series if box in [pd.Index, tm.to_array] else box
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
                             name=names[0])
        other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
                       name=names[1])
        expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
                              name=names[2])
        tdi = tm.box_expected(tdi, box)
        expected_add = tm.box_expected(expected_add, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res = tdi + other
        tm.assert_equal(res, expected_add)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + tdi
        tm.assert_equal(res2, expected_add)
        # TODO: separate/parametrize add/sub test?
        expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
                              name=names[2])
        expected_sub = tm.box_expected(expected_sub, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res3 = tdi - other
        tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
    @pytest.mark.parametrize('other', [
        np.arange(1, 11),
        pd.Int64Index(range(1, 11)),
        pd.UInt64Index(range(1, 11)),
        pd.Float64Index(range(1, 11)),
        pd.RangeIndex(1, 11)
    ], ids=lambda x: type(x).__name__)
    def test_tdi_rmul_arraylike(self, other, box_with_array):
        """Reflected multiplication of int-like array-likes with a td64
        box; the result box follows the upcast rules."""
        box = box_with_array
        xbox = get_upcast_box(box, other)
        tdi = TimedeltaIndex(['1 Day'] * 10)
        expected = timedelta_range('1 days', '10 days')
        # the elementwise product has no regular freq to compare against
        expected._data.freq = None
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, xbox)
        result = other * tdi
        tm.assert_equal(result, expected)
        commute = tdi * other
        tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
    """Dividing by (or into) np.timedelta64('NaT') yields all-NaN floats."""
    # GH#23829
    rng = timedelta_range('1 days', '10 days',)
    rng = tm.box_expected(rng, box_with_array)
    other = np.timedelta64('NaT')
    expected = np.array([np.nan] * 10)
    expected = tm.box_expected(expected, box_with_array)
    result = rng / other
    tm.assert_equal(result, expected)
    result = other / rng
    tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
    """Dividing a td64 array by 1 is the identity; 1 / td64 raises TypeError."""
    tdarr = tm.box_expected(TimedeltaIndex(np.arange(5, dtype='int64')),
                            box_with_array)
    tm.assert_equal(tdarr / 1, tdarr)
    # GH#23829: the reversed operation is not defined
    with pytest.raises(TypeError, match='Cannot divide'):
        1 / tdarr
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
    """td64 array / timedelta-like scalar returns float ratios (both directions)."""
    # GH#20088, GH#22163 ensure DataFrame returns correct dtype
    rng = timedelta_range('1 days', '10 days', name='foo')
    # n days / 2 hours == n * 12
    expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = rng / two_hours
    tm.assert_equal(result, expected)
    result = two_hours / rng
    # the reversed op gives elementwise reciprocals
    expected = 1 / expected
    tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
                                            box_with_array):
    """NaT entries propagate as NaN when dividing by a timedelta scalar."""
    rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
    expected = pd.Float64Index([12, np.nan, 24], name='foo')
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = rng / two_hours
    tm.assert_equal(result, expected)
    result = two_hours / rng
    # the reversed op gives elementwise reciprocals; NaN stays NaN
    expected = 1 / expected
    tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
    """Division by a td64 ndarray and its boxed/object/list variants, both directions."""
    # GH#22631
    rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
    expected = pd.Float64Index([12, np.nan, 24])
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    other = np.array([2, 4, 2], dtype='m8[h]')
    result = rng / other
    tm.assert_equal(result, expected)
    result = rng / tm.box_expected(other, box_with_array)
    tm.assert_equal(result, expected)
    result = rng / other.astype(object)
    tm.assert_equal(result, expected)
    result = rng / list(other)
    tm.assert_equal(result, expected)
    # reversed op
    expected = 1 / expected
    result = other / rng
    tm.assert_equal(result, expected)
    result = tm.box_expected(other, box_with_array) / rng
    tm.assert_equal(result, expected)
    result = other.astype(object) / rng
    tm.assert_equal(result, expected)
    result = list(other) / rng
    tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
    """Division with a length-mismatched sequence raises ValueError."""
    tdarr = tm.box_expected(
        TimedeltaIndex(['1 days', pd.NaT, '2 days']), box_with_array)
    longer = [1, 2, 3, 4]
    # one candidate longer than tdarr, one shorter
    for seq in (longer, longer[:2]):
        for other in (seq, np.array(seq), pd.Index(seq)):
            with pytest.raises(ValueError):
                tdarr / other
            with pytest.raises(ValueError):
                other / tdarr
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__

def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
    """td64 array // timedelta scalar; NaN entries propagate."""
    # GH#18831
    td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
    td1.iloc[2] = np.nan
    expected = Series([0, 0, np.nan])
    td1 = tm.box_expected(td1, box_with_array, transpose=False)
    expected = tm.box_expected(expected, box_with_array, transpose=False)
    result = td1 // scalar_td
    tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
    """timedelta scalar // td64 array (reflected floordiv); NaN propagates."""
    # GH#18831
    td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
    td1.iloc[2] = np.nan
    expected = Series([1, 1, np.nan])
    td1 = tm.box_expected(td1, box_with_array, transpose=False)
    expected = tm.box_expected(expected, box_with_array, transpose=False)
    result = scalar_td // td1
    tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
                                             scalar_td):
    """Calling __rfloordiv__ directly matches the reflected operator result."""
    # GH#18831
    td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
    td1.iloc[2] = np.nan
    expected = Series([1, 1, np.nan])
    td1 = tm.box_expected(td1, box_with_array, transpose=False)
    expected = tm.box_expected(expected, box_with_array, transpose=False)
    # We can test __rfloordiv__ using this syntax,
    # see `test_timedelta_rfloordiv`
    result = td1.__rfloordiv__(scalar_td)
    tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
    """td64 array // 1 is the identity; int // td64 raises TypeError."""
    idx = TimedeltaIndex(np.arange(5, dtype='int64'))
    idx = tm.box_expected(idx, box_with_array)
    result = idx // 1
    tm.assert_equal(result, idx)
    # the reversed op is invalid; the message differs by box type
    pattern = ('floor_divide cannot use operands|'
               'Cannot divide int by Timedelta*')
    with pytest.raises(TypeError, match=pattern):
        1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
    """td64 array // timedelta scalar returns integer multiples."""
    tdi = timedelta_range('1 days', '10 days', name='foo')
    # n days // 2 hours == n * 12
    expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
    tdi = tm.box_expected(tdi, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = tdi // two_hours
    tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
    timedelta(minutes=10, seconds=7),
    Timedelta('10m7s'),
    Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
    """Reflected and forward floordiv against several timedelta scalar types."""
    # GH#19125
    tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
    # 10m7s // 5m3s == 2 for each non-NaT entry
    expected = pd.Index([2.0, 2.0, np.nan])
    tdi = tm.box_expected(tdi, box_with_array, transpose=False)
    expected = tm.box_expected(expected, box_with_array, transpose=False)
    res = tdi.__rfloordiv__(scalar_td)
    tm.assert_equal(res, expected)
    # forward direction: 5m3s // 10m7s == 0
    expected = pd.Index([0.0, 0.0, np.nan])
    expected = tm.box_expected(expected, box_with_array, transpose=False)
    res = tdi // (scalar_td)
    tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
#  reversed ops

def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
    """td64 array % timedelta scalar, plus divmod consistency with // and %."""
    tdi = timedelta_range('1 Day', '9 days')
    tdarr = tm.box_expected(tdi, box_with_array)
    expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
    expected = tm.box_expected(expected, box_with_array)
    result = tdarr % three_days
    tm.assert_equal(result, expected)
    if box_with_array is pd.DataFrame:
        pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
    # divmod quotient/remainder must agree with // and %
    result = divmod(tdarr, three_days)
    tm.assert_equal(result[1], expected)
    tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
    """td64 array % int operates on the underlying nanosecond values."""
    tdi = timedelta_range('1 ns', '10 ns', periods=10)
    tdarr = tm.box_expected(tdi, box_with_array)
    expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
    expected = tm.box_expected(expected, box_with_array)
    result = tdarr % 2
    tm.assert_equal(result, expected)
    # int % td64 is not defined
    with pytest.raises(TypeError):
        2 % tdarr
    if box_with_array is pd.DataFrame:
        pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
    # divmod quotient/remainder must agree with // and %
    result = divmod(tdarr, 2)
    tm.assert_equal(result[1], expected)
    tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
    """timedelta scalar % td64 array, plus the reflected divmod."""
    tdi = timedelta_range('1 Day', '9 days')
    tdarr = tm.box_expected(tdi, box_with_array)
    expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
    expected = TimedeltaIndex(expected)
    expected = tm.box_expected(expected, box_with_array)
    result = three_days % tdarr
    tm.assert_equal(result, expected)
    if box_with_array is pd.DataFrame:
        pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
    # reflected divmod quotient/remainder must agree with // and %
    result = divmod(three_days, tdarr)
    tm.assert_equal(result[1], expected)
    tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others

def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
    """td64 array * timedelta scalar is undefined and raises TypeError."""
    td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
    td1.iloc[2] = np.nan
    td1 = tm.box_expected(td1, box_with_array)
    # check that we are getting a TypeError
    # with 'operate' (from core/ops.py) for the ops that are not
    # defined
    pattern = 'operate|unsupported|cannot|not supported'
    with pytest.raises(TypeError, match=pattern):
        td1 * scalar_td
    with pytest.raises(TypeError, match=pattern):
        scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
    """Multiplying by an incompatible/shorter operand raises."""
    tdarr = tm.box_expected(
        TimedeltaIndex(np.arange(5, dtype='int64')), box_with_array)
    # td64 * td64 (a slice of itself) is an invalid op -> TypeError
    with pytest.raises(TypeError):
        tdarr * tdarr[:3]
    # length mismatch with a numeric array -> ValueError
    with pytest.raises(ValueError):
        tdarr * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
    """timedelta64 * timedelta64 is undefined and raises TypeError."""
    tdarr = tm.box_expected(
        TimedeltaIndex(np.arange(5, dtype='int64')), box_with_array)
    with pytest.raises(TypeError):
        tdarr * tdarr
# ------------------------------------------------------------------
# Operations with numeric others

@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
    """Multiplication by numeric scalars (int/float, plain and 0-d array)."""
    # GH#4521
    # divide/multiply by integers
    tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
    expected = Series(['-59 Days', '-59 Days', 'NaT'],
                      dtype='timedelta64[ns]')
    tdser = tm.box_expected(tdser, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = tdser * (-one)
    tm.assert_equal(result, expected)
    result = (-one) * tdser
    tm.assert_equal(result, expected)
    expected = Series(['118 Days', '118 Days', 'NaT'],
                      dtype='timedelta64[ns]')
    expected = tm.box_expected(expected, box_with_array)
    result = tdser * (2 * one)
    tm.assert_equal(result, expected)
    result = (2 * one) * tdser
    tm.assert_equal(result, expected)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
    """Division by numeric scalars halves values; reversed division raises."""
    # GH#4521
    # divide/multiply by integers
    tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
    expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
    tdser = tm.box_expected(tdser, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = tdser / two
    tm.assert_equal(result, expected)
    # numeric / td64 is not defined
    with pytest.raises(TypeError, match='Cannot divide'):
        two / tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
                                   'uint64', 'uint32', 'uint16', 'uint8',
                                   'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
                                    pd.Index([20, 30, 40]),
                                    Series([20, 30, 40])],
                         ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
    """Elementwise multiplication by numeric vectors of assorted dtypes."""
    # GH#4521
    # divide/multiply by integers
    xbox = get_upcast_box(box_with_array, vector)
    tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
    vector = vector.astype(dtype)
    expected = Series(['1180 Days', '1770 Days', 'NaT'],
                      dtype='timedelta64[ns]')
    tdser = tm.box_expected(tdser, box_with_array)
    expected = tm.box_expected(expected, xbox)
    result = tdser * vector
    tm.assert_equal(result, expected)
    result = vector * tdser
    tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
                                   'uint64', 'uint32', 'uint16', 'uint8',
                                   'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
                                    pd.Index([20, 30, 40]),
                                    Series([20, 30, 40])],
                         ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
    """Division by numeric vectors; reversed division must raise TypeError."""
    # GH#4521
    # divide/multiply by integers
    xbox = get_upcast_box(box_with_array, vector)
    tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
    vector = vector.astype(dtype)
    expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
                      dtype='timedelta64[ns]')
    tdser = tm.box_expected(tdser, box_with_array)
    expected = tm.box_expected(expected, xbox)
    result = tdser / vector
    tm.assert_equal(result, expected)
    # the error message varies by box / numpy path
    pattern = ('true_divide cannot use operands|'
               'cannot perform __div__|'
               'cannot perform __truediv__|'
               'unsupported operand|'
               'Cannot divide')
    with pytest.raises(TypeError, match=pattern):
        vector / tdser
    if not isinstance(vector, pd.Index):
        # Index.__rdiv__ won't try to operate elementwise, just raises
        result = tdser / vector.astype(object)
        if box_with_array is pd.DataFrame:
            expected = [tdser.iloc[0, n] / vector[n]
                        for n in range(len(vector))]
        else:
            expected = [tdser[n] / vector[n] for n in range(len(tdser))]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        with pytest.raises(TypeError, match=pattern):
            vector.astype(object) / tdser
@pytest.mark.parametrize('names', [(None, None, None),
                                   ('Egon', 'Venkman', None),
                                   ('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box_df_fail, names):
    """Name propagation when multiplying a td64 array by an int Series."""
    # GH#19042 test for correct name attachment
    box = box_df_fail  # broadcasts along wrong axis, but doesn't raise
    tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
                         name=names[0])
    # TODO: Should we be parametrizing over types for `ser` too?
    ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
    expected = Series(['0days', '1day', '4days', '9days', '16days'],
                      dtype='timedelta64[ns]',
                      name=names[2])
    tdi = tm.box_expected(tdi, box)
    # Index * Series gives a Series result
    box = Series if (box is pd.Index and type(ser) is Series) else box
    expected = tm.box_expected(expected, box)
    result = ser * tdi
    tm.assert_equal(result, expected)
    # The direct operation tdi * ser still needs to be fixed.
    result = ser.__rmul__(tdi)
    tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('names', [(None, None, None),
                                   ('Egon', 'Venkman', None),
                                   ('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_td64arr(self, box_with_array, names):
    """Name propagation for float-Series __rdiv__ with a td64 array."""
    # GH#19042 test for correct name attachment
    # TODO: the direct operation TimedeltaIndex / Series still
    # needs to be fixed.
    box = box_with_array
    tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
                         name=names[0])
    ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
    # to_array drops names, so the Series name wins in that case
    xname = names[2] if box is not tm.to_array else names[1]
    expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
                      dtype='timedelta64[ns]',
                      name=xname)
    xbox = box
    if box in [pd.Index, tm.to_array] and type(ser) is Series:
        xbox = Series
    tdi = tm.box_expected(tdi, box)
    expected = tm.box_expected(expected, xbox)
    result = ser.__rdiv__(tdi)
    if box is pd.DataFrame:
        # TODO: Should we skip this case sooner or test something else?
        assert result is NotImplemented
    else:
        tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps:
    """Arithmetic ops that must always raise for timedelta64 arrays."""

    def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
        """Exponentiation with a timedelta scalar is undefined in both directions."""
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        td1 = tm.box_expected(td1, box_with_array)
        # check that we are getting a TypeError
        # with 'operate' (from core/ops.py) for the ops that are not
        # defined
        pattern = 'operate|unsupported|cannot|not supported'
        with pytest.raises(TypeError, match=pattern):
            scalar_td ** td1
        with pytest.raises(TypeError, match=pattern):
            td1 ** scalar_td
| StarcoderdataPython |
8093516 | <gh_stars>0
'''
latin_text_cleaner.py
reads text in from one or more text files, removes the punctuation and header/footer info, and
writes the output to a file
bancks holmes
'''
import string
import numpy as np
from cltk.lemmatize.latin.backoff import BackoffLatinLemmatizer
import os
# First, build the list of input filenames from the ovid/ directory.
filenames = []
for filename in os.listdir('ovid'):
    filenames.append(str(filename))
# Then, concatenate them into one text file.
# https://stackoverflow.com/questions/13613336/python-concatenate-text-files
with open('corpora/all.txt', 'w') as outfile:
    for fname in filenames:
        with open('ovid/' + fname, 'r') as infile:
            for line in infile:
                outfile.write(line)
# Lemmatize the full corpus: strip punctuation, lowercase, tokenize,
# then map each token to its lemma with cltk's backoff lemmatizer.
lemmatizer = BackoffLatinLemmatizer()
ltr_str = ''
file = open('corpora/all.txt', 'r')
for line in file:
    ltr_str += str(line)
file.close()
# np.char ops work on a 0-d string array; punctuation is removed one symbol at a time
np_str = np.asarray(ltr_str)
for symbol in string.punctuation:
    np_str = np.char.replace(np_str, symbol, '')
np_str = np.char.lower(np_str)
tokens = np_str.tolist().split()
lemmatized = lemmatizer.lemmatize(tokens)
# lemmatize() yields (token, lemma) pairs; write only the lemmas
with open('corpora/all_lemmatized.txt', 'w') as lemmata:
    for parsed in lemmatized:
        lemmata.write(parsed[1] + ' ')
#print('all lemmata written successfully :)')
# NOTE(review): the variable names say "heroides" but the filter below selects
# files whose name contains 'met' (Metamorphoses?) -- confirm which corpus is
# actually intended before relying on the output file names.
her_filnames = []
for filename in filenames:
    if filename[5:8] == 'met':
        her_filnames.append(str(filename))
# Concatenate the selected files into corpora/met.txt
with open('corpora/met.txt', 'w') as outfile:
    for fname in her_filnames:
        with open('ovid/' + fname, 'r') as infile:
            for line in infile:
                outfile.write(line)
# Lemmatize the subset corpus (same pipeline as above; this should be a function)
her_str = ''
her_file = open('corpora/met.txt', 'r')
for line in her_file:
    her_str += str(line)
her_file.close()
her_np_str = np.asarray(her_str)
for symbol in string.punctuation:
    her_np_str = np.char.replace(her_np_str, symbol, '')
her_np_str = np.char.lower(her_np_str)
her_tokens = her_np_str.tolist().split()
her_lemmatized = lemmatizer.lemmatize(her_tokens)
with open('corpora/met_lemmatized.txt', 'w') as lemmata:
    for parsed in her_lemmatized:
        lemmata.write(parsed[1] + ' ')
print("heroides lemmata written successfully :')")
#write the lemmatized output to corpora/her_lemmatized.txt | StarcoderdataPython |
3218180 | <reponame>zero1number/redash<gh_stars>1000+
import functools
from flask_login import current_user
from flask_restful import abort
from funcy import flatten
# Convenience flags for the `need_view_only` argument of has_access().
view_only = True
not_view_only = False

# Access types recorded for object-level permission grants.
ACCESS_TYPE_VIEW = "view"
ACCESS_TYPE_MODIFY = "modify"
ACCESS_TYPE_DELETE = "delete"
ACCESS_TYPES = (ACCESS_TYPE_VIEW, ACCESS_TYPE_MODIFY, ACCESS_TYPE_DELETE)
def has_access(obj, user, need_view_only):
    """Dispatch the access check: API-key users against objects that carry an
    api_key go through the object check, everyone else through group checks."""
    uses_api_key = hasattr(obj, "api_key") and user.is_api_user()
    if uses_api_key:
        return has_access_to_object(obj, user.id, need_view_only)
    return has_access_to_groups(obj, user, need_view_only)
def has_access_to_object(obj, api_key, need_view_only):
    """Check api-key access to a single object.

    A matching object key (or a key of a dashboard containing the object)
    only ever grants view-level access, so the result is gated on
    ``need_view_only``.
    """
    # direct object key: grants access only when view-level is enough
    if obj.api_key == api_key:
        return need_view_only
    # the key may belong to a dashboard that contains this query
    if hasattr(obj, "dashboard_api_keys"):
        return api_key in obj.dashboard_api_keys and need_view_only
    return False
def has_access_to_groups(obj, user, need_view_only):
    """Check group-based access to ``obj`` (or to a raw groups mapping)."""
    # `obj` may be the object itself (with a .groups mapping) or the mapping directly
    groups = obj.groups if hasattr(obj, "groups") else obj
    # admins bypass group checks entirely
    if "admin" in user.permissions:
        return True
    matching_groups = set(groups.keys()).intersection(user.group_ids)
    if not matching_groups:
        return False
    # required_level: 1 when view-only access suffices, 2 when modify is needed
    required_level = 1 if need_view_only else 2
    # group_level: 1 (view-only) when every matching group grants only view
    # access, else 2. Presumably `groups` maps group id -> view_only flag(s);
    # funcy.flatten handles both scalar and list-valued entries -- TODO confirm.
    group_level = 1 if all(flatten([groups[group] for group in matching_groups])) else 2
    return required_level <= group_level
def require_access(obj, user, need_view_only):
    """Abort the request with HTTP 403 unless ``user`` may access ``obj``."""
    if has_access(obj, user, need_view_only):
        return
    abort(403)
class require_permissions(object):
    """Decorator factory: allow the wrapped view only when the current user
    holds the given permissions (all of them, or any one when ``allow_one``)."""

    def __init__(self, permissions, allow_one=False):
        self.permissions = permissions
        self.allow_one = allow_one

    def __call__(self, fn):
        @functools.wraps(fn)
        def decorated(*args, **kwargs):
            if self.allow_one:
                # any single permission is sufficient
                has_permissions = any([current_user.has_permission(permission) for permission in self.permissions])
            else:
                # all listed permissions must be present
                has_permissions = current_user.has_permissions(self.permissions)
            if has_permissions:
                return fn(*args, **kwargs)
            else:
                abort(403)
        return decorated
def require_permission(permission):
    """Decorator requiring a single permission."""
    return require_permissions((permission,))
def require_any_of_permission(permissions):
    """Decorator requiring at least one of the given permissions."""
    return require_permissions(permissions, True)
def require_admin(fn):
    """Decorator restricting a view to users with the 'admin' permission."""
    return require_permission("admin")(fn)
def require_super_admin(fn):
    """Decorator restricting a view to users with the 'super_admin' permission."""
    return require_permission("super_admin")(fn)
def has_permission_or_owner(permission, object_owner_id):
    """True when the current user owns the object or holds ``permission``."""
    return int(object_owner_id) == current_user.id or current_user.has_permission(
        permission
    )
def is_admin_or_owner(object_owner_id):
    """True when the current user is an admin or owns the object."""
    return has_permission_or_owner("admin", object_owner_id)
def require_permission_or_owner(permission, object_owner_id):
    """Abort with 403 unless the current user owns the object or holds ``permission``."""
    if not has_permission_or_owner(permission, object_owner_id):
        abort(403)
def require_admin_or_owner(object_owner_id):
    """Abort with 403 unless the current user is an admin or the object's owner."""
    if not is_admin_or_owner(object_owner_id):
        abort(403, message="You don't have permission to edit this resource.")
def can_modify(obj, user):
    """True when ``user`` may modify ``obj``: admin/owner, or an explicit modify grant."""
    return is_admin_or_owner(obj.user_id) or user.has_access(obj, ACCESS_TYPE_MODIFY)
def require_object_modify_permission(obj, user):
    """Abort with 403 unless ``user`` may modify ``obj``."""
    if not can_modify(obj, user):
        abort(403)
| StarcoderdataPython |
5160259 | <reponame>sunrabbit123/school-info_python
import asyncio
import datetime
from pytz import timezone as tz
import re
import schoolInfo.util as util
@util.except_keyError
async def meal(
    ATPT_OFCDC_SC_CODE: str,
    SD_SCHUL_CODE: str,
    MLSV_YMD: datetime.datetime = None,
    timezone: str = "Asia/Seoul",
    auth_key: str = None,
) -> dict:
    """Fetch school-meal (mealServiceDietInfo) rows from the NEIS open API.

    :param ATPT_OFCDC_SC_CODE: education-office code
    :param SD_SCHUL_CODE: school code
    :param MLSV_YMD: meal date; defaults to "now" in ``timezone``
    :param timezone: tz name used when MLSV_YMD is omitted
    :param auth_key: API key forwarded to util.url_manager
    :return: the list of meal rows from the API response
    """
    # default to the current date/time in the requested timezone
    MLSV_YMD = datetime.datetime.now(tz(timezone)) if MLSV_YMD is None else MLSV_YMD
    # strip non-digits from the datetime string and keep chars [2:8],
    # i.e. a YYMMDD string. NOTE(review): NEIS examples usually use YYYYMMDD
    # ([0:8]) -- confirm the API accepts the two-digit year form.
    MLSV__YMD = re.sub("[^0-9]", "", str(MLSV_YMD.__str__()))[2:8]
    addition = [
        f"MLSV_YMD={MLSV__YMD}",
        f"ATPT_OFCDC_SC_CODE={ATPT_OFCDC_SC_CODE}",
        f"SD_SCHUL_CODE={SD_SCHUL_CODE}",
    ]
    url: str = util.url_manager(
        type="mealServiceDietInfo", additions=addition, auth_key=auth_key
    ).url
| StarcoderdataPython |
1808380 | import os
import sys
import shutil
import numpy as np
import time, datetime
import torch
import random
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.utils.data.distributed
sys.path.append("../../")
from utils.utils import *
from torchvision import datasets, transforms
from torch.autograd import Variable
from resnet import ResNet50, channel_scale
# Command-line configuration for ImageNet training of the slimmable ResNet-50.
parser = argparse.ArgumentParser("ResNet50")
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--epochs', type=int, default=32, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--save', type=str, default='./models', help='path for saving trained models')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
# NOTE(review): print_freq is declared as float but used with `%` as a step
# counter in train()/validate(); int would be the natural type -- confirm.
parser.add_argument('--print_freq', type=float, default=1, help='report frequency')
parser.add_argument('-j', '--workers', default=40, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
args = parser.parse_args()

# number of ImageNet classes
CLASSES = 1000
# per-stage counts used when sampling width configurations
# (presumably stem + the four ResNet-50 stages [3, 4, 6, 3] -- TODO confirm)
stage_repeat = [1, 3, 4, 6, 3]

# Log to stdout and to log/log.txt simultaneously.
if not os.path.exists('log'):
    os.mkdir('log')
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join('log/log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
    """Train the slimmable ResNet-50 on ImageNet.

    Reads configuration from the module-level ``args``; resumes from
    ``args.save/checkpoint.pth.tar`` when present; saves a checkpoint (and a
    best-model copy) after each epoch.
    """
    if not torch.cuda.is_available():
        sys.exit(1)
    start_t = time.time()
    cudnn.benchmark = True
    cudnn.enabled = True
    logging.info("args = %s", args)
    model = ResNet50()
    logging.info(model)
    model = nn.DataParallel(model).cuda()
    # plain cross-entropy for evaluation, label-smoothed CE for training
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    # apply weight decay only to conv/fc weights, not to BN/bias parameters
    all_parameters = model.parameters()
    weight_parameters = []
    for pname, p in model.named_parameters():
        if 'fc' in pname or 'conv' in pname:
            weight_parameters.append(p)
    weight_parameters_id = list(map(id, weight_parameters))
    other_parameters = list(filter(lambda p: id(p) not in weight_parameters_id, all_parameters))
    optimizer = torch.optim.SGD(
        [{'params': other_parameters},
         {'params': weight_parameters, 'weight_decay': args.weight_decay}],
        args.learning_rate,
        momentum=args.momentum,
    )
    # step-decay LR schedule at 1/4, 1/2 and 3/4 of training
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[args.epochs // 4, args.epochs // 2, args.epochs // 4 * 3],
        gamma=0.1)
    # resume from an existing checkpoint if one is present
    start_epoch = 0
    best_top1_acc = 0
    checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_tar):
        logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
        checkpoint = torch.load(checkpoint_tar)
        start_epoch = checkpoint['epoch']
        best_top1_acc = checkpoint['best_top1_acc']
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint {} epoch = {}".format(checkpoint_tar, checkpoint['epoch']))
    # fast-forward the LR schedule to the resumed epoch
    for epoch in range(start_epoch):
        scheduler.step()
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    crop_scale = 0.08
    lighting_param = 0.1
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(crop_scale, 1.0)),
        Lighting(lighting_param),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize])
    train_dataset = datasets.ImageFolder(
        traindir,
        transform=train_transforms)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    epoch = start_epoch
    while epoch < args.epochs:
        train_obj, train_top1_acc, train_top5_acc, epoch = train(epoch, train_loader, model, criterion_smooth, optimizer, scheduler)
        valid_obj, valid_top1_acc, valid_top5_acc = validate(epoch, val_loader, model, criterion, args)
        is_best = False
        if valid_top1_acc > best_top1_acc:
            best_top1_acc = valid_top1_acc
            is_best = True
        save_checkpoint({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'best_top1_acc': best_top1_acc,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)
        epoch += 1
    # BUG FIX: seconds convert to hours by dividing by 3600; the original
    # divided by 36000 and under-reported the training time by 10x.
    training_time = (time.time() - start_t) / 3600
    print('total training time = {} hours'.format(training_time))
def train(epoch, train_loader, model, criterion, optimizer, scheduler):
    """Run one training epoch with a randomly sampled channel-width
    configuration per batch.

    Returns (avg_loss, avg_top1, avg_top5, epoch).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))
    model.train()
    end = time.time()
    scheduler.step()
    for batch_idx, (images, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        images = images.cuda()
        target = target.cuda()
        # sample a random width-scale configuration for this batch;
        # the last stage (and final block) always use the full width (-1)
        mid_scale_ids = np.random.randint(low=0, high=len(channel_scale), size=16)
        overall_scale_ids = []
        # BUG FIX: the original inner loop reused `i`, clobbering the batch
        # index from enumerate() and breaking the print-frequency check below.
        for stage in range(len(stage_repeat) - 1):
            overall_scale_ids += [np.random.randint(low=0, high=len(channel_scale))] * stage_repeat[stage]
        overall_scale_ids += [-1] * (stage_repeat[-1] + 1)
        # compute output
        logits = model(images, overall_scale_ids, mid_scale_ids)
        loss = criterion(logits, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(logits, target, topk=(1, 5))
        n = images.size(0)
        losses.update(loss.item(), n)  # accumulated loss
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_idx % args.print_freq == 0:
            progress.display(batch_idx)
    return losses.avg, top1.avg, top5.avg, epoch
def validate(epoch, val_loader, model, criterion, args):
    """Evaluate the model over ``val_loader`` with one randomly sampled
    channel-width configuration held fixed for the whole pass.

    Returns (avg_loss, avg_top1, avg_top5).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')
    model.eval()
    # sample the width configuration once; the last stage uses full width (-1)
    mid_scale_ids = np.random.randint(low=0, high=len(channel_scale), size=16)
    overall_scale_ids = []
    for i in range(len(stage_repeat)-1):
        overall_scale_ids += [np.random.randint(low=0, high=len(channel_scale))]* stage_repeat[i]
    overall_scale_ids += [-1] * (stage_repeat[-1] + 1)
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda()
            target = target.cuda()
            # compute output
            logits = model(images, overall_scale_ids, mid_scale_ids)
            loss = criterion(logits, target)
            # measure accuracy and record loss
            pred1, pred5 = accuracy(logits, target, topk=(1, 5))
            n = images.size(0)
            losses.update(loss.item(), n)
            top1.update(pred1[0], n)
            top5.update(pred5[0], n)
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4886323 | # -*- coding: utf-8 -*-
"""
Copyright 2018 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import tensorflow as tf
import numpy as np
import time
import math
from kin_kor_char_parser import decompose_str_as_one_hot
LOCAL_DATASET_PATH = '../sample_data/kin/'
from soy.soy.nlp.tokenizer import CohesionTokenizer, RegexTokenizer
from gensim.models import Word2Vec
class KinQueryDataset:
    """
    Reads the kin (KnowledgeiN) data and serves it as (query1, query2, label) tuples.
    """
    def __init__(self, dataset_path: str, max_length: int):
        """
        :param dataset_path: dataset root path
        :param max_length: maximum string length (400)
        """
        # paths of the raw queries and labels
        queries_path = os.path.join(dataset_path, 'train', 'train_data')
        labels_path = os.path.join(dataset_path, 'train', 'train_label')
        # Read the kin data and run preprocessing.
        # test_idx stays -1 unless preprocessing is asked to split off a test set.
        self.test_idx = -1
        with open(queries_path, 'rt', encoding='utf8') as f:
            #self.queries1, self.queries2,self.queries1_test,self.queries2_test,self.test_idx = preprocess2(f.readlines(), max_length,test_data=True)
            self.queries1, self.queries2= preprocess2(f.readlines(), max_length,test_data=False)
            #self.queries,self.queries_test,self.test_idx = preprocess_origin(f.readlines(),max_length,test_data=True)
        # Read the labels as an (N, 1) float32 array.
        with open(labels_path) as f:
            self.labels = np.array([[np.float32(x)] for x in f.readlines()])
        if self.test_idx != -1:
            # split the tail of the labels off as the held-out test set
            self.labels_test = self.labels[self.test_idx:]
            self.labels = self.labels[:self.test_idx]
            print("test data splited size %d" % self.test_idx)

    def __len__(self):
        """
        :return: the total number of labeled examples
        """
        return len(self.labels)

    def __getitem__(self, idx):
        """
        :param idx: index of the required example
        :return: the (query1, query2, label) triple at that index
        """
        return self.queries1[idx], self.queries2[idx], self.labels[idx]
def add_noise(query):
    """Return *query* perturbed by uniform multiplicative noise in
    roughly ±0.05% of each value, rounded back to int32.

    Uses the global numpy RNG (one ``rand(1)`` draw per call).
    """
    perturbed = query + query * (0.001) * ((np.random.rand(1) - 0.5))
    return np.rint(perturbed).astype(np.int32)
def data_augmentation(queries1, queries2, labels):
    """Double the dataset: the originals followed by a noised copy of every
    query, with the labels repeated in the same order.

    Returns (q1_expand, q2_expand, l_expand), each twice the input length.
    """
    # Add noise in query data
    def get_noised_queries(queries):
        # Allocate an array twice as long; first half = originals,
        # second half = add_noise() applied to each original row.
        q_expand = np.zeros((len(queries) * 2, len(queries[0])), dtype=np.int32)
        # NOTE(review): reseeding from wall-clock time on every call makes runs
        # non-reproducible and can repeat noise across rapid calls; consider
        # seeding once at program start instead.
        np.random.seed(int(time.time()))
        for i in range(len(q_expand)):
            if i < len(queries):
                q_expand[i] = queries[i]
            else:
                noised_val = add_noise(queries[i - len(queries)])
                q_expand[i] = noised_val
        return q_expand
    def get_double_labels(labels):
        # Repeat the (N, 1) label column so it aligns with the doubled queries.
        l_expand = np.zeros((len(labels) * 2, 1), dtype=np.int32)
        for i in range(len(l_expand)):
            if i < len(labels):
                l_expand[i] = labels[i]
            else:
                l_expand[i] = labels[i - len(labels)]
        return l_expand
    q1_expand = get_noised_queries(queries1)
    q2_expand = get_noised_queries(queries2)
    l_expand = get_double_labels(labels)
    return q1_expand, q2_expand, l_expand
def _pad_sequences(sequences, max_length):
    """Zero-pad/truncate each id sequence to *max_length* into an int32 matrix."""
    padded = np.zeros((len(sequences), max_length), dtype=np.int32)
    for row, seq in enumerate(sequences):
        length = min(len(seq), max_length)
        padded[row, :length] = np.array(seq[:length])
    return padded


def preprocess2(data: list, max_length: int, test_data: bool):
    """Turn raw tab-separated query-pair lines into fixed-size char-id vectors.

    Each input line holds two tab-separated queries.  Both queries are
    decomposed into character one-hot ids, truncated to *max_length* and
    zero-padded on the right.

    :param data: list of raw "query1<TAB>query2" lines
    :param max_length: fixed output vector length
    :param test_data: when True, hold out the last 3% of rows as a test split
    :return: (q1, q2) when *test_data* is False, otherwise
             (q1_train, q2_train, q1_test, q2_test, train_size)
    """
    query1 = []
    query2 = []
    for line in data:
        q1, q2 = line.split('\t')
        query1.append(q1)
        query2.append(q2.replace('\n', ''))
    vectorized_data1 = [decompose_str_as_one_hot(datum, warning=False) for datum in query1]
    vectorized_data2 = [decompose_str_as_one_hot(datum, warning=False) for datum in query2]
    if test_data:
        data_size = len(data)
        test_size = int(data_size * 0.03)
        train_size = data_size - test_size
        # Same padding logic for all four splits -- the original repeated it
        # in four hand-written loops.
        return (_pad_sequences(vectorized_data1[:train_size], max_length),
                _pad_sequences(vectorized_data2[:train_size], max_length),
                _pad_sequences(vectorized_data1[train_size:], max_length),
                _pad_sequences(vectorized_data2[train_size:], max_length),
                train_size)
    # Dead train/test size computation of the original else-branch removed.
    return (_pad_sequences(vectorized_data1, max_length),
            _pad_sequences(vectorized_data2, max_length))
def preprocess_origin(data: list, max_length: int, test_data: bool):
    """Turn raw strings into fixed-size char-id vectors (single-query variant).

    Strings are decomposed into character one-hot ids, truncated to
    *max_length* and zero-padded on the right.

    :param data: list of raw strings
    :param max_length: fixed output vector length
    :param test_data: when True, hold out the last 3% of rows as a test split
    :return: padded array, or (train, test, train_size) when *test_data* is True
    """
    vectorized_data = [decompose_str_as_one_hot(datum, warning=False) for datum in data]
    if test_data:
        data_size = len(data)
        test_size = int(data_size * 0.03)
        train_size = data_size - test_size
        zero_padding = np.zeros((train_size, max_length), dtype=np.int32)
        zero_padding_test = np.zeros((test_size, max_length), dtype=np.int32)
        for idx, seq in enumerate(vectorized_data):
            # min() replaces the original's duplicated clamp-then-assign
            # branches; behavior is identical for both short and long rows.
            length = min(len(seq), max_length)
            if idx < train_size:
                zero_padding[idx, :length] = np.array(seq)[:length]
            else:
                zero_padding_test[idx - train_size, :length] = np.array(seq)[:length]
        return zero_padding, zero_padding_test, train_size
    zero_padding = np.zeros((len(data), max_length), dtype=np.int32)
    for idx, seq in enumerate(vectorized_data):
        length = min(len(seq), max_length)
        zero_padding[idx, :length] = np.array(seq)[:length]
    return zero_padding
def preprocess(data: list, max_length: int):
    """Word2Vec preprocessing: map each string to a (max_length, 16, 1) float matrix.

    A skip-gram Word2Vec model is trained on the fly from the tokenized
    input, so the embeddings are corpus-specific and non-deterministic
    across runs.

    :param data: list of raw query strings
    :param max_length: maximum number of tokens kept per string
    :return: float32 array of shape (len(data), max_length, 16, 1)
    """
    q_words = []
    vector_size = 16
    tokenizer = RegexTokenizer()
    for d in data:
        q_words.append(tokenizer.tokenize(d))
    # sg=1 -> skip-gram; words seen fewer than 2 times are dropped.
    # NOTE(review): size=/iter= and model[wd] below are the pre-4.0 gensim
    # API -- confirm gensim<4 is pinned.
    model = Word2Vec(q_words,size=vector_size,window=2,min_count=2,iter=100,sg=1)
    vocab_len = len(model.wv.vocab)
    print("word2vec -> vector size :", vector_size)
    print("word2vec -> vocab size :", vocab_len)
    zero_padding = np.zeros((len(data), max_length, vector_size), dtype=np.float32)
    for i,d in enumerate(data):
        # Each string is tokenized a second time here; relies on
        # RegexTokenizer being deterministic.
        for j,wd in enumerate(tokenizer.tokenize(d)):
            if j < max_length and wd in model.wv.vocab:
                zero_padding[i,j] = model[wd]
    # Trailing singleton channel axis for 2-D-conv style consumers.
    zero_padding = np.expand_dims(zero_padding, axis=3)
    return zero_padding
# NOTE(review): dead code kept as a module-level string literal -- an older
# per-query word2vec variant of preprocess(); retained for reference only.
'''
def query2vec_concat(q1,q2):
    zero_padding1 = np.zeros((len(q1), max_length , vector_size), dtype=np.float32)
    for i,q in enumerate(q1):
        idx = 0
        for wq in (q):
            if wq in model.wv.vocab:
                if idx < max_length: # 0 ~ 49 is for query1 vertor
                    zero_padding1[i,idx] = np.array(model[wq])
                idx += 1
    zero_padding2 = np.zeros((len(q2), max_length , vector_size), dtype=np.float32)
    for i,q in enumerate(q2):
        idx = 0
        for wq in (q):
            if wq in model.wv.vocab:
                if idx < max_length: # 50 ~ 99 is for query2 vertor
                    zero_padding2[i,idx] = np.array(model[wq])
                idx += 1
    return zero_padding1, zero_padding2
qvec1, qvec2 = query2vec_concat(query1,query2)
del model
print("Query vec :",qvec1.shape)
qvec1_expand = np.expand_dims(qvec1,axis=3)
qvec2_expand = np.expand_dims(qvec2,axis=3)
print("Query vec expand :",qvec1_expand.shape)
return qvec1_expand, qvec2_expand'''
def _batch_loader(iterable, n=1):
    """Yield successive size-*n* slices of *iterable* (last may be shorter).

    Plays the same role as PyTorch's DataLoader for plain sequences.

    :param iterable: sliceable data supporting len()
    :param n: batch size
    """
    total = len(iterable)
    for start in range(0, total, n):
        stop = min(start + n, total)
        yield iterable[start:stop]
#dataset = KinQueryDataset(LOCAL_DATASET_PATH,400) | StarcoderdataPython |
3339636 | # encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QCompleter(__PySide_QtCore.QObject):
    """Auto-generated introspection stub for PySide.QtGui.QCompleter.

    Produced by a skeleton generator (see file header) for IDE code
    completion only: every method body is ``pass`` and the real C++
    signatures are unknown.  Do not edit by hand.
    """
    def activated(self, *args, **kwargs): # real signature unknown
        """ Signal """
        pass
    def caseSensitivity(self, *args, **kwargs): # real signature unknown
        pass
    def complete(self, *args, **kwargs): # real signature unknown
        pass
    def completionColumn(self, *args, **kwargs): # real signature unknown
        pass
    def completionCount(self, *args, **kwargs): # real signature unknown
        pass
    def completionMode(self, *args, **kwargs): # real signature unknown
        pass
    def completionModel(self, *args, **kwargs): # real signature unknown
        pass
    def completionPrefix(self, *args, **kwargs): # real signature unknown
        pass
    def completionRole(self, *args, **kwargs): # real signature unknown
        pass
    def currentCompletion(self, *args, **kwargs): # real signature unknown
        pass
    def currentIndex(self, *args, **kwargs): # real signature unknown
        pass
    def currentRow(self, *args, **kwargs): # real signature unknown
        pass
    def event(self, *args, **kwargs): # real signature unknown
        pass
    def eventFilter(self, *args, **kwargs): # real signature unknown
        pass
    def highlighted(self, *args, **kwargs): # real signature unknown
        """ Signal """
        pass
    def maxVisibleItems(self, *args, **kwargs): # real signature unknown
        pass
    def model(self, *args, **kwargs): # real signature unknown
        pass
    def modelSorting(self, *args, **kwargs): # real signature unknown
        pass
    def pathFromIndex(self, *args, **kwargs): # real signature unknown
        pass
    def popup(self, *args, **kwargs): # real signature unknown
        pass
    def setCaseSensitivity(self, *args, **kwargs): # real signature unknown
        pass
    def setCompletionColumn(self, *args, **kwargs): # real signature unknown
        pass
    def setCompletionMode(self, *args, **kwargs): # real signature unknown
        pass
    def setCompletionPrefix(self, *args, **kwargs): # real signature unknown
        pass
    def setCompletionRole(self, *args, **kwargs): # real signature unknown
        pass
    def setCurrentRow(self, *args, **kwargs): # real signature unknown
        pass
    def setMaxVisibleItems(self, *args, **kwargs): # real signature unknown
        pass
    def setModel(self, *args, **kwargs): # real signature unknown
        pass
    def setModelSorting(self, *args, **kwargs): # real signature unknown
        pass
    def setPopup(self, *args, **kwargs): # real signature unknown
        pass
    def setWidget(self, *args, **kwargs): # real signature unknown
        pass
    def setWrapAround(self, *args, **kwargs): # real signature unknown
        pass
    def splitPath(self, *args, **kwargs): # real signature unknown
        pass
    def widget(self, *args, **kwargs): # real signature unknown
        pass
    def wrapAround(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # NOTE(review): the generated lines below reference the fully-qualified
    # PySide.QtGui name, which this stub never imports -- the stub is for
    # static introspection, not for importing at runtime.
    CaseInsensitivelySortedModel = PySide.QtGui.QCompleter.ModelSorting.CaseInsensitivelySortedModel
    CaseSensitivelySortedModel = PySide.QtGui.QCompleter.ModelSorting.CaseSensitivelySortedModel
    CompletionMode = None # (!) real value is "<type 'PySide.QtGui.QCompleter.CompletionMode'>"
    InlineCompletion = PySide.QtGui.QCompleter.CompletionMode.InlineCompletion
    ModelSorting = None # (!) real value is "<type 'PySide.QtGui.QCompleter.ModelSorting'>"
    PopupCompletion = PySide.QtGui.QCompleter.CompletionMode.PopupCompletion
    staticMetaObject = None # (!) real value is '<PySide.QtCore.QMetaObject object at 0x0000000003F5D588>'
    UnfilteredPopupCompletion = PySide.QtGui.QCompleter.CompletionMode.UnfilteredPopupCompletion
    UnsortedModel = PySide.QtGui.QCompleter.ModelSorting.UnsortedModel
| StarcoderdataPython |
1706341 | import glob
def get_call_in_pa(program, call_in_pa):
    """Collect the calls listed on the second line of
    ``<program>/initial-all/complete`` into *call_in_pa* (in place).

    The line is comma-separated; the leading field and the trailing field
    (the leftover after the final comma) are skipped.

    :param program: benchmark directory name
    :param call_in_pa: set filled with pointer-analysis call names
    """
    # with-statement fixes the original's leaked file handle.
    with open(program + "/initial-all/complete") as f:
        f.readline()  # skip the header line
        calls = f.readline().split(",")
    for i in range(1, len(calls) - 1):
        call_in_pa.add(calls[i])
def get_call_info(program):
    """Compare calls found by pointer analysis with those seen in strace
    traces for *program*, then print a summary.

    Trace calls missing from the pointer-analysis set are printed as they
    are encountered.  Converted from Python-2-only ``print`` statements to
    print() calls so the file parses under Python 3.
    """
    call_in_pa = set()
    get_call_in_pa(program, call_in_pa)
    call_in_trace = set()
    for fname in glob.glob(program + "/strace-translated/*.trace"):
        with open(fname) as f:
            for line in f:
                if line.find("@") != -1:
                    # NOTE(review): if a line has no trailing newline,
                    # find("\n") is -1 and the last character is dropped.
                    call = line[0:line.find("\n")]
                    if call in call_in_pa:
                        call_in_trace.add(call)
                    else:
                        # Trace call absent from the pointer-analysis set.
                        print(call)
    print(program)
    print("len of call_in_pa", len(call_in_pa))
    print("len of call_in_trace", len(call_in_trace))
    print(call_in_trace, "\n")
def main():
    """Report call coverage for the selected benchmark.

    Other benchmarks can be analyzed the same way, e.g. flex_v5, grep_v5,
    gzip_v5, sed_v7, bash_v6, vim_v7, proftpd.
    """
    get_call_info("nginx")


main()
| StarcoderdataPython |
60159 | """Base Class for a Solver. This class contains the different methods that
can be used to solve an environment/problem. There are methods for
mini-batch training, control, etc...
The idea is that this class will contain all the methods that the different
algorithms would need. Then we can simply call this class in the solver scripts
and use its methods.
I'm still torn between using a class or just using a script.
"""
from .evaluator import Evaluator
from .interrogator import Interrogator
import torch
class Solver(object):
    """Bundles an environment, a search algorithm and a logger, and drives
    the run/score/update cycle through an Evaluator/Interrogator pair.

    "forward" runs the model on the environment and "backward" scores the
    result and feeds it back to the algorithm -- solver steps, not autograd
    passes.  Keeping this in a class leaves room for additional,
    task-specific training routines to be added transparently later.
    """

    def __init__(self, slv_params):
        print("Creating Solver")
        self.env = slv_params['environment']
        self.alg = slv_params['algorithm']
        self.logger = slv_params['logger']
        self.evaluator = Evaluator()
        self.interrogator = Interrogator()

    def forward(self):
        """Run the algorithm's model against the environment."""
        self.interrogator.set_inference(self.alg.model, self.env)

    def backward(self):
        """Score the latest inference and hand the score to the algorithm."""
        self.evaluator.evaluate(self.env, self.interrogator.inference)
        feedback = self.evaluator.score
        self.alg.step(feedback)
        self.alg.print_state()

    def save(self, path=''):
        """Persist the elite model's weights (in-house algorithms only, not SGD)."""
        torch.save(self.alg.model.state_dict(), path + "model_elite.pth")

    def save_pool_weights(self, models, path):
        """Persist every model of *models* as <path>model_<i>.pth."""
        for index, model in enumerate(models):
            torch.save(model.state_dict(), path + "model_" + str(index) + ".pth")

    def save_elite_weights(self, path, name=''):
        """Persist the elite model under <path><name>.pth (default model_elite.pth)."""
        filename = 'model_elite.pth' if name == '' else name + '.pth'
        torch.save(self.alg.model.state_dict(), path + filename)

    def load(self, path, name="model_elite"):
        """Load weights into the elite model and switch it to eval mode
        (in-house algorithms only, not SGD)."""
        checkpoint = path + name + ".pth"
        print("Loading weights in: " + checkpoint)
        self.alg.model.load_state_dict(torch.load(checkpoint))
        self.alg.model.eval()
#
| StarcoderdataPython |
9700096 | <reponame>bigstepinc/metal_cloud_ansible_modules
#!/usr/bin/python
# Copyright: (c) 2018, Bigstep, inc
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'bigstep'
}
DOCUMENTATION = '''
---
module: metalcloud_infrastructure_deploy
short_description: deploy an infrastructure
version_added: "1.0"
description:
- deploys an infrastructure
- parameters match infrastructure_deploy function form the API
- https://api.bigstep.com/metal-cloud#infrastructure_deploy
options:
user:
description:
- username
required: true
api_key:
description:
- api key
required: true
api_endpoint:
description:
- endpoint
required: true
'''
EXAMPLES = '''
# Pass in a message
- name: deploy changes if needed.
metalcloud_infrastructure_deploy:
infrastructure_id: "{{infrastructure.infrastructure_id}}"
user: "{{metalcloud_user}}"
api_key: "{{metalcloud_api_key}}"
api_endpoint: "{{metalcloud_api_endpoint}}"
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from metal_cloud_sdk.clients.api import API
from jsonrpc2_base.plugins.client.signature_add import SignatureAdd
import time
def run_module():
    """Ansible entry point: trigger a deploy for an infrastructure if it
    has pending (not yet started) changes, optionally waiting for the
    deploy to finish.
    """
    # Arguments/parameters a user can pass to the module.  no_log keeps the
    # API key out of Ansible's logs.
    module_args = dict(
        user=dict(type='str', required=True),
        api_key=dict(type='str', required=True, no_log=True),
        api_endpoint=dict(type='str', required=True),
        infrastructure_id=dict(type='int', required=True),
        shutdown_options=dict(type='dict', required=False, default=None),
        deploy_options=dict(type='dict', required=False, default=None),
        allow_data_loss=dict(type='bool', required=False, default=False),
        skip_ansible=dict(type='bool', required=False, default=False),
        wait_for_deploy=dict(type='bool', required=False, default=True),
        wait_timeout=dict(type='int', required=False, default=60 * 60),
    )

    # 'changed' is reported back to Ansible; it is flipped to True only when
    # a deploy is actually triggered (the original set it unconditionally,
    # so no-op runs were reported as changes).
    result = dict(
        changed=False,
        original_message='',
        message=''
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False
    )

    mc_client = API.getInstance(
        {"strJSONRPCRouterURL": module.params['api_endpoint']},
        [SignatureAdd(module.params['api_key'], {})]
    )

    obj = mc_client.infrastructure_get(module.params['infrastructure_id'])

    # Deploy only when there are pending operations.
    if obj.infrastructure_operation.infrastructure_deploy_status == "not_started":
        mc_client.infrastructure_deploy(
            module.params['infrastructure_id'],
            module.params['shutdown_options'],
            module.params['deploy_options'],
            module.params['allow_data_loss'],
            module.params['skip_ansible'])
        result['changed'] = True

    if module.params['wait_for_deploy']:
        # Poll every 30s until the deploy finishes or wait_timeout seconds
        # elapse (the original comment wrongly said "5 minutes").
        timeout = time.time() + module.params['wait_timeout']
        while True:
            obj = mc_client.infrastructure_get(module.params['infrastructure_id'])
            if obj.infrastructure_operation.infrastructure_deploy_status == 'finished':
                break
            if time.time() < timeout:
                time.sleep(30)
            else:
                # Exception (not BaseException) so generic handlers catch it.
                raise Exception("Deploy ongoing for more than " + str(module.params['wait_timeout']) + " seconds")

    module.exit_json(**result)
def main():
    """Entry-point wrapper so the module can be imported without side effects."""
    run_module()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8078086 | r"""Test :py:mod:`lmp.util.dset` signature."""
import inspect
from inspect import Parameter, Signature
from typing import Optional
import lmp.util.dset
from lmp.dset import BaseDset
def test_module_function():
    """Ensure lmp.util.dset.load is a function with the expected signature."""
    assert inspect.isfunction(lmp.util.dset.load)
    dset_name_param = Parameter(
        name='dset_name',
        kind=Parameter.POSITIONAL_OR_KEYWORD,
        default=Parameter.empty,
        annotation=str,
    )
    ver_param = Parameter(
        name='ver',
        kind=Parameter.POSITIONAL_OR_KEYWORD,
        default=None,
        annotation=Optional[str],
    )
    expected = Signature(
        parameters=[dset_name_param, ver_param],
        return_annotation=BaseDset,
    )
    assert inspect.signature(lmp.util.dset.load) == expected
| StarcoderdataPython |
1933599 | # 1.You are given a dataset, which is present in the LMS, containing the number of hurricanes occurring in the
# United States along the coast of the Atlantic. Load the data from the dataset into your program and plot a
# Bar Graph of the data, taking the Year as the x-axis and the number of hurricanes occurring as the Y-axis.
import pandas as pd
import matplotlib.pyplot as plt
# Load the hurricane counts and plot them as a bar graph: Year on the
# x-axis, number of hurricanes on the y-axis.
df = pd.read_csv("Hurricanes.csv")  # expects 'Year' and 'Hurricanes' columns
x = df["Year"]
y = df["Hurricanes"]
plt.bar(x,y)
plt.xlabel("Year")
plt.ylabel("Hurricanes")
plt.grid()
plt.title("Hurricanes vs Year")
plt.setp(plt.gca().get_xticklabels(), rotation=90, horizontalalignment='right') # Rotate Axis Labels
# Bug fix: the last line carried fused dataset residue ("| StarcoderdataPython"),
# which raised NameError at runtime.
plt.show()
11322361 | <filename>qf_25_导入模块的语法.py
# Module: in Python, a single .py file can be thought of as a module
# Not every .py file can be imported as a module, though
# For a .py file to be importable, its module name must follow identifier naming rules
# To make development easier, Python ships with many built-in modules
# 5 import styles
import time # 1. "import <module>" imports a module directly
from random import randint # 2. "from <module> import <name>" imports one function/variable from a module
from math import * # 3. "from <module> import *" imports all of the module's functions and variables
import datetime as dt # 4. import a module under an alias
from copy import deepcopy as dp # 5. "from <module> import <name> as <alias>"
# --1-------------------------
# Once imported, the module's functions and variables are available
print(time.time())
# --2-------------------------
x = randint(0, 2) # random integer in [0, 2]
print(x)
# --3-------------------------
print(pi)
# --4-------------------------
print(dt.time)
# --5-------------------------
dp(['hello', 'good', [1, 2, 3], 'hi'])
# ------------------------------------------------
# "from <module_name> import *" imports "all" of the module's variables and functions
# It really reads the module's __all__ attribute to see which names are listed there
# Only when the module defines no __all__ are all names not starting with _ imported
from module.demo import *
# With "from demo import *" the module name is not needed at the call site
print(m) # yes
test()
# foo() # name 'foo' is not defined
# names outside __all__ are still reachable via a regular module import
import module.demo as demo
print(demo.n)
| StarcoderdataPython |
1751715 | <gh_stars>0
import sqlite3
from tkinter import *
from tkinter import ttk
from PIL import ImageTk,Image
from tkinter import messagebox
import sqlite3
def bookRegister():
    """Read the add-book form fields and insert a new row into bookTable.

    All four fields are required; a message box reports success or failure.
    The table is created on first use.  Reads the module-level widgets
    created by addBook() (bookInfo1..3, selected).
    """
    bid = bookInfo1.get()
    title = bookInfo2.get()
    author = bookInfo3.get()
    status = selected.get()
    if bid == "" or title == "" or author == "" or status == "":
        messagebox.showerror("Error", "All fields are required !")
        return
    con = None
    try:
        con = sqlite3.connect("main.db")
        cur = con.cursor()
        cur.execute("""CREATE TABLE IF NOT EXISTS bookTable (book_id varchar(20) PRIMARY KEY,
                        book_title varchar(50),
                        author varchar(30),
                        status varchar(10))""")
        # Parameterized query guards against SQL injection from form input.
        cur.execute("insert into bookTable (book_id,book_title,author,status) values(?,?,?,?)",
                    (bid, title, author, status))
        con.commit()
        messagebox.showinfo('Success', "Book added successfully")
    except Exception as e:
        print(e)
        messagebox.showerror("Error", "Can't add data into Database")
    finally:
        # Close the connection on every path (the original leaked it on error).
        if con is not None:
            con.close()
def addBook():
    """Open the "Add Books" window: heading, three entry fields (ID, title,
    author), availability radio buttons and Submit/Quit buttons.

    Widgets are stored in module-level globals so bookRegister() can read
    the entered values when Submit is pressed.  Blocks in root.mainloop()
    until the window is closed.
    """
    global bookInfo1, bookInfo2, bookInfo3, bookInfo4, Canvas1, con, cur, bookTable, root, selected
    root = Tk()
    selected = StringVar()
    root.title("Library")
    root.minsize(width=400,height=400)
    root.geometry("600x500")
    headingFrame1 = Frame(root,bd=5)
    headingFrame1.place(relx=0.25,rely=0.1,relwidth=0.5,relheight=0.13)
    headingLabel = Label(headingFrame1, text="Add Books", fg='green', font=('Courier',25,"bold"))
    headingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)
    labelFrame = Frame(root,bg="navy")
    labelFrame.place(relx=0.1,rely=0.2,relwidth=0.8,relheight=0.6)
    # Book ID
    lb1 = Label(labelFrame,text="Book ID : ", bg='navy', fg='white',font=("Arial",15))
    lb1.place(relx=0.05,rely=0.2, relheight=0.08)
    bookInfo1 = Entry(labelFrame,font=("Arial",15))
    bookInfo1.place(relx=0.5,rely=0.2, relwidth=0.45,relheight=0.08)
    # Title
    lb2 = Label(labelFrame,text="Title : ", bg='navy', fg='white',font=("Arial",15))
    lb2.place(relx=0.05,rely=0.35, relheight=0.08)
    bookInfo2 = Entry(labelFrame,font=("Arial",15))
    bookInfo2.place(relx=0.5,rely=0.35, relwidth=0.45, relheight=0.08)
    # Book Author
    lb3 = Label(labelFrame,text="Author : ", bg='navy', fg='white',font=("Arial",15))
    lb3.place(relx=0.05,rely=0.50, relheight=0.08)
    bookInfo3 = Entry(labelFrame,font=("Arial",15))
    bookInfo3.place(relx=0.5,rely=0.50, relwidth=0.45, relheight=0.08)
    # Book Status
    lb4 = Label(labelFrame,text="Status : ", bg='navy', fg='white',font=("Arial",15))
    lb4.place(relx=0.05,rely=0.65, relheight=0.08)
    s = ttk.Style()
    s.configure('Wild.TRadiobutton', # First argument is the name of style. Needs to end with: .TRadiobutton
        background="navy", # Setting background to our specified color above
        foreground = "white")
    # NOTE(review): "Avaliable" is a user-visible typo; fixing it means
    # changing both the label text and the stored "avaliable" value, plus
    # any rows already in the database -- left unchanged here.
    r1 = ttk.Radiobutton( text='Avaliable', value="avaliable", variable=selected,style = 'Wild.TRadiobutton')
    r1.place(relx =.5, rely=0.6 ,relwidth=.17)
    r2 = ttk.Radiobutton( text='Issued', value="issued", variable=selected,style = 'Wild.TRadiobutton')
    r2.place(relx =.71, rely=0.6 ,relwidth=.17)
    # r1 = ttk.Radiobutton( text='Male', value="male", variable=selected)
    # r1.place(relx =.5, rely=0.6 ,relwidth=.17)
    # r2 = ttk.Radiobutton( text='Female', value="female", variable=selected)
    # r2.place(relx =.71, rely=0.6 ,relwidth=.17)
    # bookInfo4 = Entry(labelFrame,font=("Arial",15))
    # bookInfo4.place(relx=0.5,rely=0.65, relwidth=0.45, relheight=0.08)
    #Submit Button
    SubmitBtn = Button(root,text="SUBMIT",bg='#d1ccc0', fg='black',command=bookRegister)
    SubmitBtn.place(relx=0.28,rely=0.9, relwidth=0.18,relheight=0.08)
    quitBtn = Button(root,text="Quit",bg='#f7f1e3', fg='black', command=root.destroy)
    quitBtn.place(relx=0.53,rely=0.9, relwidth=0.18,relheight=0.08)
    root.mainloop()
if __name__ == '__main__':
    addBook()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.