Schema of the flattened table (113 columns, one row per source file):

| column(s) | dtype |
|---|---|
| hexsha, ext, lang, content | string |
| size | int64 |
| max_stars_repo_path, max_stars_repo_name, max_stars_repo_head_hexsha, max_stars_repo_stars_event_min_datetime, max_stars_repo_stars_event_max_datetime | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_issues_repo_path, max_issues_repo_name, max_issues_repo_head_hexsha, max_issues_repo_issues_event_min_datetime, max_issues_repo_issues_event_max_datetime | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_forks_repo_path, max_forks_repo_name, max_forks_repo_head_hexsha, max_forks_repo_forks_event_min_datetime, max_forks_repo_forks_event_max_datetime | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| avg_line_length, alphanum_fraction | float64 |
| max_line_length | int64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| all other qsc_*_quality_signal columns | float64 |
| qsc_code_frac_words_unique, qsc_code_frac_lines_string_concat | null |
| all other bare qsc_* columns (same names without the _quality_signal suffix) | int64 |
| effective | string |
| hits | int64 |

Quality-signal column order (qsc_code_*): num_words, num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams, frac_chars_dupe_6grams, frac_chars_dupe_7grams, frac_chars_dupe_8grams, frac_chars_dupe_9grams, frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert; then (qsc_codepython_*): cate_ast, frac_lines_func_ratio, cate_var_zero, frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print.

Each record below is rendered as a metadata block, the file content, and three metric lines: avg_line_length / max_line_length / alphanum_fraction, the 41 *_quality_signal values in the order above, and the remaining columns.
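A minimal sketch of loading rows with this schema and filtering on the quality signals; the file name data.parquet is a placeholder, while the column names are the real ones from the schema above:

import pandas as pd

# Load one shard of the dataset (the path is a placeholder).
df = pd.read_parquet("data.parquet")

# Keep Python files that are not comment-heavy and not dominated by duplicated 10-grams.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_comments_quality_signal"] < 0.5)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.3)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())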
hexsha 097c50a96728dff3d3f2f66802f6917cbcd87b74 | size 20,517 | ext py | lang Python
max_stars: scripts/all_to_all_analyzer.py @ jweckstr/westmetro_scripts (head a16385b00ac8d80f0068f348226ed89e2d0425a9, licenses ["MIT"]) | count null | events null → null
max_issues: scripts/all_to_all_analyzer.py @ jweckstr/westmetro_scripts (head a16385b00ac8d80f0068f348226ed89e2d0425a9, licenses ["MIT"]) | count null | events null → null
max_forks: scripts/all_to_all_analyzer.py @ jweckstr/westmetro_scripts (head a16385b00ac8d80f0068f348226ed89e2d0425a9, licenses ["MIT"]) | count null | events null → null
content:
import sqlite3
import pandas
import itertools
import networkx as nx

from gtfspy.gtfs import GTFS
from gtfspy.util import timeit
from scripts.all_to_all_settings import *


def attach_database(conn, other_db_path, name="other"):
    cur = conn.cursor()
    cur.execute("ATTACH '%s' AS '%s'" % (str(other_db_path), name))
    cur.execute("PRAGMA database_list")
    print("other database attached:", cur.fetchall())
    return conn


def stops_to_exclude(return_sqlite_list=False):
    gtfs_lm = GTFS(LM_DICT["gtfs_dir"])
    areas_to_remove = gtfs_lm.execute_custom_query_pandas(
        "SELECT * FROM stops WHERE CASE WHEN substr(stop_id,1, 5) = '__b__' THEN CAST(substr(stop_id,6, 1) AS integer) ELSE CAST(substr(stop_id,1, 1) AS integer) END >4")
    if return_sqlite_list:
        return "(" + ",".join([str(x) for x in areas_to_remove["stop_I"].tolist()]) + ")"
    return areas_to_remove


class AllToAllDifferenceAnalyzer:
    """
    AllToAllDifferenceAnalyzer calculates the difference between various summary statistics of temporal distance
    and number of boardings, stores the values in a database, and handles calls to this database.
    """

    def __init__(self, gtfs_path, before_db_path, after_db_path, output_db):
        self.gtfs = GTFS(gtfs_path)
        print(output_db)
        self._create_indices(before_db_path)
        self._create_indices(after_db_path)
        self.conn = sqlite3.connect(output_db)
        self.conn = attach_database(self.conn, before_db_path, name="before")
        self.conn = attach_database(self.conn, after_db_path, name="after")

    def _create_indices(self, db_path):
        conn = sqlite3.connect(db_path)
        cur = conn.cursor()
        for table in ["journey_duration", "n_boardings", "temporal_distance"]:
            # executescript is used because two CREATE INDEX statements are run per table
            cur.executescript(
                """CREATE INDEX IF NOT EXISTS %s_from_stop_I_idx ON %s (from_stop_I);
                   CREATE INDEX IF NOT EXISTS %s_to_stop_I_idx ON %s (to_stop_I);""" % (table, table, table, table))
        conn.commit()

    def diff_table(self, groupby="to_stop_I", measure="temporal_distance", ignore_stops=None):
        """
        Creates a table with the before-after difference of the mean, min and max temporal distance
        or number of boardings on a stop-to-stop basis
        :return:
        """
        cur = self.conn.cursor()
        query = """DROP TABLE IF EXISTS diff_{groupby}_{measure}""".format(measure=measure, groupby=groupby)
        cur.execute(query)
        multiplier = 1
        first = 0.5
        second = 1
        third = 1.5
        threshold = 10800  # threshold for change in mean temporal distance
        if measure in ("temporal_distance", "journey_duration"):
            multiplier = 60
            first = 5
            second = 10
            third = 20
        first_str = str(first).replace(".", "_")
        second_str = str(second).replace(".", "_")
        third_str = str(third).replace(".", "_")
        if ignore_stops:
            ignore_stops = " AND t1.to_stop_I NOT IN " + ignore_stops + " AND t1.from_stop_I NOT IN " + ignore_stops
        else:
            ignore_stops = ""
        query = """CREATE TABLE IF NOT EXISTS diff_{groupby}_{measure} ({groupby} INT, min_diff_mean REAL, mean_diff_mean REAL,
                   max_diff_mean REAL, incr_count_over_{0} INT, incr_count_over_{1} INT, incr_count_over_{2} INT,
                   decr_count_over_{0} INT, decr_count_over_{1} INT, decr_count_over_{2} INT )
                   """.format(first_str, second_str, third_str,
                              measure=measure, groupby=groupby)
        cur.execute(query)
        query = """INSERT OR REPLACE INTO diff_{groupby}_{measure} ({groupby}, min_diff_mean, mean_diff_mean, max_diff_mean,
                   incr_count_over_{first_str}, incr_count_over_{second_str}, incr_count_over_{third_str},
                   decr_count_over_{first_str}, decr_count_over_{second_str}, decr_count_over_{third_str})
                   SELECT {groupby}, min(diff_mean) AS min_diff_mean, avg(diff_mean) AS mean_diff_mean,
                   max(diff_mean) AS max_diff_mean,
                   sum(CASE WHEN diff_mean >= {0}*{multiplier} THEN 1 ELSE 0 END) AS incr_count_over_{first_str},
                   sum(CASE WHEN diff_mean >= {1}*{multiplier} THEN 1 ELSE 0 END) AS incr_count_over_{second_str},
                   sum(CASE WHEN diff_mean >= {2}*{multiplier} THEN 1 ELSE 0 END) AS incr_count_over_{third_str},
                   sum(CASE WHEN diff_mean <= -{0}*{multiplier} THEN 1 ELSE 0 END) AS decr_count_over_{first_str},
                   sum(CASE WHEN diff_mean <= -{1}*{multiplier} THEN 1 ELSE 0 END) AS decr_count_over_{second_str},
                   sum(CASE WHEN diff_mean <= -{2}*{multiplier} THEN 1 ELSE 0 END) AS decr_count_over_{third_str}
                   FROM
                   (SELECT t1.from_stop_I AS from_stop_I, t1.to_stop_I AS to_stop_I, t2.mean-t1.mean AS diff_mean
                   FROM before.{measure} AS t1, after.{measure} AS t2
                   WHERE t1.from_stop_I = t2.from_stop_I AND t1.to_stop_I = t2.to_stop_I {ignore_stops}
                   AND abs(t2.mean-t1.mean) < {threshold}) q1
                   GROUP BY {groupby}""".format(first, second, third,
                                                first_str=first_str, second_str=second_str, third_str=third_str,
                                                measure=measure,
                                                groupby=groupby, multiplier=multiplier, threshold=threshold,
                                                ignore_stops=ignore_stops)
        cur.execute(query)
        self.conn.commit()

    def get_mean_change_for_all_targets(self, groupby="to_stop_I", measure="temporal_distance", ignore_stops=None):
        """
        Returns the pre-generated difference table as a pandas DataFrame
        :param groupby: "to_stop_I" or "from_stop_I", designating whether the measure is calculated to or from the target
        :param measure: "temporal_distance" or "n_boardings"
        :return:
        """
        # ignore_stops handling is currently disabled:
        # if ignore_stops:
        #     ignore_stops = " WHERE " + groupby + " IN " + ignore_stops
        # else:
        #     ignore_stops = ""
        query = """SELECT * FROM diff_{groupby}_{measure}""".format(measure=measure, groupby=groupby)
        print("running query")
        df = pandas.read_sql_query(query, self.conn)
        df = self.gtfs.add_coordinates_to_df(df, stop_id_column=groupby, lat_name="lat", lon_name="lon")
        if measure == "temporal_distance":
            df["mean_diff_mean"] = df["mean_diff_mean"].apply(lambda x: x / 60)
        return df

    def extreme_change_od_pairs(self, threshold):
        """
        Returns O-D pairs where the absolute change is larger than the threshold: increases in travel time
        for positive thresholds, decreases in travel time for negative thresholds
        :param threshold: int
        :return: pandas DataFrame
        """
        if threshold < 0:
            string_to_add = " <= " + str(threshold)
        else:
            string_to_add = " >= " + str(threshold)
        query = """SELECT t1.from_stop_I AS from_stop_I, t1.to_stop_I AS to_stop_I, t2.mean-t1.mean AS diff_mean
                   FROM before.temporal_distance AS t1, after.temporal_distance AS t2
                   WHERE t1.from_stop_I = t2.from_stop_I AND t1.to_stop_I = t2.to_stop_I
                   AND t2.mean-t1.mean %s AND t2.mean-t1.mean < 10800""" % (string_to_add,)
        df = pandas.read_sql_query(query, self.conn)
        return df

    def get_global_mean_change(self, measure, threshold=10800, ignore_stops=False):
        ignore_list = "(-1)"  # placeholder list that matches no stop when nothing is excluded
        if ignore_stops:
            ignore_list = stops_to_exclude(return_sqlite_list=True)
        query = """SELECT before_global_mean, after_global_mean, after_global_mean-before_global_mean AS global_mean_difference FROM
                   (SELECT avg(mean) AS before_global_mean FROM before.{measure} WHERE mean <= {threshold} AND mean >0
                   AND from_stop_I NOT IN {ignore_stops} AND to_stop_I NOT IN {ignore_stops}) t1,
                   (SELECT avg(mean) AS after_global_mean FROM after.{measure} WHERE mean <= {threshold} AND mean >0
                   AND from_stop_I NOT IN {ignore_stops} AND to_stop_I NOT IN {ignore_stops}) t2
                   """.format(measure=measure, threshold=threshold, ignore_stops=ignore_list)
        df = pandas.read_sql_query(query, self.conn)
        return df

    @timeit
    def get_rows_with_abs_change_greater_than_n(self, stops, measure, n, sign, unit="s"):
        stops = ",".join([str(x) for x in stops])
        divisors = {"s": 1, "m": 60, "h": 3600}
        divisor = divisors[unit]
        query = """SELECT t1.{measure}/{divisor} AS before_{measure}, t2.{measure}/{divisor} AS after_{measure},
                   (t2.{measure}-t1.{measure})/{divisor} AS diff_{measure} FROM before.temporal_distance AS t1,
                   after.temporal_distance AS t2
                   WHERE t1.from_stop_I != t1.to_stop_I AND t1.from_stop_I = t2.from_stop_I
                   AND t1.to_stop_I = t2.to_stop_I AND t1.from_stop_I NOT IN ({stops})
                   AND t2.to_stop_I NOT IN ({stops})
                   AND t2.{measure}-t1.{measure} {sign} {n}""".format(measure=measure,
                                                                      divisor=divisor,
                                                                      stops=stops,
                                                                      n=n,
                                                                      sign=sign)
        df = pandas.read_sql_query(query, self.conn)
        return df

    @timeit
    def get_rows_based_on_stop_list(self, from_stops, to_stops, measure, measure_mode, unit="s"):
        """
        :param from_stops: list
        :param to_stops: list
        :param measure: string (mean, min, max, median)
        :param unit: string
        :param measure_mode: string
        :return:
        """
        assert measure_mode in ["n_boardings", "temporal_distance"]
        from_stops = ",".join([str(x) for x in from_stops])
        to_stops = ",".join([str(x) for x in to_stops])
        divisors = {"s": 1, "m": 60, "h": 3600}
        divisor = divisors[unit]
        query = """SELECT t1.{measure}/{divisor} AS before_{measure}, t2.{measure}/{divisor} AS after_{measure},
                   (t2.{measure}-t1.{measure})/{divisor} AS diff_{measure} FROM before.{mode} AS t1,
                   after.{mode} AS t2
                   WHERE t1.from_stop_I != t1.to_stop_I AND t1.from_stop_I = t2.from_stop_I
                   AND t1.to_stop_I = t2.to_stop_I AND t1.from_stop_I IN ({from_stops})
                   AND t2.to_stop_I IN ({to_stops})""".format(measure=measure,
                                                              mode=measure_mode,
                                                              divisor=divisor,
                                                              from_stops=from_stops,
                                                              to_stops=to_stops)
        df = pandas.read_sql_query(query, self.conn)
        return df

    def get_data_for_target(self, target, measure, direction="to", threshold=10800, unit="s", ignore_stops=False):
        divisors = {"s": 1, "m": 60, "h": 3600}
        divisor = divisors[unit]
        ignore_list = ""
        if ignore_stops:
            ignore_list = stops_to_exclude(return_sqlite_list=True)
            ignore_list = " AND t1.from_stop_I NOT IN {ignore_list} AND t1.to_stop_I NOT IN {ignore_list}".format(ignore_list=ignore_list)
        query = """SELECT t1.from_stop_I, t1.to_stop_I, t1.mean/{divisor} AS before_mean, t2.mean/{divisor} AS after_mean,
                   (t2.mean-t1.mean)/{divisor} AS diff_mean, COALESCE((t2.mean/t1.mean)- 1, 0) AS diff_mean_relative
                   FROM before.{measure} t1, after.{measure} t2
                   WHERE t1.from_stop_I=t2.from_stop_I AND t1.to_stop_I=t2.to_stop_I AND t1.mean <= {threshold}
                   AND t2.mean <= {threshold}
                   AND t1.{direction}_stop_I={target} {ignore_list}""".format(measure=measure,
                                                                              target=target,
                                                                              direction=direction,
                                                                              threshold=threshold,
                                                                              divisor=divisor,
                                                                              ignore_list=ignore_list)
        df = pandas.read_sql_query(query, self.conn)
        return df

    def get_mean_change(self, measure, threshold=10800, descending_order=False, include_list=None):
        if descending_order:
            order_by = "DESC"
        else:
            order_by = "ASC"
        include_list = "(" + ",".join([str(x) for x in include_list]) + ")"
        query = """SELECT t1.to_stop_I, t2.mean AS before, t2.mean-t1.mean AS diff_mean FROM
                   (SELECT to_stop_I, avg(mean) AS mean FROM before.{measure}
                   WHERE mean <= {threshold} AND to_stop_I IN {include_list}
                   GROUP BY to_stop_I) t1,
                   (SELECT to_stop_I, avg(mean) AS mean FROM after.{measure}
                   WHERE mean <= {threshold} AND to_stop_I IN {include_list}
                   GROUP BY to_stop_I) t2
                   WHERE t1.to_stop_I=t2.to_stop_I
                   ORDER BY diff_mean {order_by}
                   """.format(measure=measure,
                              threshold=threshold,
                              order_by=order_by,
                              include_list=include_list)
        df = pandas.read_sql_query(query, self.conn)
        return df

    def get_n_winning_targets_using_change_in_mean(self, n, measure, distance=500, threshold=10800, losers=False, include_list=None):
        if losers:
            order_by = "DESC"
        else:
            order_by = "ASC"
        include_list = "(" + ",".join([str(x) for x in include_list]) + ")"
        query = """SELECT t1.to_stop_I, t2.mean-t1.mean AS diff_mean FROM
                   (SELECT to_stop_I, avg(mean) AS mean FROM before.{measure}
                   WHERE mean <= {threshold} AND to_stop_I IN {include_list}
                   GROUP BY to_stop_I) t1,
                   (SELECT to_stop_I, avg(mean) AS mean FROM after.{measure}
                   WHERE mean <= {threshold} AND to_stop_I IN {include_list}
                   GROUP BY to_stop_I) t2
                   WHERE t1.to_stop_I=t2.to_stop_I
                   ORDER BY diff_mean {order_by}
                   """.format(measure=measure,
                              threshold=threshold,
                              order_by=order_by,
                              include_list=include_list)
        df = pandas.read_sql_query(query, self.conn)
        # exclude nearby stops
        nearby_excluded_stops = []
        stops_remaining = []
        gtfs = GTFS(GTFS_PATH)
        for value in df.itertuples():
            if value.to_stop_I not in nearby_excluded_stops:
                exclude_df = gtfs.get_stops_within_distance(value.to_stop_I, distance)
                nearby_excluded_stops += list(exclude_df["stop_I"])
                stops_remaining.append(value.to_stop_I)
            if len(stops_remaining) == n:
                break
        df = df.loc[df['to_stop_I'].isin(stops_remaining)]
        return df

    def n_inf_stops_per_stop(self, measure, indicator, threshold, group_by="to_stop_I", routing="before"):
        if group_by == "to_stop_I":
            stop_I = "from_stop_I"
        elif group_by == "from_stop_I":
            stop_I = "to_stop_I"
        else:
            raise AssertionError("Group_by should be to_stop_I or from_stop_I")
        query = """SELECT {group_by}, count(to_stop_I) AS N_stops FROM {routing}.{measure}
                   WHERE {indicator} >{threshold}
                   GROUP by {group_by} ORDER BY count(to_stop_I)""".format(measure=measure,
                                                                           threshold=threshold,
                                                                           indicator=indicator,
                                                                           routing=routing,
                                                                           group_by=group_by,
                                                                           stop_I=stop_I)
        df = pandas.read_sql_query(query, self.conn)
        return df

    def find_stops_where_all_indicators_are_finite(self, measure="temporal_distance", indicator="max", routing="after",
                                                   threshold=10800):
        stops_to_ignore = []
        ignore_statement = ""
        while True:
            query = """SELECT from_stop_I, count(to_stop_I) as invalid_connections FROM {routing}.{measure}
                       WHERE {indicator} >= {threshold} {ignore_statement} group by from_stop_I order by invalid_connections""".format(measure=measure,
                                                                                                                                       indicator=indicator,
                                                                                                                                       threshold=threshold,
                                                                                                                                       routing=routing,
                                                                                                                                       ignore_statement=ignore_statement)
            df = pandas.read_sql_query(query, self.conn)
            print("query has run, with {n} stops remaining".format(n=len(df.index)))
            df['removal_column'] = df.index + df.invalid_connections
            n_stops_in_iteration = len(df.index)
            df_to_remove = df.loc[df['removal_column'] > n_stops_in_iteration]
            print("{n} stops removed".format(n=len(df_to_remove.index)))
            if len(df_to_remove.index) == 0:
                break
            stops_to_ignore += list(df_to_remove['from_stop_I'])
            stops_to_ignore_str = ""
            for stop in stops_to_ignore:
                if not stops_to_ignore_str == "":
                    stops_to_ignore_str += ","
                stops_to_ignore_str += str(stop)
            # stops_to_ignore_str = ','.join(stops_to_ignore_str)
            ignore_statement = "AND from_stop_I NOT IN ({stops_comma}) " \
                               "AND to_stop_I NOT IN ({stops_comma})".format(stops_comma=stops_to_ignore_str)
        return list(df['from_stop_I']), stops_to_ignore

    def find_stops_where_all_indicators_are_finite_using_network(self, measure="temporal_distance", indicator="max",
                                                                 routing="after",
                                                                 threshold=10800):
        pass
        """
        nodes = [x[0] for x in nodes]
        edges = itertools.combinations(nodes, 2)
        print("combinations")
        G = nx.Graph()
        G.add_edges_from(edges)
        print("initial edges in place")
        for row in df.iterrows():
            G.remove_edge(row.from_stop_I, row.to_stop_I)
        print("removing stuff")
        """


if __name__ == "__main__":
    for time in TIMES:
        a2aa = AllToAllDifferenceAnalyzer(GTFS_PATH, get_a2aa_db_path(time, "old"), get_a2aa_db_path(time, "lm"),
                                          get_a2aa_db_path(time, "output"))
        ignore_list = stops_to_exclude(return_sqlite_list=True)
        a2aa.diff_table(groupby="to_stop_I", measure="n_boardings", ignore_stops=ignore_list)
        a2aa.diff_table(groupby="from_stop_I", measure="n_boardings", ignore_stops=ignore_list)
        a2aa.diff_table(groupby="to_stop_I", measure="temporal_distance", ignore_stops=ignore_list)
        a2aa.diff_table(groupby="from_stop_I", measure="temporal_distance", ignore_stops=ignore_list)
        # a2aa.diff_table(groupby="to_stop_I", measure="journey_duration", ignore_stops=ignore_list)
        # a2aa.diff_table(groupby="from_stop_I", measure="journey_duration", ignore_stops=ignore_list)
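A minimal driving sketch for the class above; the four paths are placeholders (in the script itself they come from scripts.all_to_all_settings):

analyzer = AllToAllDifferenceAnalyzer(
    gtfs_path="data/gtfs.sqlite",          # placeholder
    before_db_path="data/before.sqlite",   # placeholder
    after_db_path="data/after.sqlite",     # placeholder
    output_db="data/diff.sqlite",          # placeholder
)
analyzer.diff_table(groupby="to_stop_I", measure="temporal_distance")
df = analyzer.get_mean_change_for_all_targets(groupby="to_stop_I", measure="temporal_distance")
print(df.head())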
avg_line_length 53.569191 | max_line_length 173 | alphanum_fraction 0.561096
quality signals (schema order): 2,521 | 20,517 | 4.267751 | 0.107497 | 0.049726 | 0.040338 | 0.013384 | 0.513059 | 0.447346 | 0.401803 | 0.374012 | 0.331443 | 0.322614 | 0 | 0.016798 | 0.344251 | 20,517 | 383 | 174 | 53.569191 | 0.78289 | 0.057318 | 0 | 0.326316 | 0 | 0.077193 | 0.393647 | 0.054255 | 0.007018 | 0 | 0 | 0 | 0.007018 | 1 | 0.05614 | false | 0.003509 | 0.024561 | 0 | 0.129825 | 0.017544
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 097cf870cfdf8eb690e3cbf5e80ead9f28adc1b0 | size 2,258 | ext py | lang Python
max_stars: tests/test_flow/test_snakemake_tutorial.py @ flowsaber/flowsaber (head 7d68d085bbd9165d2bc0e0acd7826e70569c5fa3, licenses ["MIT"]) | count 31 | events 2021-05-08T06:35:07.000Z → 2022-03-05T05:58:24.000Z
max_issues: tests/test_flow/test_snakemake_tutorial.py @ flowsaber/flowsaber (head 7d68d085bbd9165d2bc0e0acd7826e70569c5fa3, licenses ["MIT"]) | count 3 | events 2021-05-10T12:36:57.000Z → 2021-05-15T14:01:15.000Z
max_forks: tests/test_flow/test_snakemake_tutorial.py @ zhqu1148980644/flowsaber (head 7d68d085bbd9165d2bc0e0acd7826e70569c5fa3, licenses ["MIT"]) | count 1 | events 2021-03-09T06:18:17.000Z → 2021-03-09T06:18:17.000Z
content:
from flowsaber.api import *


def test_snakemake_workflow():
    # EnvTask is the real dependent task when using the conda/image option
    @shell
    def bwa(self, fa: File, fastq: File):  # inputs are converted automatically if they have a type annotation
        """bwa mem -t {self.config.cpu} {fa} {fastq} | samtools view -Sb - > {fastq.stem}.bam"""
        return "*.bam"  # for a ShellTask, a str in the return value is treated as a File pattern and globbed

    @shell
    def sort(bam: File):  # self is optional in case you don't want to access the current task
        """samtools sort -o {sorted_bam} {bam}"""
        sorted_bam = f"{bam.stem}.sorted.bam"
        return sorted_bam

    @shell(publish_dirs=["results/vcf"])
    def call(fa: File, bams: list):  # in case you need to write some python code
        """samtools mpileup -g -f {fa} {bam_files} | bcftools call -mv - > all.vcf"""
        bam_files = ' '.join(str(bam) for bam in bams)
        return "all.vcf"

    @task
    def stats(vcf: File):
        import matplotlib
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
        from pysam import VariantFile
        quals = [record.qual for record in VariantFile(str(vcf))]
        plt.hist(quals)
        plt.savefig("report.svg")

    @flow
    def call_vcf_flow():
        """Call vcf from fastq files.
        Parameters
        ----------
        fa : str
            The path of the genome file
        fastq : List[str]
            List of fastq files
        """
        def _call(bams):  # a task is a normal function, use python as you wish
            return call(fa, bams)

        context = flowsaber.context
        fa = Channel.value(context.fa)
        fastq = Channel.values(*context.fastq)
        bam1 = bwa(fa, fastq)  # channels are cloned automatically
        bam2 = bwa(fa, fastq)
        mix(bam1, bam2) | sort | collect | _call | stats

    prefix = 'tests/test_flow/snamke-demo.nosync/data'
    with flowsaber.context({
        "fa": f'{prefix}/genome.fa',
        "fastq": [f'{prefix}/samples/{sample}' for sample in ['A.fastq', 'B.fastq', 'C.fastq']]
    }):
        # resolve dependency
        workflow = call_vcf_flow()
        run(workflow)


if __name__ == "__main__":
    test_snakemake_workflow()
avg_line_length 30.931507 | max_line_length 105 | alphanum_fraction 0.59876
quality signals (schema order): 296 | 2,258 | 4.483108 | 0.452703 | 0.026375 | 0.03165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002468 | 0.282108 | 2,258 | 72 | 106 | 31.361111 | 0.816163 | 0.320638 | 0 | 0.047619 | 0 | 0 | 0.121463 | 0.058661 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.02381 | 0.095238 | 0.02381 | 0.357143 | 0
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 098125b6bdbfea383598e527bbb70b034cf26260 | size 1,324 | ext py | lang Python
max_stars: py_ad_1_4.py @ aisolab/con-par-python (head e74cb9c30acfdd78c12c9f7aba039d16ed1f7e78, licenses ["MIT"]) | count 1 | events 2022-02-20T03:14:50.000Z → 2022-02-20T03:14:50.000Z
max_issues: py_ad_1_4.py @ aisolab/con-par-python (head e74cb9c30acfdd78c12c9f7aba039d16ed1f7e78, licenses ["MIT"]) | count null | events null → null
max_forks: py_ad_1_4.py @ aisolab/con-par-python (head e74cb9c30acfdd78c12c9f7aba039d16ed1f7e78, licenses ["MIT"]) | count null | events null → null
content:
"""
Section 1
Multithreading - Thread (2) - Daemon, Join
Keyword - DaemonThread, Join
"""
"""
DaemonThread(데몬스레드)
(1). 백그라운드에서 실행
(2). 메인스레드 종료시 즉시 종료 (서브 스레드의 경우는 메인 스레드와 상관없이 자기 작업을 끝까지 수행함.)
(3). 주로 백그라운드 무한 대기 이벤트 발생 실행하는 부분 담당 -> JVM(가비지 컬렉션), 자동 저장
(4). 일반 스레드는 작업 종료시까지 실행
"""
import logging
import threading
# 스레드 실행 함수
def thread_func(name, d):
logging.info("Sub-Thread %s: starting", name)
for i in d:
print(name, i)
logging.info("Sub-Thread %s: finishing", name)
# 메인 영역
if __name__ == "__main__":
# Logging format 설정
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
logging.info("Main-Thread: before creating thread")
# 함수 인자 확인
# Daemon: Default False
x = threading.Thread(target=thread_func, args=("First", range(200)), daemon=True)
y = threading.Thread(target=thread_func, args=("Two", range(10)), daemon=False)
logging.info("Main-Thread: before running thread")
# 서브 스레드 시작
x.start()
y.start()
# DaemonThread 확인
print(x.isDaemon())
print(y.isDaemon())
# 주석 전후 결과 확인
# x.join() # 서브 스레드의 작업이 끝날 떄까지, 메인 스레드가 기다림.
# y.join()
logging.info("Main-Thread: wait for the thread to finish")
logging.info("Main-Thread: all done")
avg_line_length 24.981132 | max_line_length 85 | alphanum_fraction 0.632175
quality signals (schema order): 199 | 1,324 | 4.150754 | 0.552764 | 0.09322 | 0.072639 | 0.101695 | 0.200969 | 0.084746 | 0 | 0 | 0 | 0 | 0 | 0.010669 | 0.221299 | 1,324 | 52 | 86 | 25.461538 | 0.790495 | 0.179003 | 0 | 0 | 0 | 0 | 0.259132 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.15 | 0.15
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 09821682a814779b24686f7214f05d5600259f1a | size 287 | ext py | lang Python
max_stars: listTest.py @ diallog/GCPpy (head dabd55ece1c12c1a390a228cd04cb7eb110e564b, licenses ["Unlicense"]) | count null | events null → null
max_issues: listTest.py @ diallog/GCPpy (head dabd55ece1c12c1a390a228cd04cb7eb110e564b, licenses ["Unlicense"]) | count null | events null → null
max_forks: listTest.py @ diallog/GCPpy (head dabd55ece1c12c1a390a228cd04cb7eb110e564b, licenses ["Unlicense"]) | count null | events null → null
content:
#!/usr/bin/env python3
# PURPOSE: studying function side effects

import os

os.system('clear')

orgList = [5, 3, 2, 1, 4]


def sumList(myList):
    # mutates its argument in place: each element becomes a running total
    for i in range(1, len(myList)):
        myList[i] += myList[i-1]
    return myList[len(myList)-1]


print(sumList(orgList))  # 15
print(orgList)           # [5, 8, 10, 11, 15]: the side effect, orgList was modified
avg_line_length 16.882353 | max_line_length 41 | alphanum_fraction 0.655052
quality signals (schema order): 45 | 287 | 4.177778 | 0.644444 | 0.095745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 0.184669 | 287 | 16 | 42 | 17.9375 | 0.764957 | 0.212544 | 0 | 0 | 0 | 0 | 0.022321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0.222222
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 09828a4b8ceea5e0df2ba0674a51b0b2f6523586 | size 2,029 | ext py | lang Python
max_stars: class/pandas_class.py @ Danigore25/python2 (head de6d582fcc35107aa21a1bd73fdf04a0d4209d31, licenses ["MIT"]) | count null | events null → null
max_issues: class/pandas_class.py @ Danigore25/python2 (head de6d582fcc35107aa21a1bd73fdf04a0d4209d31, licenses ["MIT"]) | count null | events null → null
max_forks: class/pandas_class.py @ Danigore25/python2 (head de6d582fcc35107aa21a1bd73fdf04a0d4209d31, licenses ["MIT"]) | count 2 | events 2021-09-07T00:30:49.000Z → 2021-10-19T15:14:54.000Z
content:
import pandas as pd
import numpy as np

serie = pd.Series(['a', 'b', 'c', 'd', 'e'],
                  index=['a', 'b', 'c', 'd', 'e'],
                  name="Ejemplo Serie")
print(serie)

ecoli_matraz = pd.Series([0.1, 0.15, 0.19, 0.5,
                          0.9, 1.4, 1.8, 2.1, 2.3],
                         index=['t1', 't2', 't3', 't4',
                                't5', 't6', 't7', 't8', 't9'],
                         name='Matraz')
print(ecoli_matraz)

ODs = pd.Series([0.2, 0.2, 0.4, 0.1, 0.2, 0.1, 0.2, 0.4, 0.1],
                index=[8, 4, 1, 2, 3, 0, 5, 7, 6],
                name='Ajustes')

# EXERCISE 1 ----------------------------------------------------------------------
produccion = pd.Series([5, 11, 4, 7, 2], index=['gen1', 'gen2', 'gen3', 'gen4', 'gen5'])
costos = pd.Series([5, 4.3, 7, 3.5], index=['gen1', 'gen2', 'gen3', 'gen5'])
costo_unitario = costos/produccion.T
print(costo_unitario)
print(costo_unitario.min())
# -----------------------------------------------------

nan_test = pd.Series([0.1, None, 2.1, 2.3], name='Matraz')
print(nan_test.count())

# loc and iloc
series_test = pd.Series([5.1, 2.2, 1.1, 3.1, 4.2], index=[5, 2, 1, 3, 4])
print(series_test)
print(series_test.loc[1])
print(series_test.iloc[1])

# EXERCISE 2 ------------------------------------------------------------------
bool_min = costo_unitario == costo_unitario.min()
bool_max = costo_unitario == costo_unitario.max()
print(costo_unitario[bool_min | bool_max])

# repeated indices
regulon = pd.Series(['aidB', 'alaS', 'accB', 'accC', 'bhsA'], index=['AidB', 'AlaS', 'AccB', 'AccB', 'ComR'],
                    name='Genes regulados')
print(regulon.loc['AccB'])
print(regulon.loc['AidB'])

# classes in series
class Mamifero:
    vertebrado = True

    def haz_ruido(self):
        print('aaaaaaaaaaaaaaaaaaaaaaaaaaa')

array_clase = pd.Series([np.sum, 'a', Mamifero], name='objetos')
jerbo = array_clase.iloc[2]
print(jerbo().haz_ruido())  # the Series stores the class itself, so instantiate before calling
avg_line_length 30.283582 | max_line_length 110 | alphanum_fraction 0.485461
quality signals (schema order): 274 | 2,029 | 3.507299 | 0.332117 | 0.074922 | 0.012487 | 0.008325 | 0.027055 | 0.012487 | 0 | 0 | 0 | 0 | 0 | 0.066537 | 0.237063 | 2,029 | 66 | 111 | 30.742424 | 0.554264 | 0.127649 | 0 | 0 | 0 | 0 | 0.114319 | 0.01591 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05 | 0 | 0.125 | 0.325
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 0986c2b4d466c529bcf1de02d35647e1f00797b3 | size 6,209 | ext py | lang Python
max_stars: scripts/datasets/mit67_install.py @ cclauss/archai (head a5fb8f937f7f1319e3204120803b2a045e9f768b, licenses ["MIT"]) | count 344 | events 2020-06-12T22:12:56.000Z → 2022-03-29T06:48:20.000Z
max_issues: scripts/datasets/mit67_install.py @ cclauss/archai (head a5fb8f937f7f1319e3204120803b2a045e9f768b, licenses ["MIT"]) | count 29 | events 2020-06-13T19:56:49.000Z → 2022-03-30T20:26:48.000Z
max_forks: scripts/datasets/mit67_install.py @ cclauss/archai (head a5fb8f937f7f1319e3204120803b2a045e9f768b, licenses ["MIT"]) | count 68 | events 2020-06-12T19:32:43.000Z → 2022-03-05T06:58:40.000Z
content:
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

""" Script to prepare mit67 dataset for pytorch dataloader.
"""

from typing import List, Dict, Tuple, Union, Optional
import os
import pdb
import time
import argparse
import tempfile
import requests
from torchvision.datasets.utils import download_and_extract_archive, download_url
from torch.utils.model_zoo import tqdm
from PIL import Image
import shutil
from collections import defaultdict
import pathlib

from archai.common import utils


def check_mit67(dataroot: str) -> bool:
    mit67 = os.path.join(dataroot, 'mit67')
    train = os.path.join(mit67, 'train')
    test = os.path.join(mit67, 'test')
    meta = os.path.join(mit67, 'meta')
    if not os.path.isdir(mit67) or not os.path.isdir(train) or not os.path.isdir(test) or not os.path.isdir(meta):
        return False
    num_train_files = 0
    for base, dirs, files in os.walk(train):
        for file in files:
            num_train_files += 1
    if num_train_files != 12466:
        return False
    num_test_files = 0
    for base, dirs, files in os.walk(test):
        for file in files:
            num_test_files += 1
    if num_test_files != 3153:
        return False
    # all checks passed
    return True


def download(dataroot: str):
    DOWNLOAD_URL = 'http://groups.csail.mit.edu/vision/LabelMe/NewImages/indoorCVPR_09.tar'
    with tempfile.TemporaryDirectory() as tempdir:
        download_and_extract_archive(
            DOWNLOAD_URL, tempdir, extract_root=dataroot, remove_finished=True)


def load_test_csv_data(filename: str) -> Dict[str, List[str]]:
    ''' Loads the data in csv files into a dictionary with
    class names as keys and list of image names as values. Works only for test data csv '''
    data_dict = defaultdict(list)
    with open(filename, 'r') as f:
        lines = f.readlines()
        assert len(lines) > 0
        for line in lines[1:]:
            words = line.rstrip().split(',')
            assert len(words) > 0
            data_dict[words[0]] = words[1:]
    return data_dict


def load_train_csv_data(filename: str) -> Dict[str, List[str]]:
    ''' Loads the data in csv files into a dictionary with
    class names as keys and list of image names as values. Works only for train data csv '''
    data_dict = defaultdict(list)
    with open(filename, 'r') as f:
        lines = f.readlines()
        assert len(lines) > 0
        for line in lines[1:]:
            words = line.rstrip().split(',')
            assert len(words) > 0
            data_dict[words[1]] = words[2:]
    return data_dict


def copy_data_helper(data: Dict[str, List[str]], imagesroot: str, foldername: str) -> None:
    for key in data.keys():
        images = data[key]
        for im in images:
            if not im:
                continue
            source = os.path.join(imagesroot, key, im)
            target = os.path.join(foldername, key, im)
            if not os.path.isfile(target):
                utils.copy_file(source, target)


def prepare_data(mit67_root: str):
    test_file = os.path.join(mit67_root, 'meta', 'MIT67_test.csv')
    test_data = load_test_csv_data(test_file)

    # train data is split into 4 files for some reason
    train1_file = os.path.join(mit67_root, 'meta', 'MIT67_train1.csv')
    train2_file = os.path.join(mit67_root, 'meta', 'MIT67_train2.csv')
    train3_file = os.path.join(mit67_root, 'meta', 'MIT67_train3.csv')
    train4_file = os.path.join(mit67_root, 'meta', 'MIT67_train4.csv')
    train_files = [train1_file, train2_file, train3_file, train4_file]
    train_data = defaultdict(list)
    for tf in train_files:
        this_data = load_train_csv_data(tf)
        train_data.update(this_data)

    # make classname directories for train and test
    for key in test_data.keys():
        os.makedirs(os.path.join(mit67_root, 'test', key), exist_ok=True)
        os.makedirs(os.path.join(mit67_root, 'train', key), exist_ok=True)

    # copy images to the right locations
    imagesroot = os.path.join(mit67_root, 'Images')
    testfoldername = os.path.join(mit67_root, 'test')
    copy_data_helper(test_data, imagesroot, testfoldername)
    trainfoldername = os.path.join(mit67_root, 'train')
    copy_data_helper(train_data, imagesroot, trainfoldername)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', type=str, default='C:\\Users\\dedey\\dataroot',
                        help='root directory where mit67 folder is intended to exist. If mit67 already exists in the format required this script will skip downloading')
    args = parser.parse_args()

    # check that dataset is in format required
    # else download and prepare dataset
    if not check_mit67(args.dataroot):
        # make mit67 directory
        mit67 = os.path.join(args.dataroot, 'mit67')
        train = os.path.join(mit67, 'train')
        test = os.path.join(mit67, 'test')
        meta = os.path.join(mit67, 'meta')

        os.makedirs(mit67, exist_ok=True)
        os.makedirs(train, exist_ok=True)
        os.makedirs(test, exist_ok=True)
        os.makedirs(meta, exist_ok=True)

        # this step will create folder mit67/Images
        # which has all the images for each class in its own subfolder
        download(mit67)

        # download the csv files for the train and test split
        # from 'NAS Evaluation is Frustrating' repo
        # note that download_url doesn't work in vscode debug mode
        test_file_url = 'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_test.csv'
        train_file_urls = ['https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train1.csv', 'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train2.csv',
                           'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train3.csv', 'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train4.csv']

        download_url(test_file_url, meta, filename=None, md5=None)
        for tu in train_file_urls:
            download_url(tu, meta, filename=None, md5=None)

        prepare_data(mit67)
avg_line_length 35.683908 | max_line_length 204 | alphanum_fraction 0.671445
quality signals (schema order): 876 | 6,209 | 4.619863 | 0.244292 | 0.037064 | 0.049419 | 0.059303 | 0.410427 | 0.355819 | 0.324191 | 0.309859 | 0.270324 | 0.255498 | 0 | 0.026562 | 0.223869 | 6,209 | 173 | 205 | 35.890173 | 0.813239 | 0.144146 | 0 | 0.258929 | 0 | 0.053571 | 0.158354 | 0.004931 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.053571 | false | 0 | 0.133929 | 0 | 0.241071 | 0
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 09887c8ffc4485168a4cf1dc2d552eb82e642cda | size 713 | ext py | lang Python
max_stars: src/python/T0/WMBS/Oracle/RunConfig/InsertRecoReleaseConfig.py @ silviodonato/T0 (head a093729d08b31175ed35cd20e889bd7094ce152a, licenses ["Apache-2.0"]) | count 6 | events 2016-03-09T14:36:19.000Z → 2021-07-27T01:28:00.000Z
max_issues: src/python/T0/WMBS/Oracle/RunConfig/InsertRecoReleaseConfig.py @ silviodonato/T0 (head a093729d08b31175ed35cd20e889bd7094ce152a, licenses ["Apache-2.0"]) | count 193 | events 2015-01-07T21:03:43.000Z → 2022-03-31T12:22:18.000Z
max_forks: src/python/T0/WMBS/Oracle/RunConfig/InsertRecoReleaseConfig.py @ silviodonato/T0 (head a093729d08b31175ed35cd20e889bd7094ce152a, licenses ["Apache-2.0"]) | count 36 | events 2015-01-28T19:01:54.000Z → 2021-12-15T17:18:20.000Z
content:
"""
_InsertRecoReleaseConfig_
Oracle implementation of InsertRecoReleaseConfig
"""
from WMCore.Database.DBFormatter import DBFormatter
class InsertRecoReleaseConfig(DBFormatter):
def execute(self, binds, conn = None, transaction = False):
sql = """INSERT INTO reco_release_config
(RUN_ID, PRIMDS_ID, FILESET, DELAY, DELAY_OFFSET)
VALUES (:RUN,
(SELECT id FROM primary_dataset WHERE name = :PRIMDS),
:FILESET,
0,
0)
"""
self.dbi.processData(sql, binds, conn = conn,
transaction = transaction)
return
avg_line_length 28.52 | max_line_length 79 | alphanum_fraction 0.548387
quality signals (schema order): 59 | 713 | 6.491525 | 0.677966 | 0.046997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004505 | 0.377279 | 713 | 24 | 80 | 29.708333 | 0.858108 | 0.105189 | 0 | 0 | 0 | 0 | 0.505564 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.285714 | 0
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 0988ffb2a91dd9ac6ea127ee5939338c9d7b530e | size 1,652 | ext py | lang Python
max_stars: split_wav.py @ tanacchi/sound-dataset-generator (head a74363c35652dbb7e7cb2dfd390cf89302f3827e, licenses ["MIT"]) | count 1 | events 2020-12-02T02:31:33.000Z → 2020-12-02T02:31:33.000Z
max_issues: split_wav.py @ tanacchi/sound_dataset_generator (head a74363c35652dbb7e7cb2dfd390cf89302f3827e, licenses ["MIT"]) | count null | events null → null
max_forks: split_wav.py @ tanacchi/sound_dataset_generator (head a74363c35652dbb7e7cb2dfd390cf89302f3827e, licenses ["MIT"]) | count null | events null → null
content:
import wave
import os
import sys
from glob import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--length", type=int, default=30)
parser.add_argument("--offset", type=int, default=15)
args = parser.parse_args()

unit_time_length = args.length
start_time_offset = args.offset

output_dir = os.path.join(".", "output")
os.makedirs(output_dir, exist_ok=True)
downloads_dir = os.path.join(".", "downloads")
target_files = glob(os.path.join(downloads_dir, "*.wav"))

for base_filepath in target_files:
    base_filename = os.path.basename(base_filepath)
    print(f"Processing for {base_filename}...")

    params = None
    data_raw = None
    with wave.open(base_filepath, "rb") as wave_read:
        params = wave_read.getparams()
        data_raw = wave_read.readframes(params.nframes)
        wave_read.close()

    unit_nframes = unit_time_length * params.framerate * params.nchannels * params.sampwidth
    start_frame_offset = start_time_offset * params.framerate * params.nchannels * params.sampwidth
    file_count = 0
    for t in range(0, len(data_raw), start_frame_offset):
        file_count += 1
        picked_data = data_raw[t:t+unit_nframes]
        output_filename = os.path.join(output_dir, f"{base_filename}_{file_count:09}.wav")
        with wave.open(output_filename, "wb") as wave_write:
            wave_write.setparams((
                params.nchannels, params.sampwidth, params.framerate,
                len(picked_data), params.comptype, params.compname
            ))
            wave_write.writeframes(picked_data)
            wave_write.close()
    # os.remove(base_filepath)

print("Done.")
avg_line_length 33.714286 | max_line_length 99 | alphanum_fraction 0.696126
quality signals (schema order): 221 | 1,652 | 4.963801 | 0.352941 | 0.027347 | 0.036463 | 0.082042 | 0.082042 | 0.082042 | 0 | 0 | 0 | 0 | 0 | 0.006711 | 0.188257 | 1,652 | 48 | 100 | 34.416667 | 0.811335 | 0.014528 | 0 | 0 | 0 | 0 | 0.070726 | 0.021525 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.128205 | 0 | 0.128205 | 0.051282
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 098a8775723a6e3a315440de72e96cd1befcdb31 | size 2,454 | ext py | lang Python
max_stars: ex075A.py @ gabrieleliasdev/python-cev (head 45390963b5112a982e673f6a6866da422bf9ae6d, licenses ["MIT"]) | count null | events null → null
max_issues: ex075A.py @ gabrieleliasdev/python-cev (head 45390963b5112a982e673f6a6866da422bf9ae6d, licenses ["MIT"]) | count null | events null → null
max_forks: ex075A.py @ gabrieleliasdev/python-cev (head 45390963b5112a982e673f6a6866da422bf9ae6d, licenses ["MIT"]) | count null | events null → null
content:
from tkinter import *

janela = Tk()
lista = []
texto1 = StringVar()
texto2 = StringVar()
texto3 = StringVar()
texto4 = StringVar()

# --------------------- COMMAND PROCESSING ------
def click_bt1():
    lista.append(int(et1.get()))
    lista.append(int(et2.get()))
    lista.append(int(et3.get()))
    lista.append(int(et4.get()))
    txt1.delete(0.0, 'end')
    for i in lista:
        if i % 2 == 0:
            txt1.insert(0.0, f'{i} ')
    txt1.insert(0.0, '- Os valores pares digitados foram: ')
    if 3 in lista:
        txt1.insert(0.0, f'- O número 3 apareceu na {lista.index(3)+1} posição.\n')
    else:
        txt1.insert(0.0, '- O número 3 não apareceu na lista.\n')
    txt1.insert(0.0, f'- Você digitou o número "9" {lista.count(9)} vezes.\n')
    texto1.set(str(''))
    texto2.set(str(''))
    texto3.set(str(''))
    texto4.set(str(''))
    print(lista)
# ------------------------------------------------------

# --------------- WIDGET PLACEMENT ---------------
lb1 = Label(janela, text='Digite o primeiro número: ')
lb1.grid(row=0, column=0, sticky=W)
lb2 = Label(janela, text='Digite o segundo número: ')
lb2.grid(row=1, column=0, sticky=W)
lb3 = Label(janela, text='Digite o terceiro número: ')
lb3.grid(row=2, column=0, sticky=W)
lb4 = Label(janela, text='Digite o quarto número: ')
lb4.grid(row=3, column=0, sticky=W)
et1 = Entry(janela, textvariable=texto1, width=5)
et1.grid(row=0, column=1, sticky=E)
et2 = Entry(janela, textvariable=texto2, width=5)
et2.grid(row=1, column=1, sticky=E)
et3 = Entry(janela, textvariable=texto3, width=5)
et3.grid(row=2, column=1, sticky=E)
et4 = Entry(janela, textvariable=texto4, width=5)
et4.grid(row=3, column=1, sticky=E)
bt1 = Button(janela, text='PROCESSAR', font=('arialblack', 11, 'bold'), command=click_bt1)
bt1.grid(row=0, column=2, rowspan=4)
txt1 = Text(janela, width=40, height=10, bd=5)
txt1.grid(row=5, column=0, columnspan=3)
# ----------------------------------------------------------------------

# ------------------- WINDOW SIZING AND CENTERING --------
janela.title('Exercicio - Ex075')
janela_width = 330
janela_height = 260
scream_width = janela.winfo_screenwidth()
scream_height = janela.winfo_screenheight()
cord_x = int((scream_width/2) - (janela_width/2))
cord_y = int((scream_height/2) - (janela_height/2))
janela.geometry(f'{janela_width}x{janela_height}+{cord_x}+{cord_y}')
# ---------------------------------------------------------------------

janela.mainloop()
avg_line_length 31.87013 | max_line_length 86 | alphanum_fraction 0.601059
quality signals (schema order): 351 | 2,454 | 4.150997 | 0.327635 | 0.048044 | 0.037749 | 0.041181 | 0.087165 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051893 | 0.128362 | 2,454 | 77 | 87 | 31.87013 | 0.629266 | 0.149552 | 0 | 0 | 0 | 0 | 0.180596 | 0.023055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.017857 | 0 | 0.035714 | 0.017857
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 098d7c6c55f7415535fddaa88a483e5bc3bc96a3 | size 650 | ext py | lang Python
max_stars: Python/[4 kyu] Sum of Intervals.py @ KonstantinosAng/CodeWars (head 9ec9da9ed95b47b9656a5ecf77f486230fd15e3a, licenses ["MIT"]) | count null | events null → null
max_issues: Python/[4 kyu] Sum of Intervals.py @ KonstantinosAng/CodeWars (head 9ec9da9ed95b47b9656a5ecf77f486230fd15e3a, licenses ["MIT"]) | count null | events null → null
max_forks: Python/[4 kyu] Sum of Intervals.py @ KonstantinosAng/CodeWars (head 9ec9da9ed95b47b9656a5ecf77f486230fd15e3a, licenses ["MIT"]) | count null | events null → null
content:
# More details on this kata
# https://www.codewars.com/kata/52b7ed099cdc285c300001cd


def sum_of_intervals(intervals):
    s, ret = [list(x) for x in sorted(intervals)], 0
    if len(s) == 1:
        return abs(s[0][0] - s[0][1])
    for i in range(len(s)):
        if i + 1 > len(s) - 1:
            break
        if s[i][0] <= s[i + 1][0] <= s[i][1]:
            if i + 1 > len(s) - 1:
                break
            # merge every following interval that overlaps the current one
            while s[i][0] <= s[i + 1][0] <= s[i][1]:
                if s[i][1] <= s[i + 1][1]:
                    s[i][1] = s[i + 1][1]
                del s[i + 1]
                if i + 1 > len(s) - 1:
                    break
    for i in s:
        ret += abs(i[0] - i[1])
    return ret
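A quick sanity check of the merge logic, with expected totals worked out by hand:

assert sum_of_intervals([(1, 5)]) == 4
assert sum_of_intervals([(1, 4), (7, 10), (3, 5)]) == 7  # (1, 4) and (3, 5) merge into (1, 5)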
avg_line_length 32.5 | max_line_length 56 | alphanum_fraction 0.432308
quality signals (schema order): 118 | 650 | 2.364407 | 0.271186 | 0.09319 | 0.096774 | 0.057348 | 0.290323 | 0.290323 | 0.290323 | 0.189964 | 0.189964 | 0.189964 | 0 | 0.106796 | 0.366154 | 650 | 19 | 57 | 34.210526 | 0.570388 | 0.123077 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.133333 | 0
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 0990bfe14e23c72b11bf2defe5e3302294dbdd91 | size 11,197 | ext py | lang Python
max_stars: unit_list.py @ guliverza/AdditionalPylons (head 37336dcd1678c6cdfa22d881c2178ba65cb1fd61, licenses ["MIT"]) | count null | events null → null
max_issues: unit_list.py @ guliverza/AdditionalPylons (head 37336dcd1678c6cdfa22d881c2178ba65cb1fd61, licenses ["MIT"]) | count null | events null → null
max_forks: unit_list.py @ guliverza/AdditionalPylons (head 37336dcd1678c6cdfa22d881c2178ba65cb1fd61, licenses ["MIT"]) | count null | events null → null
content:
import sc2
from sc2.constants import *

# our own classes
from unit_counters import UnitCounter
from warpprism import WarpPrism as wpControl
from immortal import Immortal as imControl
from stalker import Stalker as skControl
from zealot import Zealot as zlControl
from sentry import Sentry as snControl
from adept import Adept as adControl
from colossus import Colossus as coControl
from voidray import VoidRay as vrControl
from tempest import Tempest as tpControl
from phoenix import Phoenix as pxControl
from probe import Probe as pbControl
from shade import Shade as sdControl
from hightemplar import HighTemplar as htControl
from observer import Observer as obControl
from disruptor import Disruptor as dsControl
from disruptor_phased import DisruptorPhased as dpControl
from carrier import Carrier as crControl
from mothership import Mothership as msControl
from archon import Archon as arControl
from cannon import Cannon as cnControl


class UnitList():
    def __init__(self):
        self.unit_objects = {}
        self.unitCounter = UnitCounter()

    def make_decisions(self, game):
        self.game = game
        self.update_units()
        for unit in self.game.units():
            obj = self.unit_objects.get(unit.tag)
            if obj:
                obj.make_decision(self.game, unit)

    def update_units(self):
        for unit in self.game.units():
            obj = self.unit_objects.get(unit.tag)
            if obj:
                obj.unit = unit

    def getObjectByTag(self, unit_tag):
        if self.unit_objects.get(unit_tag):
            return self.unit_objects.get(unit_tag)
        return None

    def remove_object(self, unit_tag):
        if self.unit_objects.get(unit_tag):
            unit_obj = self.unit_objects.get(unit_tag)
            # check to see if it's a probe, if so remove it from gathering.
            if unit_obj.unit.name == 'Probe':
                unit_obj.removeGatherer()
            if unit_obj.unit.name == 'DisruptorPhased':
                unit_obj.clearMines()
                unit_obj.clearLurkers()
            # check to see if it's our probe scout, if so create another.
            # if unit_obj.unit.name == 'Probe' and unit_obj.scout:
            #     # was a scout, create a new one.
            #     self.assignScout()
            del self.unit_objects[unit_tag]

    def load_object(self, unit):
        # print('Unit Created:', unit.name, unit.tag)
        # check to see if an object already exists for this tag
        if self.getObjectByTag(unit.tag):
            return
        if unit.name == 'WarpPrism':
            obj = wpControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Immortal':
            obj = imControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Stalker':
            obj = skControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Zealot':
            obj = zlControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Sentry':
            obj = snControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Adept':
            obj = adControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Colossus':
            obj = coControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'VoidRay':
            obj = vrControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Phoenix':
            obj = pxControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Probe':
            obj = pbControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Tempest':
            obj = tpControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'AdeptPhaseShift':
            obj = sdControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'HighTemplar':
            obj = htControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Observer':
            obj = obControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Disruptor':
            obj = dsControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'DisruptorPhased':
            obj = dpControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Carrier':
            obj = crControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Mothership':
            obj = msControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'Archon':
            obj = arControl(unit)
            self.unit_objects.update({unit.tag: obj})
        elif unit.name == 'PhotonCannon':
            obj = cnControl(unit)
            self.unit_objects.update({unit.tag: obj})
        # else:
        #     print('Unit Created:', unit.name, unit.tag)

    def unitPosition(self, ownerUnit):
        if self.unit_objects.get(ownerUnit.tag):
            unit_obj = self.unit_objects.get(ownerUnit.tag)
            return unit_obj.saved_position
        return None

    def phaseTargets(self):
        phaseList = {k: v for k, v in self.unit_objects.items() if v.unit.name == 'DisruptorPhased'}
        targets = []
        for key, phase in phaseList.items():
            targets.append(phase.currentTarget)
        return targets

    def adeptChaseTarget(self, ownerUnit):
        # get the object by the unit_tag.
        if self.unit_objects.get(ownerUnit.tag):
            unit_obj = self.unit_objects.get(ownerUnit.tag)
            return unit_obj.chasePosition
        return None

    def unitDamaged(self, ownerUnit):
        if self.unit_objects.get(ownerUnit.tag):
            unit_obj = self.unit_objects.get(ownerUnit.tag)
            return unit_obj.wasDamaged
        return False

    def unitHomeTarget(self, ownerUnit):
        # get the object by the unit_tag.
        if self.unit_objects.get(ownerUnit.tag):
            unit_obj = self.unit_objects.get(ownerUnit.tag)
            return unit_obj.homeTarget
        return None

    def unitTarget(self, ownerUnit):
        # get the object by the unit_tag.
        if self.unit_objects.get(ownerUnit.tag):
            unit_obj = self.unit_objects.get(ownerUnit.tag)
            return unit_obj.last_target
        return None

    def disruptorBallCancel(self, owner_tag) -> bool:
        ballList = {k: v for k, v in self.unit_objects.items() if v.unit.type_id == DISRUPTORPHASED and v.requestCancel and v.ownerTag == owner_tag}
        if len(ballList) > 0:
            return True
        return False

    def adeptOrder(self, ownerUnit):
        # get the object by the unit_tag.
        if self.unit_objects.get(ownerUnit.tag):
            unit_obj = self.unit_objects.get(ownerUnit.tag)
            return unit_obj.shadeOrder
        return None

    def assignScout(self):
        # if it's late in the game and we aren't attacking, then don't make a replacement.
        if self.game.defend_only and self.game.time > 360:
            return
        # find a probe to assign as a scout.
        probeList = {k: v for k, v in self.unit_objects.items() if v.unit.name == 'Probe' and not v.collect_only and not v.scout}
        for key, probe in probeList.items():
            probe.becomeScout()
            probe.removeGatherer()
            return

    def unitCount(self, unit_name):
        unitList = {k: v for k, v in self.unit_objects.items() if v.unit.name == unit_name}
        return len(unitList)

    def shieldSafe(self, inc_unit):
        # check for other sentries near by with shields that are active.
        shieldingList = {k: v for k, v in self.unit_objects.items() if v.unit.name == 'Sentry' and v.shieldActive and v.unit.distance_to(inc_unit.unit) < 2.5}
        if len(shieldingList) > 0:
            return False
        return True

    def freeNexusBuilders(self):
        probeList = {k: v for k, v in self.unit_objects.items() if v.unit.name == 'Probe' and v.nexus_builder}
        if len(probeList) > 0:
            for key, probe in probeList.items():
                probe.nexus_builder = False
                probe.nexus_position = None

    @property
    def nexusBuilderAssigned(self) -> bool:
        probeList = {k: v for k, v in self.unit_objects.items() if v.unit.name == 'Probe' and v.nexus_builder}
        if len(probeList) > 0:
            return True
        return False

    @property
    def hallucinationScore(self) -> int:
        hallList = {k: v for k, v in self.unit_objects.items() if v.isHallucination}
        hall_score = 0
        for key, unit_obj in hallList.items():
            hall_score += self.unitCounter.getUnitPower(unit_obj.unit.name)
        return hall_score

    def phoenixScouting(self):
        phoenixList = {k: v for k, v in self.unit_objects.items() if v.unit.name == 'Phoenix' and v.isHallucination}
        if len(phoenixList) > 0:
            return True
        return False

    def getGravitonTarget(self, inc_unit):
        phoenixList = {k: v for k, v in self.unit_objects.items() if v.unit.name == 'Phoenix' and v.isBeaming}
        # print(len(phoenixList), inc_unit.unit.name, len(self.unit_objects))
        target = None
        # get the closest.
        mindist = 1000
        for key, phoenix in phoenixList.items():
            # get the distance to the phoenix
            if inc_unit.unit.position.to2.distance_to(phoenix.position.to2) < mindist:
                target = phoenix.beam_unit
                mindist = inc_unit.unit.position.to2.distance_to(phoenix.unit.position.to2)
        if mindist < 10:
            return target
        return None

    def getWorkers(self):
        return {k: v for k, v in self.unit_objects.items() if v.unit.name == 'Probe'}.items()

    def friendlyEngagedFighters(self, closestEnemy, friendRange=10):
        # find all the units near the closest Enemy that aren't retreating.
        baselist = {k: v for k, v in self.unit_objects.items() if v.unit.position.to2.distance_to(closestEnemy.position.to2) < friendRange}
        # find out how much DPS we have going on.
        friendDPStoGround = 0
        friendDPStoAir = 0
        friendAirHealth = 0
        friendGroundHealth = 0
        friendTotalDPS = 0
        for k, friendObj in baselist.items():
            if friendObj.unit.is_flying:
                friendAirHealth += friendObj.unit.health + friendObj.unit.shield
            else:
                friendGroundHealth += friendObj.unit.health + friendObj.unit.shield
            friendDPStoGround += friendObj.unit.ground_dps
            friendDPStoAir += friendObj.unit.air_dps
            if friendObj.unit.ground_dps > friendObj.unit.air_dps:
                friendTotalDPS += friendObj.unit.ground_dps
            else:
                friendTotalDPS += friendObj.unit.air_dps
        return [friendDPStoGround, friendDPStoAir, friendAirHealth, friendGroundHealth, friendTotalDPS]

    def friendlyFighters(self, inc_unit, friendRange=10):
        # find all the units near the passed unit's position that aren't retreating.
        # baselist = {k: v for k, v in self.unit_objects.items() if not v.isRetreating and v.unit.position.to2.distance_to(inc_unit.position.to2) < friendRange}
        baselist = {k: v for k, v in self.unit_objects.items() if v.unit.position.to2.distance_to(inc_unit.position.to2) < friendRange}
        # find out how much DPS we have going on.
        friendDPStoGround = 0
        friendDPStoAir = 0
        friendAirHealth = 0
        friendGroundHealth = 0
        friendTotalDPS = 0
        for k, friendObj in baselist.items():
            if friendObj.unit.is_flying:
                friendAirHealth += friendObj.unit.health + friendObj.unit.shield
            else:
                friendGroundHealth += friendObj.unit.health + friendObj.unit.shield
            friendDPStoGround += friendObj.unit.ground_dps
            friendDPStoAir += friendObj.unit.air_dps
            if friendObj.unit.ground_dps > friendObj.unit.air_dps:
                friendTotalDPS += friendObj.unit.ground_dps
            else:
                friendTotalDPS += friendObj.unit.air_dps
        return [friendDPStoGround, friendDPStoAir, friendAirHealth, friendGroundHealth, friendTotalDPS]

    # properties.
    @property
    def amount(self) -> int:
        return len(self.unit_objects)
avg_line_length 34.24159 | max_line_length 155 | alphanum_fraction 0.703224
quality signals (schema order): 1,573 | 11,197 | 4.91227 | 0.149396 | 0.06212 | 0.10871 | 0.049178 | 0.556231 | 0.551314 | 0.533713 | 0.508477 | 0.484794 | 0.484794 | 0 | 0.004752 | 0.191926 | 11,197 | 326 | 156 | 34.346626 | 0.849248 | 0.10476 | 0 | 0.408 | 0 | 0 | 0.025129 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104 | false | 0 | 0.092 | 0.008 | 0.336 | 0
bare qsc_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective 1 | hits 0
hexsha 09912f75595653975287507558557321b7720adb | size 619 | ext py | lang Python
max_stars: src/lib/bver/Versioned/Addon.py @ backboneHQ/bver (head c3c929442fadb28a3f39d0ddec19fb2dfc7a4732, licenses ["MIT"]) | count 1 | events 2021-09-09T01:22:37.000Z → 2021-09-09T01:22:37.000Z
max_issues: src/lib/bver/Versioned/Addon.py @ backboneHQ/bver (head c3c929442fadb28a3f39d0ddec19fb2dfc7a4732, licenses ["MIT"]) | count null | events null → null
max_forks: src/lib/bver/Versioned/Addon.py @ backboneHQ/bver (head c3c929442fadb28a3f39d0ddec19fb2dfc7a4732, licenses ["MIT"]) | count 1 | events 2021-09-03T18:45:15.000Z → 2021-09-03T18:45:15.000Z
content:
from .Versioned import Versioned
class Addon(Versioned):
"""
Implements the addon support to the versioned.
"""
def __init__(self, *args, **kwargs):
"""
Create an addon object.
"""
super(Addon, self).__init__(*args, **kwargs)
# setting default options
self.setOption('enabled', True)
def bverEnabledName(self, software):
"""
Return the enabled environment variable name for the addon versioned.
"""
return 'BVER_{}_{}_ENABLED'.format(
software.name().upper(),
self.name().upper()
)
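For reference, the environment variable name built by bverEnabledName simply composes the software and addon names; a quick standalone illustration (the 'maya'/'arnold' names here are hypothetical, not part of bver):

def bver_enabled_name(software_name, addon_name):
    # Same naming scheme as Addon.bverEnabledName above.
    return 'BVER_{}_{}_ENABLED'.format(software_name.upper(), addon_name.upper())

print(bver_enabled_name('maya', 'arnold'))  # BVER_MAYA_ARNOLD_ENABLED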
| 23.807692
| 77
| 0.568659
| 60
| 619
| 5.683333
| 0.566667
| 0.082111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.308562
| 619
| 25
| 78
| 24.76
| 0.796729
| 0.266559
| 0
| 0
| 0
| 0
| 0.063939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09928e74c332f7b48d51ab003cf566958a601031
| 5,988
|
py
|
Python
|
backend/filing/admin.py
|
bhardwajRahul/sec-filings-app
|
8cf7f5956717db8fee1f9a20445986ad9cb831ca
|
[
"MIT"
] | 36
|
2020-12-04T08:16:38.000Z
|
2022-03-22T02:30:49.000Z
|
backend/filing/admin.py
|
bhardwajRahul/sec-filings-app
|
8cf7f5956717db8fee1f9a20445986ad9cb831ca
|
[
"MIT"
] | 1
|
2021-10-14T22:20:40.000Z
|
2021-10-17T17:29:50.000Z
|
backend/filing/admin.py
|
briancaffey/sec-filings-app
|
8cf7f5956717db8fee1f9a20445986ad9cb831ca
|
[
"MIT"
] | 16
|
2020-11-30T18:46:51.000Z
|
2022-01-20T23:01:58.000Z
|
from datetime import date
from django.contrib import admin, messages
from django.core.management import call_command
from django.utils.html import format_html
from django.http import HttpResponseRedirect
from django.urls import path
# Register your models here.
from .models import (
FilingList,
Filing,
Holding,
Cik,
Cusip,
CikObservation,
CusipObservation,
)
from .tasks import process_filing, process_filing_list
class CikAdmin(admin.ModelAdmin):
class Meta:
model = Cik
search_fields = ("cik_number",)
list_display = ("cik_number", "filer_name")
class CusipAdmin(admin.ModelAdmin):
class Meta:
model = Cusip
search_fields = ("cusip_number", "company_name", "symbol")
list_display = ("cusip_number", "company_name", "symbol")
class CikObservationAdmin(admin.ModelAdmin):
class Meta:
model = CikObservation
search_fields = ("cik__cik_number", "name")
readonly_fields = ("cik", "filing_list", "name")
list_display = ("id", "cik", "name", "filing_list")
class CusipObservationAdmin(admin.ModelAdmin):
class Meta:
model = CusipObservation
search_fields = ("cusip__cusip_number", "name")
# raw_id_fields = ["cusip", "name"]
readonly_fields = ("cusip", "name", "filing")
list_display = ("id", "cusip", "name", "filing")
class FilingListAdmin(admin.ModelAdmin):
class Meta:
model = FilingList
list_display = ("id", "datafile", "quarter", "filing_count")
readonly_fields = ("quarter",)
change_form_template = "admin/filing/filinglist/change_form.html"
def save_model(self, request, obj, form, change):
if not obj.quarter:
obj.quarter = date(int(obj.filing_year), ((int(obj.filing_quarter) - 1) * 3) + 1, 1)
super(FilingListAdmin, self).save_model(request, obj, form, change)
def process_filings(self, request, queryset):
for filing_list in queryset:
filing_list.process_filing_list()
messages.add_message(request, messages.INFO, 'Processing filing')
actions = [process_filings]
process_filings.short_description = "Process Filings"
def get_urls(self):
urls = super().get_urls()
filing_list_urls = [path("generate/", self.generate_filing_lists)]
return urls + filing_list_urls
def generate_filing_lists(self, request):
call_command("generate_filing_lists")
self.message_user(
request, "Filing lists have been generated (1993 - 2020)."
)
return HttpResponseRedirect("../")
def response_change(self, request, obj):
if "_process_filing_list" in request.POST:
process_filing_list.apply_async(args=(obj.id,))
self.message_user(request, "Filing list is being processed.")
return HttpResponseRedirect(".")
return super().response_change(request, obj)
class FilingAdmin(admin.ModelAdmin):
class Meta:
model = Filing
# https://stackoverflow.com/questions/46756086/django-admin-edit-model-select-prefetch-related
list_select_related = ("filing_list", "cik")
readonly_fields = ("filing_list",)
list_display = (
"id",
"cik",
"form_type",
"date_filed",
"filing_list_link",
"datafile",
"holding_count",
)
search_fields = ("form_type",)
# def holding_count(self, obj=None):
# return obj.holding_count()
def holding_count(self, obj=None):
return format_html(
f"<a href='/admin/filing/holding/?filing__id={obj.id}'>{obj.holding_count()}</a>" # noqa
)
holding_count.admin_order_field = "holdingcount"
def filing_list_link(self, obj=None):
return format_html(
f'<a target="_blank" href="/admin/filing/filinglist/{obj.filing_list.id}/change/">{str(obj.filing_list)}</a>' # noqa
)
change_form_template = "admin/filing/filing/change_form.html"
def response_change(self, request, obj):
if "_process_filing" in request.POST:
process_filing.apply_async(args=(obj.id,))
self.message_user(request, "Filing is being processed.")
return HttpResponseRedirect(".")
return super().response_change(request, obj)
class HoldingAdmin(admin.ModelAdmin):
class Meta:
model = Holding
# raw_id_fields = ["filing"]
list_select_related = (
"filing",
"cusip",
"filing__cik",
"filing__filing_list",
)
readonly_fields = ("filing",)
list_display = (
"id",
"cik",
"filing_link",
"filing",
"date_filed",
"nameOfIssuer",
"titleOfClass",
"cusip",
"value",
"sshPrnamt",
"sshPrnamtType",
"investmentDiscretion",
"putCall",
"otherManager",
"sole",
"shared",
"nonee",
)
def cik(self, obj=None):
return format_html(
f'<a target="_blank" href="/cik/{obj.filing.cik}">{obj.filing.cik}</a>' # noqa
)
def date_filed(self, obj=None):
return obj.filing.date_filed
# https://stackoverflow.com/questions/2168475/django-admin-how-to-sort-by-one-of-the-custom-list-display-fields-that-has-no-d
date_filed.admin_order_field = "filing__date_filed"
def filing_link(self, obj=None):
return format_html(
f'<a target="_blank" href="/admin/filing/filing/{obj.filing.id}/change/">Link</a>' # noqa
)
search_fields = (
"nameOfIssuer",
"cusip__cusip_number",
"cusip__company_name",
)
admin.site.register(Holding, HoldingAdmin)
admin.site.register(FilingList, FilingListAdmin)
admin.site.register(Filing, FilingAdmin)
admin.site.register(Cik, CikAdmin)
admin.site.register(CikObservation, CikObservationAdmin)
admin.site.register(Cusip, CusipAdmin)
admin.site.register(CusipObservation, CusipObservationAdmin)
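The link columns above follow a common ModelAdmin pattern: a method that returns markup via format_html and is named in list_display. A hedged sketch of the pattern, with hypothetical Book/Author models that are not part of this app:

from django.contrib import admin
from django.utils.html import format_html

class BookAdmin(admin.ModelAdmin):
    list_display = ("id", "author_link")

    def author_link(self, obj=None):
        # With {} placeholders, format_html escapes the interpolated values;
        # the f-string variants above bypass that and rely on trusted input.
        return format_html(
            '<a href="/admin/library/author/{}/change/">{}</a>',
            obj.author.id,
            obj.author,
        )
    author_link.short_description = "Author"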
| 27.981308
| 129
| 0.644122
| 671
| 5,988
| 5.530551
| 0.223547
| 0.056589
| 0.037726
| 0.045271
| 0.271625
| 0.158448
| 0.158448
| 0.145783
| 0.137968
| 0.113177
| 0
| 0.005861
| 0.230628
| 5,988
| 213
| 130
| 28.112676
| 0.799653
| 0.06513
| 0
| 0.202614
| 0
| 0.026144
| 0.217508
| 0.065879
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071895
| false
| 0
| 0.052288
| 0.03268
| 0.437909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0996f3d1f1ac8a9ea6f99a214f2486805b79d23f
| 3,742
|
py
|
Python
|
__init__.py
|
FabienBasset/evolucare-skill
|
4ecce1615cb11d72196ea745d2753fec19117b12
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
FabienBasset/evolucare-skill
|
4ecce1615cb11d72196ea745d2753fec19117b12
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
FabienBasset/evolucare-skill
|
4ecce1615cb11d72196ea745d2753fec19117b12
|
[
"Apache-2.0"
] | null | null | null |
# TODO: Add an appropriate license to your skill before publishing. See
# the LICENSE file for more information.
# Below is the list of outside modules you'll be using in your skill.
# They might be built-in to Python, from mycroft-core or from external
# libraries. If you use an external library, be sure to include it
# in the requirements.txt file so the library is installed properly
# when the skill gets installed later by a user.
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.util.log import LOG
from mycroft.skills.context import adds_context, removes_context
class EvolucareSkill(MycroftSkill):
def __init__(self):
super(EvolucareSkill, self).__init__(name="EvolucareSkill")
self.last_tension = 0
@intent_handler(IntentBuilder("TensionMesure")
.require("mesure")
.require("tension"))
@adds_context('TensionProtocol')
def handle_tension_question_mesure(self, message):
self.speak_dialog('tension.mesure.protocol')
@intent_handler(IntentBuilder("TensionQuestion")
.require("tension"))
@adds_context('TensionContext')
def handle_tension_question(self, message):
self.speak_dialog('tension.question', expect_response=True)
@intent_handler(IntentBuilder("TensionQuestionDecline")
.require("negation")
.require("TensionContext")
.build())
@removes_context('TensionContext')
def handle_tension_question_decline(self, message):
self.speak_dialog('tension.question.decline')
@intent_handler(IntentBuilder("TensionProtocolIntent")
.require("acceptation")
.require("TensionContext")
.build())
@adds_context('TensionProtocol')
@removes_context('TensionContext')
def handle_tension_question_accept(self, message):
self.speak_dialog('tension.mesure.protocol')
@intent_handler(IntentBuilder("TensionCalculIntent")
.require("pret")
.require("TensionProtocol")
.optionally("negation")
.build())
def handle_tension_calcul_intent(self, message):
neg = message.data.get("negation")
if not neg:
self.TensionCalculate()
else:
self.speak_dialog("tension.protocol.wait")
@removes_context('TensionProtocol')
def TensionCalculate(self):
self.speak_dialog('tension.calcul')
# TODO: calculate and return the actual tension measurement
self.last_tension = 0
self.speak_dialog("tension.response", data={"tension": self.last_tension})
#@intent_handler(IntentBuilder(""))
#def handle_default_intent(self, message):
#self.speak_dialog("response", data={"response": message.data["utterance"]})
#@intent_handler(IntentBuilder("").require("Count").require("Dir"))
#def handle_count_intent(self, message):
#if message.data["Dir"] == "up":
#self.count += 1
#else: # assume "down"
#self.count -= 1
#self.speak_dialog("count.is.now", data={"count": self.count})
# The "stop" method defines what Mycroft does when told to stop during
# the skill's execution. In this case, since the skill's functionality
# is extremely simple, there is no need to override it. If you DO
# need to implement stop, you should return True to indicate you handled
# it.
#
# def stop(self):
# return False
def create_skill():
return EvolucareSkill()
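The intent handlers above rely on Adapt keyword matching plus conversational context. A standalone sketch of the keyword-matching half, following the adapt-parser README (the vocabulary registered here is hypothetical):

from adapt.intent import IntentBuilder
from adapt.engine import IntentDeterminationEngine

engine = IntentDeterminationEngine()
engine.register_entity("tension", "tension")
engine.register_entity("mesure", "mesure")
engine.register_intent_parser(
    IntentBuilder("TensionMesure").require("mesure").require("tension").build()
)

for intent in engine.determine_intent("mesure de la tension"):
    if intent and intent.get("confidence", 0) > 0:
        print(intent["intent_type"])  # TensionMesure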
| 33.711712
| 84
| 0.638696
| 401
| 3,742
| 5.820449
| 0.366584
| 0.034704
| 0.057841
| 0.065981
| 0.172665
| 0.161525
| 0.142245
| 0.062554
| 0.062554
| 0.062554
| 0
| 0.001442
| 0.258685
| 3,742
| 110
| 85
| 34.018182
| 0.839942
| 0.312667
| 0
| 0.288462
| 0
| 0
| 0.171777
| 0.052673
| 0
| 0
| 0
| 0.009091
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0.019231
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0997cd0a89022a9406ffd19fb23a90e8f3cec543
| 300
|
py
|
Python
|
web/searching.py
|
Kabanosk/JAVRIS
|
f3fac115eb537e689c59bd093da34e7f0b34a035
|
[
"MIT"
] | null | null | null |
web/searching.py
|
Kabanosk/JAVRIS
|
f3fac115eb537e689c59bd093da34e7f0b34a035
|
[
"MIT"
] | null | null | null |
web/searching.py
|
Kabanosk/JAVRIS
|
f3fac115eb537e689c59bd093da34e7f0b34a035
|
[
"MIT"
] | null | null | null |
import webbrowser as web
from bs4 import BeautifulSoup
STARTING_URL = 'https://www.google.com/search?q='
def get_first_website(phrase):
phrase_split = phrase.split()
phrase_url = '+'.join(phrase_split)
search_url = STARTING_URL + phrase_url
web.open_new_tab(search_url)
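Note that BeautifulSoup is imported above but never used; only the URL construction and the webbrowser call do any work. Example usage:

get_first_website("python list comprehension")
# opens https://www.google.com/search?q=python+list+comprehension in a new tab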
| 25
| 50
| 0.716667
| 42
| 300
| 4.833333
| 0.595238
| 0.162562
| 0.167488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004082
| 0.183333
| 300
| 11
| 51
| 27.272727
| 0.82449
| 0
| 0
| 0
| 0
| 0
| 0.114583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
099c6d4626feec61b7b00c6c857042abd77c6c2a
| 2,039
|
py
|
Python
|
ai_script_writer.py
|
FLWL/aoc-ai-parser
|
2e08fc7b0909579aced5a84bda3645dbe8834d39
|
[
"MIT"
] | 10
|
2019-03-17T00:48:35.000Z
|
2022-02-06T18:15:48.000Z
|
ai_script_writer.py
|
FLWL/aoc-ai-parser
|
2e08fc7b0909579aced5a84bda3645dbe8834d39
|
[
"MIT"
] | null | null | null |
ai_script_writer.py
|
FLWL/aoc-ai-parser
|
2e08fc7b0909579aced5a84bda3645dbe8834d39
|
[
"MIT"
] | 1
|
2022-01-16T12:38:52.000Z
|
2022-01-16T12:38:52.000Z
|
from ai_constants import *
import ai_generator
def get_tab_string(tabment):
return '\t' * tabment
def express_node(cur_node, tabment = 0):
child_nodes = cur_node.children
if cur_node.type == 'DEFRULE':
return "(defrule\n" \
+ express_node(child_nodes[0], tabment + 1) \
+ "=>\n" \
+ express_node(child_nodes[1], tabment + 1) \
+ ")"
elif cur_node.type == 'CONDITIONS' or cur_node.type == 'ACTIONS':
variable_amount_return = ""
for child_node in child_nodes:
variable_amount_return += str(express_node(child_node, tabment))
return variable_amount_return
elif cur_node.value in FLOW:
variable_amount_return = get_tab_string(tabment) + "(" + str(cur_node.value) + "\n"
for child_node in child_nodes:
variable_amount_return += express_node(child_node, tabment + 1)
variable_amount_return += get_tab_string(tabment) + ")\n"
return variable_amount_return
elif cur_node.type == 'FACT*' or cur_node.type == 'ACTION*':
variable_amount_return = get_tab_string(tabment) + "(" + str(cur_node.value)
for child_node in child_nodes:
variable_amount_return += " " + str(express_node(child_node, tabment))
variable_amount_return += ")\n"
return variable_amount_return
return cur_node.value
def express_script(script_tree):
script_text = ""
for rule in script_tree:
script_text += express_node(rule)
script_text += "\n\n"
return script_text
def write_script(script_tree, file_path):
with open(file_path, 'w') as f:
f.write(express_script(script_tree))
f.flush()
if __name__ == '__main__':
# generate and express a rule
rule_tree = ai_generator.generate_rule()
rule_script = express_node(rule_tree)
print(rule_script)
# generate and write a script
script_tree = ai_generator.generate_script()
write_script(script_tree, "random_script.per")
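A small sketch of the tree shape express_node expects, using a hypothetical Node class (ai_generator's real node type is not shown in this file); it is meant to run in the same module as express_node, and assumes neither value below is a FLOW keyword from ai_constants:

class Node:
    def __init__(self, type, value=None, children=()):
        self.type, self.value, self.children = type, value, list(children)

rule = Node('DEFRULE', children=[
    Node('CONDITIONS', children=[Node('FACT*', 'true')]),
    Node('ACTIONS', children=[Node('ACTION*', 'do-nothing')]),
])
print(express_node(rule))
# (defrule
# 	(true)
# =>
# 	(do-nothing)
# )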
| 30.432836
| 91
| 0.650809
| 262
| 2,039
| 4.698473
| 0.221374
| 0.062551
| 0.178716
| 0.061738
| 0.415922
| 0.3355
| 0.3355
| 0.243704
| 0.243704
| 0.207961
| 0
| 0.003901
| 0.245709
| 2,039
| 66
| 92
| 30.893939
| 0.796489
| 0.026974
| 0
| 0.130435
| 0
| 0
| 0.047451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.043478
| 0.021739
| 0.282609
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
099d1dd35cd095ae208ec87e7df60676dd935b0a
| 1,316
|
py
|
Python
|
euler-148.py
|
simonolander/euler
|
4d7c4cd9333201cd0065419a511f111b6d75d90c
|
[
"MIT"
] | null | null | null |
euler-148.py
|
simonolander/euler
|
4d7c4cd9333201cd0065419a511f111b6d75d90c
|
[
"MIT"
] | null | null | null |
euler-148.py
|
simonolander/euler
|
4d7c4cd9333201cd0065419a511f111b6d75d90c
|
[
"MIT"
] | null | null | null |
import numpy as np
from tabulate import tabulate
np.set_printoptions(linewidth=400, threshold=100000)
def product(gen):
ans = 1
for g in gen:
ans *= g + 1
return ans
def count_divs_pow(p):
if p == 0 or p == 1:
return 0
else:
full_size = 7**(p-1) * (7**(p-1) - 1) // 2
fulls = 21 * full_size
smalls = 28 * count_divs_pow(p-1)
return fulls + smalls
def base7(n):
ans = []
while n > 0:
ans.append(n % 7)
n //= 7
return ans
def num_not_divisible(i):
return product(base7(i))
def pascal(n):
pascal = np.zeros((n, n))
for x, y in np.ndindex(n, n):
if x == 0 or y == 0:
pascal[x, y] = 1
else:
pascal[x, y] = (pascal[x-1, y] + pascal[x, y-1]) % 7
print(pascal)
def pascal_zeroes(n):
row = [1]
zeroes = [[0, 0, 0]]
for i in range(1, n):
row = [1] + [(a + b) % 7 for a, b in zip(row, row[1::])] + [1]
count = len([r for r in row if r == 0])
zeroes.append([i, count, count - zeroes[i-1][1]])
return tabulate(zeroes, ['Row index', 'Count Zeros', 'Delta'])
def c(n):
ans = 0
for i in range(n):
ans += num_not_divisible(i)
if i % 1000000 == 0:
print(i, ans)
return ans
print(c(1000000000))
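The key identity behind num_not_divisible is Lucas' theorem: C(n, k) mod 7 is nonzero exactly when every base-7 digit of k is at most the corresponding digit of n, so row n of Pascal's triangle contains prod(d_i + 1) entries not divisible by 7, where the d_i are the base-7 digits of n. A quick brute-force check of that identity:

from math import comb

def base7_digits(n):
    ds = []
    while n > 0:
        ds.append(n % 7)
        n //= 7
    return ds

for n in range(1, 200):
    brute = sum(1 for k in range(n + 1) if comb(n, k) % 7 != 0)
    lucas = 1
    for d in base7_digits(n):
        lucas *= d + 1
    assert brute == lucas
print("Lucas identity verified for n < 200")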
| 19.939394
| 70
| 0.506839
| 215
| 1,316
| 3.046512
| 0.293023
| 0.042748
| 0.036641
| 0.039695
| 0.036641
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.338146
| 1,316
| 65
| 71
| 20.246154
| 0.675086
| 0
| 0
| 0.104167
| 0
| 0
| 0.015198
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145833
| false
| 0
| 0.041667
| 0.020833
| 0.333333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
099e3f2b24bd01bfd5b7e1350533a5d17bf7ffdd
| 1,695
|
py
|
Python
|
abandoned-ideas/yml-generator.py
|
HenryZheng1/sengrep-cli-py
|
89d2ffad813706a534290f248220f0d32aeb4c3c
|
[
"Apache-2.0"
] | null | null | null |
abandoned-ideas/yml-generator.py
|
HenryZheng1/sengrep-cli-py
|
89d2ffad813706a534290f248220f0d32aeb4c3c
|
[
"Apache-2.0"
] | null | null | null |
abandoned-ideas/yml-generator.py
|
HenryZheng1/sengrep-cli-py
|
89d2ffad813706a534290f248220f0d32aeb4c3c
|
[
"Apache-2.0"
] | 2
|
2021-07-23T16:46:16.000Z
|
2021-07-30T02:59:43.000Z
|
from csv import reader
import yaml
import json
def splitrow(row, DELIMETER):
x = row.split(DELIMETER)
return ([] if row == '' else x)
def get_data_from_csv(settings, DELIMETER = '|'):
rules = []
with open(settings['CSV_FILENAME'], 'r') as csv_file:
csv_reader = reader(csv_file)
for row in csv_reader:
bug_id = row[0]
pattern_either = splitrow(row[1], DELIMETER)
pattern_inside = splitrow(row[2], DELIMETER)
pattern_not_inside = splitrow(row[3], DELIMETER)
languages = splitrow(row[4], DELIMETER)
message = row[5]
severity = row[6]
patterns = { "pattern-either": {"pattern":patt for patt in pattern_either} ,
"pattern-not-inside" : { "pattern": patt for patt in pattern_not_inside },
"pattern-inside" : {"pattern" : patt for patt in pattern_inside},
}
patterns = {k: v for k, v in patterns.items() if v}
single_rule_obj = { "id" : bug_id, "patterns" : patterns, "message" : message, "languages" : languages, "severity" : severity }
rules.append(single_rule_obj)
return {"rules" : rules}
def convert_json_to_yaml(yml_dict, settings):
with open(settings['OUTPUT_FILENAME'], 'w') as ymlfile:
yaml.dump(yml_dict, ymlfile, allow_unicode=True)
def go(config_filename = 'yml-generator-config.json'):
with open(config_filename, 'r') as json_file:
settings = json.load(json_file)
yml_dict = get_data_from_csv(settings, settings['DELIMETER'])
convert_json_to_yaml(yml_dict, settings)
go()
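A hedged sketch of the row format get_data_from_csv expects (the column values here are made up): pipe-delimited cells expand into lists of pattern entries.

row = ["BUG-1", "foo(...)|bar(...)", "", "", "python", "example message", "WARNING"]
pattern_either = row[1].split('|')
patterns = {"pattern-either": [{"pattern": p} for p in pattern_either]}
print(patterns)
# {'pattern-either': [{'pattern': 'foo(...)'}, {'pattern': 'bar(...)'}]}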
| 35.3125
| 150
| 0.60118
| 207
| 1,695
| 4.724638
| 0.323672
| 0.056237
| 0.04908
| 0.055215
| 0.205521
| 0.160532
| 0.132924
| 0
| 0
| 0
| 0
| 0.005752
| 0.282006
| 1,695
| 47
| 151
| 36.06383
| 0.797864
| 0
| 0
| 0
| 0
| 0
| 0.100945
| 0.014758
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.085714
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
099e6785d5350ed75115c74f1a4e9bf333839d99
| 4,018
|
py
|
Python
|
linearizer/utils.py
|
Max1993Liu/Linearizer
|
739c47c0d98d262a0bc962a450729bcf83c61212
|
[
"MIT"
] | null | null | null |
linearizer/utils.py
|
Max1993Liu/Linearizer
|
739c47c0d98d262a0bc962a450729bcf83c61212
|
[
"MIT"
] | null | null | null |
linearizer/utils.py
|
Max1993Liu/Linearizer
|
739c47c0d98d262a0bc962a450729bcf83c61212
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from types import FunctionType
import warnings
from .transform import BaseTransformer
def drop_na(x, y, according='both'):
""" Drop the values in both x and y if the element in `according` is missing
ex. drop_na([1, 2, np.nan], [1, 2, 3], 'x') => [1, 2], [1, 2]
"""
if according == 'x':
valid_index = ~np.isnan(x)
elif according == 'y':
valid_index = ~np.isnan(y)
elif according == 'both':
valid_index = (~np.isnan(x)) & (~np.isnan(y))
else:
raise ValueError('According should be one of {}'.format(['x', 'y', 'both']))
return np.array(x)[valid_index], np.array(y)[valid_index]
def check_binary_label(y):
""" Make sure the label contains only 0 and 1 """
if set(y) != set([0, 1]):
raise ValueError('The label must be binary 0 or 1.')
def check_numerical(x):
if isinstance(x, list):
x = x[0]
if not pd.api.types.is_numeric_dtype(x):
raise ValueError('The input must be a numerical array.')
def as_positive_rate(x, y, bins, interval_value='mean'):
""" Group numerical variable x into several bins
and calculate the positive rate within each bin
:param bins: Integer or a sequence of values as cutoff points
:param interval_value: One of ['left', 'right', 'mean'], how the interval is converted to a scalar
"""
if isinstance(x, list):
x = np.array(x)
check_numerical(x)
check_binary_label(y)
if len(set(x)) <= bins:
pos_pct = pd.Series(y).groupby(x).mean()
else:
intervals = pd.cut(x, bins)
if interval_value == 'left':
intervals = [i.left for i in intervals]
elif interval_value == 'right':
intervals = [i.right for i in intervals]
elif interval_value == 'mean':
intervals = [(i.left + i.right) / 2.0 for i in intervals]
else:
raise ValueError('Only {} is supported.'.format(['left', 'right', 'mean']))
pos_pct = pd.Series(y).groupby(intervals).mean()
return pos_pct.index.values, pos_pct.values
EPSILON = 1e-15
def _odds(p):
p = np.clip(p, EPSILON, 1 - EPSILON)
return p / (1 - p)
def _logodds(p):
return np.log(_odds(p))
_TRANSFORMS = {
'odds': _odds,
'logodds': _logodds
}
def preprocess(x, y, binary_label=True, bins=50, transform_y=None, interval_value='mean', ignore_na=True):
""" Preprocess the input before finding the best transformations
:param binary_label: Whether the label is binary (0, 1), in other words. whether the problem
is classification or regression.
:param transform_y: Transformation applied to y, can either be a string within ['odds', 'logodds'],
or a function
:param bins: Integer or a sequence of values as cutoff points
:param interval_value: One of ['left', 'right', 'mean'], how the interval is converted to a scalar
:ignore_na: Whether to ignore NA
"""
if binary_label:
x, y = as_positive_rate(x, y, bins, interval_value)
if transform_y is not None:
# make sure y is an array
y = np.array(y)
if isinstance(transform_y, str):
if transform_y not in _TRANSFORMS:
raise ValueError('Only {} is supported.'.format(_TRANSFORMS.keys()))
y = _TRANSFORMS[transform_y](y)
elif isinstance(transform_y, FunctionType):
y = transform_y(y)
else:
raise ValueError('Only string and function is supported for `transform_y`.')
if ignore_na:
x, y = drop_na(x, y, according='both')
return x, y
def _check_complexity():
cpl = {}
for cls in BaseTransformer.__subclasses__():
complexity = cls.complexity
if complexity in cpl:
warnings.warn('{} and {} have the same complexity {}.'.\
format(cls.__name__, cpl[complexity].__name__, complexity))
cpl[complexity] = cls
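A standalone illustration of the 'logodds' transform that preprocess can apply to y: clipping to [EPSILON, 1 - EPSILON] keeps log(p / (1 - p)) finite at the endpoints.

import numpy as np

EPS = 1e-15
p = np.clip(np.array([0.0, 0.25, 0.5, 0.9, 1.0]), EPS, 1 - EPS)
print(np.log(p / (1 - p)))
# [-34.539  -1.099   0.      2.197  34.539]  (approximately)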
| 31.637795
| 106
| 0.612245
| 561
| 4,018
| 4.26025
| 0.253119
| 0.007531
| 0.020084
| 0.021339
| 0.250209
| 0.220084
| 0.153975
| 0.127197
| 0.099582
| 0.099582
| 0
| 0.009174
| 0.267546
| 4,018
| 126
| 107
| 31.888889
| 0.802922
| 0.245893
| 0
| 0.078947
| 0
| 0
| 0.100952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.065789
| 0.013158
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a082c5b766d52ac4bd284843a07b1bfbf38eba
| 325
|
py
|
Python
|
testings.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
testings.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
testings.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
class Object:
def __init__(self, type):
self.type = type
def square(self, a, b):
if self.type == 'square':
return a * b
if self.type == 'triangle':
return (a * b) / 2
vid = input()
object = Object(vid)
a = int(input())
b = int(input())
print(f'{object.square(a,b)}')
| 18.055556
| 35
| 0.52
| 46
| 325
| 3.586957
| 0.391304
| 0.193939
| 0.048485
| 0.09697
| 0.145455
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004464
| 0.310769
| 325
| 17
| 36
| 19.117647
| 0.732143
| 0
| 0
| 0
| 0
| 0
| 0.104615
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0
| 0
| 0.384615
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a0e0698bb4f209bcf75379d2f58d655b33426a
| 809
|
py
|
Python
|
argdeco/__main__.py
|
klorenz/python-argdeco
|
eb614d63430c5da68a972bdc40f8a1541070089d
|
[
"MIT"
] | null | null | null |
argdeco/__main__.py
|
klorenz/python-argdeco
|
eb614d63430c5da68a972bdc40f8a1541070089d
|
[
"MIT"
] | null | null | null |
argdeco/__main__.py
|
klorenz/python-argdeco
|
eb614d63430c5da68a972bdc40f8a1541070089d
|
[
"MIT"
] | null | null | null |
from .main import Main
from .arguments import arg
from textwrap import dedent
main = Main()
command = main.command
@command('install-bash-completions',
arg('--dest', help="destination file. Typically ~/.bashrc or ~/.profile", default="~/.bashrc"),
arg('script_name'),
)
def install_bash_completions(dest, script_name):
main.install_bash_completion(dest=dest, script_name=script_name)
print(dedent("""
To activate bash completions for script_name, run:
. %s
""" % dest))
@command('uninstall-bash-completions',
arg('--dest', help="destination file. Typically ~/.bashrc or ~/.profile", default="~/.bashrc"),
arg('script_name'),
)
def uninstall_bash_completions(dest, script_name):
main.uninstall_bash_completion(dest=dest, script_name=script_name)
main()
| 28.892857
| 100
| 0.700865
| 100
| 809
| 5.5
| 0.31
| 0.163636
| 0.101818
| 0.08
| 0.614545
| 0.614545
| 0.494545
| 0.494545
| 0.341818
| 0.341818
| 0
| 0
| 0.15204
| 809
| 27
| 101
| 29.962963
| 0.801749
| 0
| 0
| 0.181818
| 0
| 0
| 0.352287
| 0.061805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.227273
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a25fafbbc8341875cf49512a6963ebb67af9a9
| 1,092
|
py
|
Python
|
working-with-data/part1/reading-and-writing-text-files.py
|
LucasHelal/data-science
|
9b243be1dea23a521e6ebb49dc358708a9b17dbd
|
[
"MIT"
] | null | null | null |
working-with-data/part1/reading-and-writing-text-files.py
|
LucasHelal/data-science
|
9b243be1dea23a521e6ebb49dc358708a9b17dbd
|
[
"MIT"
] | null | null | null |
working-with-data/part1/reading-and-writing-text-files.py
|
LucasHelal/data-science
|
9b243be1dea23a521e6ebb49dc358708a9b17dbd
|
[
"MIT"
] | null | null | null |
import sys
import pandas as pd
# Can open csv files as a dataframe
dframe = pd.read_csv('lec25.csv')
# Can also use read_table with ',' as a delimiter
dframe = pd.read_table('lec25.csv', sep=',')
# If we don't want the header to be the first row
dframe = pd.read_csv('lec25.csv', header=None)
# We can also indicate a particular number of rows to be read
pd.read_csv('lec25.csv', header=None, nrows=2)
# Now let's see how we can write DataFrames out to text files
dframe.to_csv('mytextdata_out.csv')
# You'll see this file where your IPython notebooks are saved (usually
# under My Documents)
# We can also use other delimiters
# we'll import sys to see the output
# Use sys.stdout to see the output directly and not save it
dframe.to_csv(sys.stdout, sep='_')
# Just to make sure we understand the delimiter
dframe.to_csv(sys.stdout, sep='?')
# We can also choose to write only a specific subset of columns
dframe.to_csv(sys.stdout, columns=[0, 1, 2])
# You should also check out Python's built-in csv reader and writer for more info
# https://docs.python.org/2/library/csv.html
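A self-contained variant of the calls above, using an in-memory buffer so it runs without a lec25.csv file on disk:

import sys
from io import StringIO
import pandas as pd

csv_text = "a,b,c\n1,2,3\n4,5,6\n"
dframe = pd.read_csv(StringIO(csv_text))
dframe.to_csv(sys.stdout, sep='_')
# _a_b_c
# 0_1_2_3
# 1_4_5_6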
| 28.736842
| 79
| 0.739011
| 199
| 1,092
| 4
| 0.492462
| 0.030151
| 0.055276
| 0.052764
| 0.187186
| 0.16206
| 0.067839
| 0
| 0
| 0
| 0
| 0.014254
| 0.164835
| 1,092
| 37
| 80
| 29.513514
| 0.858553
| 0.636447
| 0
| 0
| 0
| 0
| 0.149215
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a2b27d6e9143175c3eaab5d912857ca2f60085
| 1,428
|
py
|
Python
|
comments/urls.py
|
ggetzie/greaterdebater
|
fb1739f3db42717f3d63fe6c9dbf0c2402fb1fd5
|
[
"MIT"
] | null | null | null |
comments/urls.py
|
ggetzie/greaterdebater
|
fb1739f3db42717f3d63fe6c9dbf0c2402fb1fd5
|
[
"MIT"
] | 1
|
2020-05-02T02:03:08.000Z
|
2020-05-02T02:03:08.000Z
|
comments/urls.py
|
ggetzie/greaterdebater
|
fb1739f3db42717f3d63fe6c9dbf0c2402fb1fd5
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns
from comments.views import CommentDebateList
# This url file is included from items.urls with the prefix /comments/
urlpatterns = patterns('',
# Add a comment to a topic
(r'^(?P<topic_id>\d+)/add/$', 'comments.views.add'),
# Edit a comment
(r'^(?P<topic_id>\d+)/edit/$', 'comments.views.edit'),
# View a single comment on a page by itself
(r'^(?P<comment_id>\d+)/?$', 'comments.views.comment_detail'),
# Delete a comment
(r'^(?:delete|undelete)/$', 'comments.views.delete'),
# View all arguments associated with a comment
(r'^(?P<comment_id>\d+)/arguments/?(?P<page>\d+)?/?$',
CommentDebateList.as_view(paginate_by=10,
template_name='comments/comment_args.html',
context_object_name='args_list')),
# Flag a comment as spam
(r'^flag/$', 'comments.views.flag'),
# Follow or unfollow a topic or comment for
# updates when new replies are made
(r'^follow/$', 'comments.views.toggle_follow'),
)
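For reference, patterns() and dotted-string view references were removed after Django 1.9; a modern equivalent of the first two routes would look roughly like this (a sketch, assuming the same view callables are importable):

from django.urls import re_path
from comments import views

urlpatterns = [
    re_path(r'^(?P<topic_id>\d+)/add/$', views.add, name='add'),
    re_path(r'^(?P<topic_id>\d+)/edit/$', views.edit, name='edit'),
]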
| 42
| 93
| 0.464286
| 142
| 1,428
| 4.577465
| 0.450704
| 0.14
| 0.041538
| 0.027692
| 0.067692
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00241
| 0.418768
| 1,428
| 33
| 94
| 43.272727
| 0.780723
| 0.217787
| 0
| 0
| 0
| 0
| 0.293852
| 0.203436
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a4876d8faaee60c05b563c48e7b9207133b300
| 2,022
|
py
|
Python
|
src/engine/app.py
|
vxlk/stoinks
|
afea92824a21d203098dd41137957f2343ec363d
|
[
"MIT"
] | 1
|
2020-12-30T23:54:58.000Z
|
2020-12-30T23:54:58.000Z
|
src/engine/app.py
|
vxlk/stoinks
|
afea92824a21d203098dd41137957f2343ec363d
|
[
"MIT"
] | null | null | null |
src/engine/app.py
|
vxlk/stoinks
|
afea92824a21d203098dd41137957f2343ec363d
|
[
"MIT"
] | null | null | null |
import sys
from threading import Thread
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from pyqtconsole.console import PythonConsole
from view.console import Console
from view.gui_dock import *
from util.logger import *
from model.engine import *
# clear logs
logger.ClearLogs()
# make Qapp
app = QApplication([])
app.setApplicationName("Stoinks Alpha")
window = QMainWindow()
console = PythonConsole()
logConsole = Console()
# stop debug console from resizing
logConsoleFrame = QScrollArea()
logConsoleFrame.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
logConsoleFrame.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
logConsoleFrame.setWidgetResizable(True)
logConsoleFrame.setWidget(logConsole)
#temp for now
gui = finance_tab_container()
consoleContainer = QDockWidget("Input")
consoleContainer.setAllowedAreas(Qt.LeftDockWidgetArea)
consoleContainer.setWidget(console)
logConsoleContainer = QDockWidget("Output")
logConsoleContainer.setAllowedAreas(Qt.RightDockWidgetArea)
logConsoleContainer.setWidget(logConsoleFrame)
guiContainer = QDockWidget("GUI View")
guiContainer.setAllowedAreas(Qt.TopDockWidgetArea)
guiContainer.setWidget(gui)
window.addDockWidget(Qt.LeftDockWidgetArea, consoleContainer)
window.addDockWidget(Qt.RightDockWidgetArea, logConsoleContainer)
window.addDockWidget(Qt.TopDockWidgetArea, guiContainer)
# console.show() is unnecessary; addDockWidget() already shows its widget
console.eval_in_thread() # let the input terminal go
# make an engine
engine.connectConsole(console)
engine.connectDebugConsole(logConsole)
# Force the style to be the same on all OSs:
app.setStyle("Fusion")
# Now use a palette to switch to dark colors:
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, Qt.white)
app.setPalette(palette)
window.setMinimumSize(820, 800)
window.show()
app.exec_()
# sys.exit(app.exec_())
engine.stop()
| 27.69863
| 67
| 0.787834
| 218
| 2,022
| 7.275229
| 0.481651
| 0.031526
| 0.039723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008479
| 0.125124
| 2,022
| 73
| 68
| 27.69863
| 0.888072
| 0.137488
| 0
| 0
| 0
| 0
| 0.022864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a65708ef251f13b9781f1e9b250a16f7eb5521
| 8,710
|
py
|
Python
|
Agents/agent.py
|
TylerJamesMalloy/bullet3
|
e357853815c1e0297683218273de79e586b574c8
|
[
"Zlib"
] | null | null | null |
Agents/agent.py
|
TylerJamesMalloy/bullet3
|
e357853815c1e0297683218273de79e586b574c8
|
[
"Zlib"
] | null | null | null |
Agents/agent.py
|
TylerJamesMalloy/bullet3
|
e357853815c1e0297683218273de79e586b574c8
|
[
"Zlib"
] | null | null | null |
import logging, os, time, multiprocessing, sys, signal
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
import gym
import pybullet, pybullet_envs, pybullet_data
import numpy as np
import pandas as pd
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines.clac.policies import MlpPolicy as CLAC_MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import SAC, CLAC
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
# ENVIRONMENT_NAMES Walker2DBulletEnv-v0, Robots/AntBulletEnv-v0 , HopperBulletEnv-v0 , HumanoidBulletEnv-v0, HalfCheetahBulletEnv-v0
FOLDER = "Results/InvertedDoublePendulumBulletEnv"
NUM_RESAMPLES = 50
NUM_TRAINING_STEPS = 100000
NUM_TESTING_STEPS = 50000
ENVIRONMENT_NAME = "InvertedDoublePendulumBulletEnv-v0"
if(not os.path.exists(FOLDER + '/Extreme/results')):
os.mkdir(FOLDER + '/Extreme/results')
if(not os.path.exists(FOLDER + '/Generalization/results')):
os.mkdir(FOLDER + '/Generalization/results')
if(not os.path.exists(FOLDER + '/Training/results')):
os.mkdir(FOLDER + '/Training/results')
if(not os.path.exists(FOLDER + '/Training/models')):
os.mkdir(FOLDER + '/Training/models')
CLAC_COEFS = [2.0]
SAC_COEFS = [2.0]
def eval_model(model, env, model_name, coef, testing_timesteps, training_timestep, agent_step, resample_step, randomization):
obs = env.reset()
states = None
reward_sum = 0
Data = pd.DataFrame()
all_rewards = []
allPlayedCards = []
if(randomization > 0):
env.env_method("randomize", randomization)
for test_time in range(testing_timesteps):
action, states = model.predict(obs, states)
obs, rewards, dones, infos = env.step(action)
reward_sum += rewards[0]
if(dones[0]):
d = {"Model": model_name, "Reward": reward_sum, "Timestep": training_timestep, "Coef": coef, "Randomization": randomization, "AgentID": agent_step, "Resample": resample_step}
Data = Data.append(d, ignore_index=True)
all_rewards.append(reward_sum)
reward_sum = 0
if(randomization > 0):
env.env_method("randomize", randomization)
Avg = np.mean(all_rewards)
return Data
def test_agent(agent_step):
now = time.time()
for coef_index in range(len(CLAC_COEFS)):
mut_coef = CLAC_COEFS[coef_index]
ent_coef = SAC_COEFS[coef_index]
training_timestep = 0
clac_env = gym.make(ENVIRONMENT_NAME)
clac_env = DummyVecEnv([lambda: clac_env])
clac_model = CLAC(CLAC_MlpPolicy, clac_env, mut_inf_coef=mut_coef, verbose=1)
sac_env = gym.make(ENVIRONMENT_NAME)
sac_env = DummyVecEnv([lambda: sac_env])
sac_model = SAC(MlpPolicy, sac_env, ent_coef=ent_coef, verbose=1)
mirl_env = gym.make(ENVIRONMENT_NAME)
mirl_env = DummyVecEnv([lambda: mirl_env])
mirl_model = CLAC(CLAC_MlpPolicy, mirl_env, mut_inf_coef=mut_coef, coef_schedule=3.3e-3, verbose=1)
for resample_step in range(0, NUM_RESAMPLES):
features = pd.DataFrame()
if(agent_step == 1):
print(mut_coef, " ", ent_coef, " ", NUM_TRAINING_STEPS, " ", ENVIRONMENT_NAME, " ", FOLDER, " ", resample_step)
(clac_model, learning_results) = clac_model.learn(total_timesteps=NUM_TRAINING_STEPS, log_interval=1000)
(sac_model, learning_results) = sac_model.learn(total_timesteps=NUM_TRAINING_STEPS, log_interval=1000)
(mirl_model, learning_results) = mirl_model.learn(total_timesteps=NUM_TRAINING_STEPS, log_interval=1000)
# Save models
clac_model.save(FOLDER + "/Training/models/CLAC_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step))
sac_model.save(FOLDER + "/Training/models/SAC_" + str(ent_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step))
mirl_model.save(FOLDER + "/Training/models/MIRL_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step))
training_timestep += NUM_TRAINING_STEPS
# Test Normal
eval_results = eval_model(clac_model, clac_env, "CLAC", mut_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 0)
eval_results.to_pickle(FOLDER + "/Training/results/CLAC_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
eval_results = eval_model(sac_model, sac_env, "SAC", ent_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 0)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Training/results/SAC_" + str(ent_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
eval_results = eval_model(mirl_model, mirl_env, "MIRL", mut_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 0)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Training/results/MIRL_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
# Test generalization
eval_results = eval_model(clac_model, clac_env, "CLAC", mut_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 1)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Generalization/results/CLAC_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
eval_results = eval_model(sac_model, sac_env, "SAC", ent_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 1)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Generalization/results/SAC_" + str(ent_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
eval_results = eval_model(mirl_model, mirl_env, "MIRL", mut_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 1)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Generalization/results/MIRL_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
# Test generalization Extreme
eval_results = eval_model(clac_model, clac_env, "CLAC", mut_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 2)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Extreme/results/CLAC_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
eval_results = eval_model(sac_model, sac_env, "SAC", ent_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 2)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Extreme/results/SAC_" + str(ent_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
eval_results = eval_model(mirl_model, mirl_env, "MIRL", mut_coef, NUM_TESTING_STEPS, training_timestep, agent_step, resample_step, 2)
eval_results['AgentID'] = agent_step
eval_results.to_pickle(FOLDER + "/Extreme/results/MIRL_" + str(mut_coef).replace(".", "p") + "_" + str(agent_step) + "_" + str(resample_step) + ".pkl")
clac_env.env_method("reset_features")
sac_env.env_method("reset_features")
mirl_env.env_method("reset_features")
del sac_model
del sac_env
del clac_model
del clac_env
del mirl_model
del mirl_env
later = time.time()
difference = int(later - now)
print("Tested Agent Time: ", difference)
def main():
Agents = [1, 2]
print("Initializng workers: ", Agents)
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
pool = multiprocessing.Pool(processes=len(Agents))
signal.signal(signal.SIGINT, original_sigint_handler)
try:
print("Starting jobs")
res = pool.map_async(test_agent, Agents)
print("Waiting for results")
#res.get(1000000) # Without the timeout this blocking call ignores all signals.
except KeyboardInterrupt:
print("Caught Keyboard Interrupt, terminating workers")
pool.terminate()
pool.join()
else:
print("Normal termination")
pool.close()
pool.join()
if __name__ == "__main__":
main()
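The signal handling in main() is a standard trick for interruptible pools: workers are forked/spawned while SIGINT is ignored, so Ctrl-C reaches only the parent, which can then terminate the pool cleanly. A minimal standalone sketch:

import multiprocessing
import signal
import time

def worker(i):
    time.sleep(0.1)
    return i * i

if __name__ == "__main__":
    original = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = multiprocessing.Pool(2)          # children start with SIGINT ignored
    signal.signal(signal.SIGINT, original)  # parent restores its own handler
    try:
        print(pool.map_async(worker, [1, 2, 3]).get(60))  # [1, 4, 9]
    except KeyboardInterrupt:
        pool.terminate()
    else:
        pool.close()
    pool.join()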
| 46.57754
| 186
| 0.666475
| 1,083
| 8,710
| 5.036934
| 0.171745
| 0.054445
| 0.026398
| 0.032997
| 0.520257
| 0.492759
| 0.474427
| 0.458845
| 0.426581
| 0.426581
| 0
| 0.010124
| 0.2062
| 8,710
| 186
| 187
| 46.827957
| 0.778855
| 0.042939
| 0
| 0.120301
| 0
| 0
| 0.115301
| 0.048523
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022556
| false
| 0
| 0.075188
| 0
| 0.105263
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a9810fc20f0e86e48fafc4f5dbb9adb6c5702a
| 1,274
|
py
|
Python
|
ToDoApp/todo/urls.py
|
akmcinto/ToDoApp
|
2176294c1cfc33a2e651f613f23922a2c8879a84
|
[
"Apache-2.0"
] | null | null | null |
ToDoApp/todo/urls.py
|
akmcinto/ToDoApp
|
2176294c1cfc33a2e651f613f23922a2c8879a84
|
[
"Apache-2.0"
] | null | null | null |
ToDoApp/todo/urls.py
|
akmcinto/ToDoApp
|
2176294c1cfc33a2e651f613f23922a2c8879a84
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2016 Andrea McIntosh
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf.urls import url, include
from . import views
app_name = 'todo'
urlpatterns = [
url(r'^$', views.index_view, name='index'),
url(r'^(?P<pk>[0-9]+)/$', views.list_details, name='detail'),
url(r'^(?P<pk>[0-9]+)/newitem/$', views.new_item, name='new_item'),
url(r'^newlist/$', views.new_list, name='new_list'),
url(r'^register/$', views.register, name='register'),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^accounts/logout/$', views.user_logout, name='logout'),
url(r'^accounts/viewlists/$', views.view_lists, name='viewlists'),
url(r'^accounts/', include('django.contrib.auth.urls')),
]
| 39.8125
| 79
| 0.689953
| 188
| 1,274
| 4.62766
| 0.515957
| 0.041379
| 0.055172
| 0.036782
| 0.02069
| 0.02069
| 0
| 0
| 0
| 0
| 0
| 0.011194
| 0.158556
| 1,274
| 31
| 80
| 41.096774
| 0.800373
| 0.435636
| 0
| 0
| 0
| 0
| 0.360294
| 0.148529
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09a9fcdb559137e2907018a24bc26f28eb5ecd69
| 81,886
|
py
|
Python
|
froi/main.py
|
sunshineDrizzle/FreeROI
|
e2bae1a19835667988e9dbe4a1a88e5b2778d819
|
[
"BSD-3-Clause"
] | 13
|
2016-02-12T05:10:23.000Z
|
2021-01-13T01:40:12.000Z
|
froi/main.py
|
sunshineDrizzle/FreeROI
|
e2bae1a19835667988e9dbe4a1a88e5b2778d819
|
[
"BSD-3-Clause"
] | 14
|
2015-05-04T05:56:45.000Z
|
2021-01-24T11:49:13.000Z
|
froi/main.py
|
sunshineDrizzle/FreeROI
|
e2bae1a19835667988e9dbe4a1a88e5b2778d819
|
[
"BSD-3-Clause"
] | 8
|
2016-03-07T06:29:51.000Z
|
2017-10-30T13:59:27.000Z
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Graphic User Interface."""
import sys
import os
import glob
import ConfigParser
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from version import __version__
from algorithm.imtool import label_edge_detection as vol_label_edge_detection
from algorithm.imtool import inverse_transformation
from algorithm.meshtool import label_edge_detection as surf_label_edge_detection
from core.labelconfig import LabelConfig
from utils import get_icon_dir
from widgets.listwidget import LayerView
from widgets.gridwidget import GridView
from widgets.orthwidget import OrthView
from widgets.datamodel import VolumeListModel
from widgets.drawsettings import PainterStatus, ViewSettings, MoveSettings
from widgets.binarizationdialog import VolBinarizationDialog, SurfBinarizationDialog
from widgets.intersectdialog import VolIntersectDialog, SurfIntersectDialog
from widgets.localmaxdialog import LocalMaxDialog
from widgets.no_gui_tools import gen_label_color
from widgets.smoothingdialog import SmoothingDialog
from widgets.growdialog import GrowDialog, VolumeRGDialog
from widgets.watersheddialog import WatershedDialog
from widgets.slicdialog import SLICDialog
from widgets.clusterdialog import SurfClusterDialog, VolClusterDialog
from widgets.regularroidialog import RegularROIDialog
from widgets.regularroifromcsvfiledialog import RegularROIFromCSVFileDialog
from widgets.roi2gwmidialog import Roi2gwmiDialog
from widgets.roimergedialog import ROIMergeDialog
from widgets.opendialog import OpenDialog
from widgets.labelmanagedialog import LabelManageDialog
from widgets.labelconfigcenter import LabelConfigCenter
from widgets.roidialog import VolROIDialog, SurfROIDialog
from widgets.atlasdialog import AtlasDialog
from widgets.binaryerosiondialog import VolBinErosionDialog, SurfBinErosionDialog
from widgets.binarydilationdialog import VolBinDilationDialog, SurfBinDilationDialog
from widgets.greydilationdialog import GreydilationDialog
from widgets.greyerosiondialog import GreyerosionDialog
from widgets.meants import MeanTSDialog
from widgets.voxelstatsdialog import VoxelStatsDialog
from widgets.registervolume import RegisterVolumeDialog
from widgets.treemodel import TreeModel
from widgets.surfacetreewidget import SurfaceTreeView
from widgets.surfaceview import SurfaceView
from widgets.scribingdialog import ScribingDialog
from widgets.surfaceRGdialog import SurfaceRGDialog
from widgets.prob_map_dialog import SurfProbMapDialog
from widgets.concatenate_dialog import SurfConcatenateDialog
class BpMainWindow(QMainWindow):
"""Class BpMainWindow provides UI interface of FreeROI.
Example:
--------
>>> from PyQt4.QtGui import QApplication
>>> import main
>>> app = QApplication([])
>>> win = main.BpMainWindow()
......
>>> win.show()
>>> app.exec_()
"""
def __init__(self, parent=None):
"""Initialize an instance of BpMainWindow."""
# Inherited from QMainWindow
if sys.platform == 'darwin':
# Workaround for Qt issue on OSX that causes QMainWindow to
# hide when adding QToolBar, see
# https://bugreports.qt-project.org/browse/QTBUG-4300
super(BpMainWindow, self).__init__(parent,
Qt.MacWindowToolBarButtonHint)
else:
super(BpMainWindow, self).__init__(parent)
# temporary variable
self._save_dir = None
self._temp_dir = None
self.is_save_configure = False
# pre-define model variables, one for volume dataset, another
# for suface dataset
self.volume_model = None
self.surface_model = None
self.tabWidget = None
self.volume_actions_status = {}
self.surface_actions_status = {}
self.volume_view = None
self.surface_view = None
self.list_view = None
self.surface_tree_view = None
self.painter_status = PainterStatus(ViewSettings())
def config_extra_settings(self, data_dir):
"""Set data directory and update some configurations."""
# load data directory configuration
self.label_path = data_dir
self.label_config_dir = os.path.join(self.label_path, 'labelconfig')
self.label_config_suffix = 'lbl'
# set icon configuration
self._icon_dir = get_icon_dir()
self.setWindowTitle('FreeROI')
self.setWindowIcon(QIcon(os.path.join(self._icon_dir, 'logo.png')))
self._init_configuration()
self.center()
self._create_actions()
self._create_menus()
def center(self):
"""Display main window in the center of screen."""
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def _init_configuration(self):
"""Load configuration for GUI."""
config_file = os.path.expanduser('~/.froi.conf')
if os.path.exists(config_file):
config = ConfigParser.RawConfigParser()
config.read(config_file)
self.window_width = config.getint('width', 'int')
self.window_height = config.getint('height', 'int')
self.orth_scale_factor = config.getint('orth_scale', 'int')
self.grid_scale_factor = config.getint('grid_scale', 'int')
self.window_xpos = config.getint('xpos', 'int')
self.window_ypos = config.getint('ypos', 'int')
self.resize(self.window_width, self.window_height)
self.move(self.window_xpos, self.window_ypos)
self.default_orth_scale_factor = float(self.orth_scale_factor) / 100
self.default_grid_scale_factor = float(self.grid_scale_factor) / 100
else:
# self.setWindowState(Qt.WindowMaximized)
screen_geo = QDesktopWidget().screenGeometry()
self.setMinimumSize(screen_geo.width()*2/3, screen_geo.height()*8/9)
self.default_orth_scale_factor = 1.0
self.default_grid_scale_factor = 2.0
def _init_tab_widget(self):
# set tab widget
self.tabWidget = QTabWidget()
self.tabWidget.setTabShape(QTabWidget.Rounded)
self.tabWidget.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Expanding)
self.tabWidget.setMaximumWidth(280)
self.tabWidget.currentChanged.connect(self._tabwidget_index_changed)
# set central widget
central_widget = QWidget()
layout = QHBoxLayout()
central_widget.setLayout(layout)
central_widget.layout().addWidget(self.tabWidget)
self.setCentralWidget(central_widget)
# add tool bar
self._add_toolbar()
# self.setUnifiedTitleAndToolBarOnMac(True)
# change actions status
self._actions['add_image'].setEnabled(True)
self._actions['new_image'].setEnabled(True)
self._actions['save_image'].setEnabled(True)
self._actions['close'].setEnabled(True)
def _init_vol_actions(self):
self._actions['duplicate_image'].setEnabled(True)
# self._actions['ld_lbl'].setEnabled(True)
# self._actions['ld_glbl'].setEnabled(True)
self._actions['orth_view'].setEnabled(True)
self._actions['cross_hover_view'].setEnabled(True)
self._actions['original_view'].setEnabled(True)
self._actions['remove_image'].setEnabled(False)
self._actions['undo'].setEnabled(False)
self._actions['redo'].setEnabled(False)
self._vol_func_module_set_enabled(True)
self._actions['binarization'].setEnabled(True)
self._actions['binaryerosion'].setEnabled(True)
self._actions['binarydilation'].setEnabled(True)
self._actions['edge_dete'].setEnabled(True)
self._actions['inverse'].setEnabled(True)
self._actions['label_management'].setEnabled(True)
self._actions['cluster'].setEnabled(True)
self._actions['intersect'].setEnabled(True)
if not self.volume_model.is_mni_space():
self._actions['atlas'].setEnabled(False)
def _init_surf_actions(self):
self._actions['duplicate_image'].setEnabled(True)
self._actions['undo'].setEnabled(False)
self._actions['redo'].setEnabled(False)
self._spinbox.setEnabled(False)
self._surf_func_module_set_enabled(True)
self._actions['binarization'].setEnabled(True)
self._actions['binaryerosion'].setEnabled(True)
self._actions['binarydilation'].setEnabled(True)
self._actions['edge_dete'].setEnabled(True)
self._actions['inverse'].setEnabled(True)
self._actions['label_management'].setEnabled(True)
self._actions['cluster'].setEnabled(True)
self._actions['intersect'].setEnabled(True)
def _save_configuration(self):
"""Save GUI configuration to a file."""
config_file = os.path.expanduser('~/.froi.conf')  # must match the path read in _init_configuration
config = ConfigParser.RawConfigParser()
config.add_section('width')
config.add_section('height')
config.add_section('orth_scale')
config.add_section('grid_scale')
config.add_section('xpos')
config.add_section('ypos')
config.set('width', 'int', self.width())
config.set('height', 'int', self.height())
config.set('xpos', 'int', self.x())
config.set('ypos', 'int', self.y())
if hasattr(self, 'volume_model') and isinstance(self.volume_model, VolumeListModel):
config.set('orth_scale', 'int',
int(self.volume_model.get_scale_factor('orth')*100))
config.set('grid_scale', 'int',
int(self.volume_model.get_scale_factor('grid')*100))
else:
config.set('orth_scale', 'int',
int(self.default_orth_scale_factor * 100))
config.set('grid_scale', 'int',
int(self.default_grid_scale_factor * 100))
with open(config_file, 'wb') as conf:
config.write(conf)
def closeEvent(self, e):
if self.is_save_configure:
self._save_configuration()
e.accept()
def _create_actions(self):
"""Create actions."""
# create a dictionary to store actions info
self._actions = {}
# Open template action
self._actions['add_template'] = QAction(QIcon(os.path.join(
self._icon_dir, 'open.png')),
self.tr("&Open standard template"),
self)
self._actions['add_template'].setShortcut(self.tr("Ctrl+O"))
self._actions['add_template'].triggered.connect(self._add_template)
self._actions['add_template'].setEnabled(True)
# Add a new volume image action
self._actions['add_volume_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'add.png')),
self.tr("&Add volume file ... "),
self)
self._actions['add_volume_image'].triggered.connect(self._add_volume_image)
self._actions['add_volume_image'].setEnabled(True)
# Add a new surface image action
self._actions['add_surface_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'add.png')),
self.tr("&Add surface file ... "),
self)
self._actions['add_surface_image'].triggered.connect(self._add_surface_image)
self._actions['add_surface_image'].setEnabled(True)
# Add a new image action
self._actions['add_image'] = QAction(QIcon(os.path.join(self._icon_dir, 'add.png')),
self.tr("&Add image ... "),
self)
self._actions['add_image'].setShortcut(self.tr("Ctrl+A"))
self._actions['add_image'].triggered.connect(self._add_image)
self._actions['add_image'].setEnabled(False)
# Remove an image
self._actions['remove_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'remove.png')),
self.tr("&Remove image"),
self)
self._actions['remove_image'].setShortcut(self.tr("Ctrl+R"))
self._actions['remove_image'].triggered.connect(self._remove_image)
self._actions['remove_image'].setEnabled(False)
# New image
self._actions['new_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'create.png')),
self.tr("&New image"),
self)
self._actions['new_image'].setShortcut(self.tr("Ctrl+N"))
self._actions['new_image'].triggered.connect(self._new_image)
self._actions['new_image'].setEnabled(False)
# Duplicate image
self._actions['duplicate_image'] = QAction(self.tr("Duplicate"), self)
self._actions['duplicate_image'].triggered.connect(
self._duplicate_image)
self._actions['duplicate_image'].setEnabled(False)
# Save image
self._actions['save_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'save.png')),
self.tr("&Save image as..."),
self)
self._actions['save_image'].setShortcut(self.tr("Ctrl+S"))
self._actions['save_image'].triggered.connect(self._save_image)
self._actions['save_image'].setEnabled(False)
## Load Label Config
#self._actions['ld_lbl'] = QAction('Load Label', self)
#self._actions['ld_lbl'].triggered.connect(self._ld_lbl)
#self._actions['ld_lbl'].setEnabled(False)
## Load Global Label Config
#self._actions['ld_glbl'] = QAction('Load Global Label', self)
#self._actions['ld_glbl'].triggered.connect(self._ld_glbl)
#self._actions['ld_glbl'].setEnabled(False)
# Close display
self._actions['close'] = QAction(self.tr("Close tab"), self)
self._actions['close'].setShortcut(self.tr("Ctrl+W"))
self._actions['close'].triggered.connect(self._close_display)
self._actions['close'].setEnabled(False)
# Quit action
self._actions['quit'] = QAction(QIcon(os.path.join(
self._icon_dir, 'quit.png')),
self.tr("&Quit"),
self)
self._actions['quit'].setShortcut(self.tr("Ctrl+Q"))
self._actions['quit'].triggered.connect(self.close)
# Grid view action
self._actions['grid_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'gridview.png')),
self.tr("Lightbox"),
self)
self._actions['grid_view'].triggered.connect(self._grid_view)
self._actions['grid_view'].setEnabled(False)
# Orth view action
self._actions['orth_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'orthview.png')),
self.tr("Orthographic"),
self)
self._actions['orth_view'].triggered.connect(self._orth_view)
self._actions['orth_view'].setEnabled(False)
# return original size
self._actions['original_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'original_size.png')),
self.tr("Reset view"),
self)
self._actions['original_view'].triggered.connect(self._reset_view)
self._actions['original_view'].setEnabled(False)
        # Whether to display the cross hover
self._actions['cross_hover_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'cross_hover_enable.png')),
self.tr("Disable cross hover"),
self)
self._actions['cross_hover_view'].triggered.connect(self._display_cross_hover)
self._actions['cross_hover_view'].setEnabled(False)
# Binarization view action
self._actions['binarization'] = QAction(QIcon(os.path.join(
self._icon_dir, 'binarization.png')),
self.tr("Binarization"),
self)
self._actions['binarization'].triggered.connect(self._binarization)
self._actions['binarization'].setEnabled(False)
# Intersection action
self._actions['intersect'] = QAction(QIcon(os.path.join(
self._icon_dir, 'intersect.png')),
self.tr("Intersection"),
self)
self._actions['intersect'].triggered.connect(self._intersect)
self._actions['intersect'].setEnabled(False)
# Extract mean time course
self._actions['meants'] = QAction(QIcon(os.path.join(
self._icon_dir, 'voxel_curve.png')),
self.tr("Extract Mean Time Course"),
self)
self._actions['meants'].triggered.connect(self._meants)
self._actions['meants'].setEnabled(False)
# Voxel Stats
self._actions['voxelstats'] = QAction(self.tr("Voxel number stats"),
self)
self._actions['voxelstats'].triggered.connect(self._voxelstats)
self._actions['voxelstats'].setEnabled(False)
# Local Max action
self._actions['localmax'] = QAction(QIcon(os.path.join(
self._icon_dir, 'localmax.png')),
self.tr("Local Max"),
self)
self._actions['localmax'].triggered.connect(self._local_max)
self._actions['localmax'].setEnabled(False)
# Inversion action
self._actions['inverse'] = QAction(QIcon(os.path.join(
self._icon_dir, 'inverse.png')),
self.tr("Inversion"),
self)
self._actions['inverse'].triggered.connect(self._inverse)
self._actions['inverse'].setEnabled(False)
# Smoothing action
self._actions['smoothing'] = QAction(QIcon(os.path.join(
self._icon_dir, 'smoothing.png')),
self.tr("Smoothing"),
self)
self._actions['smoothing'].triggered.connect(self._smooth)
self._actions['smoothing'].setEnabled(False)
# Concatenate overlays to one overlay
self._actions['concatenate'] = QAction(self.tr('Concatenate'), self)
self._actions['concatenate'].triggered.connect(self._concatenate)
self._actions['concatenate'].setEnabled(False)
# Calculate probability map action
self._actions['probability_map'] = QAction(self.tr('ProbabilityMap'), self)
self._actions['probability_map'].triggered.connect(self._prob_map)
self._actions['probability_map'].setEnabled(False)
# Region Growing action
self._actions['region_grow'] = QAction(QIcon(os.path.join(
self._icon_dir, 'grow.png')),
self.tr("Region Growing"),
self)
self._actions['region_grow'].triggered.connect(self._region_grow)
self._actions['region_grow'].setEnabled(False)
        # Label Management action
self._actions['label_management'] = QAction(self.tr("Label Management"),
self)
self._actions['label_management'].triggered.connect(self._label_manage)
self._actions['label_management'].setEnabled(False)
# Snapshot
self._actions['snapshot'] = QAction(self.tr("Snapshot"), self)
self._actions['snapshot'].triggered.connect(self._snapshot)
self._actions['snapshot'].setEnabled(False)
# Watershed action
self._actions['watershed'] = QAction(QIcon(os.path.join(
self._icon_dir, 'watershed.png')),
self.tr("Watershed"),
self)
self._actions['watershed'].triggered.connect(self._watershed)
self._actions['watershed'].setEnabled(False)
# SLIC action
self._actions['slic'] = QAction(QIcon(os.path.join(
self._icon_dir, 'slic.png')),
self.tr("SLIC"),
self)
self._actions['slic'].triggered.connect(self._slic)
self._actions['slic'].setEnabled(False)
# Cluster action
self._actions['cluster'] = QAction(QIcon(os.path.join(
self._icon_dir, 'cluster.png')),
self.tr("Cluster"),
self)
self._actions['cluster'].triggered.connect(self._cluster)
self._actions['cluster'].setEnabled(False)
# Opening
self._actions['opening'] = QAction(self.tr("Opening"), self)
self._actions['opening'].triggered.connect(self._opening)
self._actions['opening'].setEnabled(False)
# Binary_erosion view action
self._actions['binaryerosion'] = QAction(self.tr("Binary Erosion"), self)
self._actions['binaryerosion'].triggered.connect(self._binaryerosion)
self._actions['binaryerosion'].setEnabled(False)
# Binary_dilation view action
self._actions['binarydilation'] = QAction(self.tr("Binary Dilation"), self)
self._actions['binarydilation'].triggered.connect(self._binarydilation)
self._actions['binarydilation'].setEnabled(False)
# grey_erosion view action
self._actions['greyerosion'] = QAction(self.tr("Grey Erosion"), self)
self._actions['greyerosion'].triggered.connect(self._greyerosion)
self._actions['greyerosion'].setEnabled(False)
# grey_dilation view action
self._actions['greydilation'] = QAction(self.tr("Grey Dilation"), self)
self._actions['greydilation'].triggered.connect(self._greydilation)
self._actions['greydilation'].setEnabled(False)
# About software
self._actions['about_freeroi'] = QAction(self.tr("About FreeROI"), self)
self._actions['about_freeroi'].triggered.connect(self._about_freeroi)
# About Qt
self._actions['about_qt'] = QAction(QIcon(os.path.join(
self._icon_dir, 'qt.png')),
self.tr("About Qt"),
self)
self._actions['about_qt'].triggered.connect(qApp.aboutQt)
# Hand
self._actions['hand'] = QAction(QIcon(os.path.join(
self._icon_dir, 'hand.png')),
self.tr("Hand"),
self)
self._actions['hand'].triggered.connect(self._hand_enable)
self._actions['hand'].setCheckable(True)
self._actions['hand'].setChecked(False)
self._actions['hand'].setEnabled(False)
# Cursor
self._actions['cursor'] = QAction(QIcon(os.path.join(
self._icon_dir, 'cursor.png')),
self.tr("Cursor"),
self)
self._actions['cursor'].triggered.connect(self._cursor_enable)
self._actions['cursor'].setCheckable(True)
self._actions['cursor'].setChecked(True)
self._actions['cursor'].setEnabled(True)
# Edit
self._actions['edit'] = QAction(QIcon(os.path.join(
self._icon_dir, 'edit.png')),
self.tr("Edit"),
self)
self._actions['edit'].triggered.connect(self._roidialog_enable)
self._actions['edit'].setCheckable(True)
self._actions['edit'].setChecked(False)
# Undo
self._actions['undo'] = QAction(QIcon(os.path.join(
self._icon_dir, 'undo.png')),
self.tr("Undo"),
self)
self._actions['undo'].triggered.connect(self._undo)
# Redo
self._actions['redo'] = QAction(QIcon(os.path.join(
self._icon_dir, 'redo.png')),
self.tr("Redo"),
self)
self._actions['redo'].triggered.connect(self._redo)
# sphere and cube roi
self._actions['regular_roi'] = QAction(QIcon(os.path.join(
self._icon_dir, 'sphere_and_cube.png')),
self.tr("Regular ROI"),
self)
self._actions['regular_roi'].triggered.connect(self._regular_roi)
self._actions['regular_roi'].setEnabled(False)
# sphere and cube roi from csv file
self._actions['regular_roi_from_csv'] = QAction(QIcon(os.path.join(
self._icon_dir, 'sphere_and_cube.png')),
self.tr("Regular ROI From CSV File"),
self)
self._actions['regular_roi_from_csv'].triggered.connect(self._regular_roi_from_csv_file)
self._actions['regular_roi_from_csv'].setEnabled(False)
# ROI to Interface
self._actions['r2i'] = QAction(QIcon(os.path.join(
self._icon_dir, 'r2i.png')),
self.tr("ROI2Interface"),
self)
self._actions['r2i'].triggered.connect(self._r2i)
self._actions['r2i'].setEnabled(False)
# Edge detection for ROI
self._actions['edge_dete'] = QAction(QIcon(os.path.join(
self._icon_dir, 'edge_detection.png')),
self.tr("Edge Detection"),
self)
self._actions['edge_dete'].triggered.connect(self._label_edge_detection)
self._actions['edge_dete'].setEnabled(False)
# Atlas information
self._actions['atlas'] = QAction(QIcon(os.path.join(
self._icon_dir, 'atlas.png')),
self.tr("Candidate Label"),
self)
self._actions['atlas'].triggered.connect(self._atlas_dialog)
self._actions['atlas'].setEnabled(False)
# ROI Merging
self._actions['roi_merge'] = QAction(QIcon(os.path.join(
self._icon_dir, 'merging.png')),
self.tr("ROI Merging"),
self)
self._actions['roi_merge'].triggered.connect(self._roi_merge)
self._actions['roi_merge'].setEnabled(False)
# ROI scribing
self._actions['scribing'] = QAction(self.tr("scribing"), self)
self._actions['scribing'].triggered.connect(self._roi_scribing)
self._actions['scribing'].setEnabled(False)
# surface region grow
self._actions['surf_region_grow'] = QAction(self.tr("surf_RG"), self)
self._actions['surf_region_grow'].triggered.connect(self._surf_rg)
self._actions['surf_region_grow'].setEnabled(False)
def _surf_rg(self):
new_dialog = SurfaceRGDialog(self.surface_model, self.surface_view, self)
new_dialog.show()
def _roi_scribing(self):
new_dialog = ScribingDialog(self.surface_view, self)
new_dialog.show()
def _add_toolbar(self):
"""Add toolbar."""
# Initialize a spinbox for zoom-scale selection
self._spinbox = QSpinBox()
self._spinbox.setMaximum(500)
self._spinbox.setMinimum(50)
self._spinbox.setSuffix('%')
self._spinbox.setSingleStep(10)
self._spinbox.setValue(self.default_grid_scale_factor * 100)
self._spinbox.valueChanged.connect(self._set_scale_factor)
# Add a toolbar
self._toolbar = self.addToolBar("Tools")
#self._toolbar.setIconSize(QSize(38,38))
# Add file actions
self._toolbar.addAction(self._actions['add_image'])
self._toolbar.addAction(self._actions['remove_image'])
self._toolbar.addAction(self._actions['new_image'])
self._toolbar.addAction(self._actions['save_image'])
# Add view actions
self._toolbar.addSeparator()
self._toolbar.addAction(self._actions['grid_view'])
self._toolbar.addAction(self._actions['orth_view'])
self._toolbar.addAction(self._actions['original_view'])
self._toolbar.addAction(self._actions['cross_hover_view'])
# Add cursor status
self._toolbar.addSeparator()
self._toolbar.addAction(self._actions['hand'])
self._toolbar.addAction(self._actions['cursor'])
self._toolbar.addAction(self._actions['edit'])
# Add undo redo
self._toolbar.addSeparator()
self._toolbar.addAction(self._actions['undo'])
self._toolbar.addAction(self._actions['redo'])
self._toolbar.addSeparator()
self._toolbar.addWidget(self._spinbox)
def _set_scale_factor(self, value):
"""Set scale factor."""
value = float(value) / 100
self.volume_model.set_scale_factor(value, self.volume_view.display_type())
def _add_template(self):
"""Open a dialog window and select a template file."""
template_dir = os.path.join(self.label_path, 'standard',
'MNI152_T1_2mm_brain.nii.gz')
template_name = QFileDialog.getOpenFileName(
self,
'Open standard file',
template_dir,
'Nifti files (*.nii.gz *.nii)')
        if template_name != '':
if sys.platform == 'win32':
template_path = unicode(template_name).encode('gb2312')
else:
template_path = str(template_name)
self._add_volume_img(template_path)
def _add_image(self):
if self.tabWidget.currentWidget() == self.list_view:
self._add_volume_image()
else:
self._add_surface_image()
def _add_volume_image(self):
"""Add new item."""
        if self._temp_dir is None:
temp_dir = QDir.currentPath()
else:
temp_dir = self._temp_dir
file_name = QFileDialog.getOpenFileName(self,
'Add new volume file',
temp_dir,
"Nifti files (*.nii *.nii.gz)")
if file_name != '':
if sys.platform == 'win32':
file_path = unicode(file_name).encode('gb2312')
else:
file_path = str(file_name)
self._add_volume_img(file_path)
def _add_surface_image(self):
"""Add new surface image."""
if self._temp_dir is None:
temp_dir = QDir.currentPath()
else:
temp_dir = self._temp_dir
file_name = QFileDialog.getOpenFileName(self,
'Add new surface file',
temp_dir)
if file_name != '':
if sys.platform == 'win32':
file_path = unicode(file_name).encode('gb2312')
else:
file_path = str(file_name)
self._add_surface_img(file_path)
def _duplicate_image(self):
"""Duplicate image."""
if self.tabWidget.currentWidget() is self.list_view:
index = self.volume_model.currentIndex()
dup_img = self.volume_model._data[index.row()].duplicate()
self.volume_model.insertRow(0, dup_img)
self.list_view.setCurrentIndex(self.volume_model.index(0))
# change button status
self._actions['remove_image'].setEnabled(True)
elif self.tabWidget.currentWidget() is self.surface_tree_view:
index = self.surface_model.current_index()
depth = self.surface_model.index_depth(index)
if depth != 2:
QMessageBox.warning(self,
'Warning!',
                                    'Get overlay failed!\nYou may not have selected any overlay!',
QMessageBox.Yes)
return
self.surface_model.add_item(index,
source=self.surface_model.data(index, Qt.UserRole + 5).copy(),
vmin=self.surface_model.data(index, Qt.UserRole),
vmax=self.surface_model.data(index, Qt.UserRole + 1),
colormap=self.surface_model.data(index, Qt.UserRole + 3),
alpha=self.surface_model.data(index, Qt.UserRole + 2),
visible=self.surface_model.data(index, Qt.UserRole + 8),
islabel=self.surface_model.data(index, Qt.UserRole + 7),
name=self.surface_model.data(index, Qt.DisplayRole))
def _add_volume_img(self, source, name=None, header=None, view_min=None,
view_max=None, alpha=255, colormap='gray'):
""" Add image."""
        # If the model is None, re-initialize it.
if not self.volume_model:
self._vol_label_config_center = self._init_label_config_center()
self._vol_label_config_center.size_edit.setRange(1, 10)
self._vol_label_config_center.size_edit.setValue(4)
self.volume_model = VolumeListModel([], self._vol_label_config_center)
self.volume_model.set_scale_factor(self.default_grid_scale_factor, 'grid')
self.volume_model.set_scale_factor(self.default_orth_scale_factor, 'orth')
self._init_vol_roidialog(self.volume_model)
        # Save the previously opened directory (except the `standard` directory)
file_path = source
if sys.platform == 'win32':
temp_dir = os.path.dirname(unicode(file_path, 'gb2312'))
if not os.stat(temp_dir) == os.stat(os.path.join(self.label_path,
'standard')):
self._temp_dir = temp_dir
else:
temp_dir = os.path.dirname(file_path)
if not os.path.samefile(temp_dir, os.path.join(self.label_path,
'standard')):
self._temp_dir = temp_dir
if self.volume_model.addItem(file_path, None, name, header, view_min,
view_max, alpha, colormap):
            # If this is the only volume in the list, initialize the views.
if self.volume_model.rowCount() == 1:
# initialize views
self.list_view = LayerView(self._vol_label_config_center)
self.list_view.setModel(self.volume_model)
self.volume_view = GridView(self.volume_model, self.painter_status)
# connect signals with slots
self.list_view.current_changed.connect(self._update_undo)
self.list_view.current_changed.connect(self._update_redo)
self.list_view._list_view.selectionModel().currentChanged.connect(self.vol_roidialog.clear_rois)
self.volume_model.rowsInserted.connect(self._update_remove_image)
self.volume_model.undo_stack_changed.connect(self._update_undo)
self.volume_model.redo_stack_changed.connect(self._update_redo)
# set current volume index
self.list_view.setCurrentIndex(self.volume_model.index(0))
# set crosshair as the center of the data
self.volume_model.set_cross_pos([self.volume_model.getY()/2,
self.volume_model.getX()/2,
self.volume_model.getZ()/2])
# Enable cursor tracking
# self.list_view._list_view.selectionModel().currentChanged.connect(
# self._switch_cursor_status)
if not self.tabWidget:
self._init_tab_widget()
if self.tabWidget.count() == 0:
self.tabWidget.addTab(self.list_view, "Volume")
self._init_vol_actions()
elif self.tabWidget.count() == 1 and self.tabWidget.currentWidget() != self.list_view:
self.tabWidget.addTab(self.list_view, "Volume")
self.tabWidget.setCurrentIndex(1)
self._init_vol_actions()
elif self.tabWidget.count() == 2 and self.tabWidget.currentWidget() != self.list_view:
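                # With two tabs, count - currentIndex - 1 flips the index
                # (0 <-> 1), i.e. switch focus to the volume tab.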
self.tabWidget.setCurrentIndex(self.tabWidget.count() - self.tabWidget.currentIndex() - 1)
            if self.centralWidget().layout().indexOf(self.volume_view) == -1:  # volume_view not yet in the central layout
if self.centralWidget().layout().indexOf(self.surface_view) != -1:
self.centralWidget().layout().removeWidget(self.surface_view)
self.surface_view.setParent(None)
self.centralWidget().layout().addWidget(self.volume_view)
if self.volume_model.rowCount() > 1:
self._actions['remove_image'].setEnabled(True)
# set current volume index
self.list_view.setCurrentIndex(self.volume_model.index(0))
self.is_save_configure = True
else:
ret = QMessageBox.question(self,
'FreeROI',
                                       'Cannot load ' + file_path + ': data size mismatch.\nNeed registration?',
QMessageBox.Cancel,
QMessageBox.Yes)
if ret == QMessageBox.Yes:
register_volume_dialog = RegisterVolumeDialog(self.volume_model, file_path)
register_volume_dialog.exec_()
def _add_surface_img(self, source, index=None, offset=None, vmin=None, vmax=None,
colormap='jet', alpha=1.0, visible=True, islabel=False):
""" Add surface image."""
        # If the model is None, re-initialize it.
if not self.surface_model:
self._surf_label_config_center = self._init_label_config_center()
self._surf_label_config_center.size_edit.setRange(0, 10)
self._surf_label_config_center.size_edit.setValue(1)
self.surface_model = TreeModel([])
self.surface_tree_view = SurfaceTreeView(self.surface_model, self._surf_label_config_center)
self.surface_tree_view_control = self.surface_tree_view.get_treeview()
self._init_surf_roidialog(self.surface_model)
if index is None:
index = self.surface_tree_view_control.currentIndex()
        # Save the previously opened directory (except the `standard` directory)
file_path = source
if sys.platform == 'win32':
temp_dir, basename = os.path.split(unicode(file_path, 'gb2312'))
if not os.stat(temp_dir) == os.stat(os.path.join(self.label_path,
'standard')):
self._temp_dir = temp_dir
else:
temp_dir, basename = os.path.split(file_path)
if not os.path.samefile(temp_dir, os.path.join(self.label_path,
'standard')):
self._temp_dir = temp_dir
ends = basename.split('.')[-1]
if len(self.surface_model.get_data()) == 0 and ends not in ('pial', 'white', 'inflated', 'gii'):
QMessageBox.warning(self,
'Warning',
'You must choose the brain surface file first!',
QMessageBox.Yes)
elif self.surface_model.add_item(index, file_path, vmin=vmin, vmax=vmax, alpha=alpha,
colormap=colormap, visible=visible, islabel=islabel):
            # Initialize the tab widget.
if not self.tabWidget:
self._init_tab_widget()
if self.tabWidget.count() == 0:
self.tabWidget.addTab(self.surface_tree_view, "Surface")
self._init_surf_actions()
elif self.tabWidget.count() == 1 and self.tabWidget.currentWidget() != self.surface_tree_view:
self.tabWidget.addTab(self.surface_tree_view, "Surface")
self.tabWidget.setCurrentIndex(1)
self._init_surf_actions()
elif self.tabWidget.count() == 2 and self.tabWidget.currentWidget() != self.surface_tree_view:
self.tabWidget.setCurrentIndex(self.tabWidget.count() - self.tabWidget.currentIndex() - 1)
            # Initialize the surface_view
if not self.surface_view:
self.surface_view = SurfaceView()
self.surface_view.set_model(self.surface_model)
self.surface_view.set_painter_status(self.painter_status)
            if self.centralWidget().layout().indexOf(self.surface_view) == -1:  # surface_view not yet in the central layout
if self.centralWidget().layout().indexOf(self.volume_view) != -1:
self.centralWidget().layout().removeWidget(self.volume_view)
self.volume_view.setParent(None)
self.centralWidget().layout().addWidget(self.surface_view)
self._actions['remove_image'].setEnabled(True)
else:
QMessageBox.question(self,
'FreeROI',
'Cannot load ' + file_path + ' !',
QMessageBox.Yes)
def _save_actions_status(self, actions_status):
actions_status['grid_view'] = self._actions['grid_view'].isEnabled()
actions_status['orth_view'] = self._actions['orth_view'].isEnabled()
actions_status['hand'] = self._actions['hand'].isEnabled()
actions_status['snapshot'] = self._actions['snapshot'].isEnabled()
actions_status['cross_hover_view'] = self._actions['cross_hover_view'].isEnabled()
actions_status['original_view'] = self._actions['original_view'].isEnabled()
actions_status['remove_image'] = self._actions['remove_image'].isEnabled()
actions_status['undo'] = self._actions['undo'].isEnabled()
actions_status['redo'] = self._actions['redo'].isEnabled()
# actions_status['functional_module_set_enabled'] = self._actions['binarization'].isEnabled()
actions_status['atlas'] = self._actions['atlas'].isEnabled()
def _disable_vol_actions(self):
        # Set the enabled status for volume-specific actions
self._actions['grid_view'].setEnabled(False)
self._actions['orth_view'].setEnabled(False)
self._actions['hand'].setEnabled(False)
self._actions['snapshot'].setEnabled(False)
self._actions['cross_hover_view'].setEnabled(False)
self._actions['original_view'].setEnabled(False)
self._actions['undo'].setEnabled(False)
self._actions['redo'].setEnabled(False)
self._vol_func_module_set_enabled(False)
self._spinbox.setEnabled(False)
def _disable_surf_actions(self):
        # Disable surface-specific actions (e.g. when switching to the volume tab)
self._surf_func_module_set_enabled(False)
def _restore_actions_status(self, actions_status):
# Restore all toolbar controls
if actions_status:
self._actions['grid_view'].setEnabled(actions_status['grid_view'])
self._actions['hand'].setEnabled(actions_status['hand'])
self._actions['snapshot'].setEnabled(actions_status['snapshot'])
self._actions['orth_view'].setEnabled(actions_status['orth_view'])
self._actions['cross_hover_view'].setEnabled(actions_status['cross_hover_view'])
self._actions['original_view'].setEnabled(actions_status['original_view'])
self._actions['remove_image'].setEnabled(actions_status['remove_image'])
self._actions['undo'].setEnabled(actions_status['undo'])
self._actions['redo'].setEnabled(actions_status['redo'])
if actions_status == self.volume_actions_status:
self._vol_func_module_set_enabled(True)
self._spinbox.setEnabled(True)
if not self.volume_model.is_mni_space():
self._actions['atlas'].setEnabled(actions_status['atlas'])
else:
self._surf_func_module_set_enabled(True)
def _tabwidget_index_changed(self):
if self.tabWidget.count() == 2:
if self.tabWidget.currentWidget() == self.list_view:
self.centralWidget().layout().removeWidget(self.surface_view)
self.surface_view.setParent(None)
self.centralWidget().layout().addWidget(self.volume_view)
self._save_actions_status(self.surface_actions_status)
self._disable_surf_actions()
self._restore_actions_status(self.volume_actions_status)
else:
self.centralWidget().layout().removeWidget(self.volume_view)
self.volume_view.setParent(None)
self.centralWidget().layout().addWidget(self.surface_view)
self._save_actions_status(self.volume_actions_status)
self._disable_vol_actions()
self._restore_actions_status(self.surface_actions_status)
self._roidialog_disable()
def _new_image(self):
"""Create new image."""
if self.tabWidget.currentWidget() == self.list_view:
self.new_volume_image()
else:
self.new_surface_image()
def _update_remove_image(self):
"""Update the display after removing an image."""
if self.volume_model.rowCount() == 1:
self._actions['remove_image'].setEnabled(False)
else:
self._actions['remove_image'].setEnabled(True)
def new_volume_image(self, data=None, name=None, colormap=None):
"""Create a new volume for brain parcellation."""
if colormap is None:
colormap = self._vol_label_config_center.get_first_label_config()
self.volume_model.new_image(data, name, None, colormap)
self.list_view.setCurrentIndex(self.volume_model.index(0))
# change button status
self._actions['remove_image'].setEnabled(True)
def new_surface_image(self):
self.surface_model.add_item(self.surface_tree_view_control.currentIndex())
def new_image_action(self):
"""Change the related status of other actions after creating an image."""
self._actions['remove_image'].setEnabled(True)
def _remove_image(self):
"""Remove current image."""
if self.tabWidget.currentWidget() == self.list_view:
self._remove_volume_image()
else:
self._remove_surface_image()
def _remove_volume_image(self):
row = self.list_view.currentRow()
self.volume_model.delItem(row)
if self.volume_model.rowCount() == 1:
self._actions['remove_image'].setEnabled(False)
def _remove_surface_image(self):
self.surface_model.del_item(self.surface_tree_view_control.currentIndex())
if self.surface_model.rowCount(QModelIndex()) == 0:
self._actions['remove_image'].setEnabled(False)
def _save_image(self):
"""Save overlay as a file."""
if self._save_dir is not None:
temp_dir = self._save_dir
else:
temp_dir = str(QDir.currentPath()) if self._temp_dir is None else self._temp_dir
if self.tabWidget.currentWidget() == self.list_view:
index = self.volume_model.currentIndex()
file_types = "Compressed NIFTI file(*.nii.gz);;NIFTI file(*.nii)"
file_path = os.path.join(temp_dir,
str(self.volume_model.data(index, Qt.DisplayRole)))
overlay = self.volume_model._data[index.row()]
else:
index = self.surface_tree_view_control.currentIndex()
if not index.isValid():
QMessageBox.warning(self, 'Error',
                                'You have not specified an overlay!',
QMessageBox.Yes)
return
else:
parent = index.parent()
if not parent.isValid():
QMessageBox.warning(self, 'Error',
                                    'You have not specified an overlay!',
QMessageBox.Yes)
return
file_types = "Compressed NIFTI file(*.nii.gz);;NIFTI file(*.nii);;FS label(*.label)"
file_path = os.path.join(temp_dir,
str(self.surface_model.data(index, Qt.DisplayRole)))
overlay = index.internalPointer()
path, filter = QFileDialog.getSaveFileNameAndFilter(self, 'Save image as...',
file_path, file_types)
if str(path) != '':
if sys.platform == 'win32':
path = unicode(path).encode('gb2312')
self._temp_dir = os.path.dirname(unicode(path, 'gb2312'))
else:
path = str(path)
self._temp_dir = os.path.dirname(path)
if filter == 'FS label(*.label)':
index = self.surface_model.get_surface_index()
# FIXME coordinates in freesurfer-style label file should come from '.white' file
coords = self.surface_model.data(index, Qt.UserRole + 6).coords
overlay.save2label(path, hemi_coords=coords)
else:
overlay.save2nifti(path)
def _close_display(self):
"""Close current display."""
old_index = self.tabWidget.currentIndex()
if self.tabWidget.count() == 1:
self.setCentralWidget(QWidget())
self.removeToolBar(self._toolbar)
if self.tabWidget.currentWidget() == self.list_view:
self._set_scale_factor(self.default_grid_scale_factor)
self.volume_model = None
self.volume_view = None
self.volume_actions_status.clear()
else:
self.surface_model = None
self.surface_view = None
self.surface_actions_status.clear()
self._actions['add_image'].setEnabled(False)
self._actions['remove_image'].setEnabled(False)
self._actions['new_image'].setEnabled(False)
self._actions['save_image'].setEnabled(False)
#self._actions['ld_glbl'].setEnabled(False)
#self._actions['ld_lbl'].setEnabled(False)
self._actions['close'].setEnabled(False)
self._disable_vol_actions()
self._disable_surf_actions()
elif self.tabWidget.count() == 2 and self.tabWidget.currentWidget() == self.list_view:
self.tabWidget.setCurrentIndex(self.tabWidget.count() - old_index - 1)
self.tabWidget.removeTab(old_index)
self._set_scale_factor(self.default_grid_scale_factor)
self.volume_model = None
self.volume_view = None
self.volume_actions_status.clear()
elif self.tabWidget.count() == 2 and self.tabWidget.currentWidget() == self.surface_tree_view:
self.tabWidget.setCurrentIndex(self.tabWidget.count() - old_index - 1)
self.tabWidget.removeTab(old_index)
self.surface_model = None
self.surface_view = None
self.surface_actions_status.clear()
def _about_freeroi(self):
""" About software."""
QMessageBox.about(self, self.tr("About FreeROI"),
self.tr("<p><b>FreeROI</b> is a versatile image "
"processing software developed for "
"neuroimaging data.</p>"
"<p>Its goal is to provide a user-friendly "
"interface for neuroimaging researchers "
"to visualize and analyze their data, "
"especially in defining region of interest "
"(ROI) for ROI analysis.</p>"
"<p>Version: " + __version__ + "</p>"
"<p>Written by: Lijie Huang, Zetian Yang, "
"Guangfu Zhou, Zhaoguo Liu, Xiaobin Dang, "
"Xiangzhen Kong, Xu Wang, and Zonglei Zhen."
"</p>"
"<p><b>FreeROI</b> is under Revised BSD "
"License.</p>"
"<p>Copyright(c) 2012-2015 "
"Neuroinformatic Team in LiuLab "
"from Beijing Normal University</p>"
"<p></p>"
"<p>Please join and report bugs to:</p>"
"<p><b>nitk-user@googlegroups.com</b></p>"))
def _create_menus(self):
"""Create menus."""
self.file_menu = self.menuBar().addMenu(self.tr("File"))
self.file_menu.addAction(self._actions['add_volume_image'])
self.file_menu.addAction(self._actions['add_template'])
self.file_menu.addSeparator()
self.file_menu.addAction(self._actions['add_surface_image'])
self.file_menu.addSeparator()
self.file_menu.addAction(self._actions['new_image'])
self.file_menu.addAction(self._actions['remove_image'])
self.file_menu.addAction(self._actions['duplicate_image'])
self.file_menu.addAction(self._actions['save_image'])
#self.file_menu.addAction(self._actions['ld_lbl'])
#self.file_menu.addAction(self._actions['ld_glbl'])
self.file_menu.addSeparator()
self.file_menu.addAction(self._actions['close'])
self.file_menu.addAction(self._actions['quit'])
#self.volume_menu = self.menuBar().addMenu(self.tr("Volume"))
#self.volume_menu.addAction(self._actions['new_image'])
#self.volume_menu.addAction(self._actions['remove_image'])
self.view_menu = self.menuBar().addMenu(self.tr("View"))
self.view_menu.addAction(self._actions['grid_view'])
self.view_menu.addAction(self._actions['orth_view'])
self.view_menu.addAction(self._actions['original_view'])
self.view_menu.addAction(self._actions['cross_hover_view'])
self.tool_menu = self.menuBar().addMenu(self.tr("Tools"))
# Basic tools
basic_tools = self.tool_menu.addMenu(self.tr("Basic Tools"))
basic_tools.addAction(self._actions['binarization'])
basic_tools.addAction(self._actions['intersect'])
basic_tools.addAction(self._actions['localmax'])
basic_tools.addAction(self._actions['inverse'])
basic_tools.addAction(self._actions['smoothing'])
basic_tools.addAction(self._actions['concatenate'])
basic_tools.addAction(self._actions['probability_map'])
basic_tools.addAction(self._actions['meants'])
basic_tools.addAction(self._actions['voxelstats'])
# Segment tools
segment_tools = self.tool_menu.addMenu(self.tr("Segmentation"))
segment_tools.addAction(self._actions['region_grow'])
segment_tools.addAction(self._actions['watershed'])
segment_tools.addAction(self._actions['slic'])
segment_tools.addAction(self._actions['cluster'])
segment_tools.addAction(self._actions['surf_region_grow'])
# ROI tools
roi_tools = self.tool_menu.addMenu(self.tr("ROI Tools"))
roi_tools.addAction(self._actions['edge_dete'])
roi_tools.addAction(self._actions['roi_merge'])
roi_tools.addAction(self._actions['regular_roi'])
roi_tools.addAction(self._actions['regular_roi_from_csv'])
roi_tools.addAction(self._actions['r2i'])
roi_tools.addAction(self._actions['scribing'])
# Morphological tools
morphological_tools = self.tool_menu.addMenu(
self.tr("Morphological Processing"))
morphological_tools.addAction(self._actions['opening'])
morphological_tools.addAction(self._actions['binarydilation'])
morphological_tools.addAction(self._actions['binaryerosion'])
morphological_tools.addAction(self._actions['greydilation'])
morphological_tools.addAction(self._actions['greyerosion'])
# label management
self.tool_menu.addAction(self._actions['atlas'])
self.tool_menu.addAction(self._actions['label_management'])
self.tool_menu.addAction(self._actions['snapshot'])
self.help_menu = self.menuBar().addMenu(self.tr("Help"))
self.help_menu.addAction(self._actions['about_freeroi'])
self.help_menu.addAction(self._actions['about_qt'])
def _cursor_enable(self):
"""Cursor enabled."""
if self._actions['cursor'].isChecked():
self._actions['cursor'].setChecked(True)
if self.tabWidget.currentWidget() is self.list_view:
if isinstance(self.volume_view, OrthView):
self._actions['hand'].setChecked(False)
self.volume_view.set_cursor(Qt.ArrowCursor)
self.volume_view.set_label_mouse_tracking(True)
self._roidialog_disable()
self.painter_status.set_draw_settings(ViewSettings())
else:
self._actions['cursor'].setChecked(True)
def _voxel_edit_enable(self):
"""Voxel brush enabled."""
self._vol_label_config_center.set_is_roi_edit(False)
self.painter_status.set_draw_settings(self._vol_label_config_center)
self.volume_view.set_cursor(Qt.CrossCursor)
self.volume_view.set_label_mouse_tracking(False)
def _vertex_edit_enable(self):
"""Vertex brush enabled."""
self._surf_label_config_center.set_is_roi_edit(False)
self.painter_status.set_draw_settings(self._surf_label_config_center)
def _vol_roi_edit_enable(self):
"""Volume ROI brush enabled."""
self._vol_label_config_center.set_is_roi_edit(True)
self.painter_status.set_draw_settings(self._vol_label_config_center)
self.volume_view.set_cursor(Qt.CrossCursor)
self.volume_view.set_label_mouse_tracking(False)
def _surf_roi_edit_enable(self):
"""Surface ROI brush enabled."""
self._surf_label_config_center.set_is_roi_edit(True)
self.painter_status.set_draw_settings(self._surf_label_config_center)
def _vol_roi_batch_enable(self):
"""Volume ROI batch enabled."""
self.volume_view.set_label_mouse_tracking(False)
self._vol_label_config_center.set_is_roi_edit(False)
self.painter_status.set_draw_settings(self.vol_roidialog)
def _surf_roi_batch_enable(self):
"""Surface ROI batch enabled."""
self._surf_label_config_center.set_is_roi_edit(False)
self.painter_status.set_draw_settings(self.surf_roidialog)
def _roidialog_enable(self):
"""ROI dialog enabled."""
if self._actions['edit'].isChecked():
self._actions['cursor'].setChecked(False)
self._actions['edit'].setChecked(True)
if self.tabWidget.currentWidget() is self.list_view:
if isinstance(self.volume_view, OrthView):
self._actions['hand'].setChecked(False)
self.vol_roidialog.show_dialog()
elif self.tabWidget.currentWidget() is self.surface_tree_view:
self.surf_roidialog.show_dialog()
else:
self._actions['edit'].setChecked(True)
def _roidialog_disable(self):
"""Disable the roi dialog."""
if hasattr(self, "vol_roidialog"):
if self.vol_roidialog.isVisible():
self.vol_roidialog.hide_dialog()
if hasattr(self, "surf_roidialog"):
if self.surf_roidialog.isVisible():
self.surf_roidialog.hide_dialog()
self._actions['edit'].setChecked(False)
def _atlas_dialog(self):
"""Atlas information dialog."""
if 'atlasdialog' in self.__dict__:
self.atlasdialog.show()
else:
self.atlasdialog = AtlasDialog(self.volume_model, self)
self.atlasdialog.show()
def _hand_enable(self):
"""Hand enabled."""
if self._actions['hand'].isChecked():
self._actions['cursor'].setChecked(False)
self._actions['hand'].setChecked(True)
self._roidialog_disable()
self.painter_status.set_draw_settings(MoveSettings())
self.volume_view.set_cursor(Qt.OpenHandCursor)
self.volume_view.set_label_mouse_tracking(True)
else:
self._actions['hand'].setChecked(True)
def _switch_cursor_status(self):
"""Change the cursor status."""
self._actions['cursor'].setChecked(True)
self._cursor_enable()
def _update_undo(self):
"""Update the undo status."""
if self.volume_model.current_undo_available():
self._actions['undo'].setEnabled(True)
else:
self._actions['undo'].setEnabled(False)
def _update_redo(self):
"""Update the redo status."""
if self.volume_model.current_redo_available():
self._actions['redo'].setEnabled(True)
else:
self._actions['redo'].setEnabled(False)
def _init_vol_roidialog(self, model):
"""Initialize volume ROI Dialog."""
self.vol_roidialog = VolROIDialog(model, self._vol_label_config_center, self)
self.vol_roidialog.vx_edit_enabled.connect(self._voxel_edit_enable)
self.vol_roidialog.roi_edit_enabled.connect(self._vol_roi_edit_enable)
self.vol_roidialog.roi_batch_enabled.connect(self._vol_roi_batch_enable)
def _init_surf_roidialog(self, model):
"""Initialize Surface ROI Dialog."""
self.surf_roidialog = SurfROIDialog(model, self._surf_label_config_center, self)
self.surf_roidialog.vx_edit_enabled.connect(self._vertex_edit_enable)
self.surf_roidialog.roi_edit_enabled.connect(self._surf_roi_edit_enable)
self.surf_roidialog.roi_batch_enabled.connect(self._surf_roi_batch_enable)
def _init_label_config_center(self):
"""Initialize LabelConfigCenter."""
lbl_path = os.path.join(self.label_config_dir,
'*.' + self.label_config_suffix)
label_configs = glob.glob(lbl_path)
self.label_configs = map(LabelConfig, label_configs)
self._list_view_model = QStandardItemModel()
# _list_view_model.appendRow(QStandardItem("None"))
for x in self.label_configs:
self._list_view_model.appendRow(QStandardItem(x.get_name()))
self._label_models = []
for item in self.label_configs:
model = QStandardItemModel()
            indexes = sorted(item.get_index_list())
            for index in indexes:
text_index_icon_item = QStandardItem(gen_label_color(item.get_label_color(item.get_index_label(index))),
str(index) + ' ' + item.get_index_label(index))
model.appendRow(text_index_icon_item)
self._label_models.append(model)
return LabelConfigCenter(self.label_configs, self._list_view_model, self._label_models)
def _get_label_config(self, file_path):
"""Get label config file."""
        # Derive the label config path: strip the .nii / .nii.gz extension and
        # look for a sibling .lbl file next to the image.
        dir_name = os.path.dirname(file_path)
        base_name = os.path.basename(file_path)
        split_list = base_name.split('.')
        nii_index = split_list.index('nii')
        base_name = ''.join(split_list[:nii_index])
        config_file = os.path.join(dir_name, base_name + '.lbl')
if os.path.isfile(config_file):
label_config = LabelConfig(config_file, False)
else:
label_config = self.label_config
return label_config
def _undo(self):
"""The undo action."""
self.volume_model.undo_current_image()
def _redo(self):
"""The redo action."""
self.volume_model.redo_current_image()
def _regular_roi(self):
"""Generate regular(cube, sphere, etc.) roi dialog."""
regular_roi_dialog = RegularROIDialog(self.volume_model)
regular_roi_dialog.exec_()
def _regular_roi_from_csv_file(self):
"""Generate regular(cube, sphere, etc.) roi from csv file."""
regular_roi_from_csv_file = RegularROIFromCSVFileDialog(self.volume_model)
regular_roi_from_csv_file.exec_()
def _label_edge_detection(self):
"""edge detection for labels"""
if self.tabWidget.currentWidget() is self.list_view:
# get information from the model
index = self.volume_model.currentIndex()
data = self.volume_model.data(index, Qt.UserRole + 6)
name = self.volume_model.data(index, Qt.DisplayRole)
new_name = "edge_" + name
# detect edges
new_data = vol_label_edge_detection(data)
# save result as a new overlay
self.volume_model.addItem(new_data, None, new_name,
self.volume_model.data(index, Qt.UserRole + 11),
None, None, 255, 'green')
elif self.tabWidget.currentWidget() is self.surface_tree_view:
# get information from the model
index = self.surface_model.current_index()
depth = self.surface_model.index_depth(index)
if depth != 2:
QMessageBox.warning(self,
'Warning!',
                                    'Get overlay failed!\nYou may not have selected any overlay!',
QMessageBox.Yes)
return
if not self.surface_model.data(index, Qt.UserRole + 7):
QMessageBox.warning(self,
'Warning!',
"Current overlay isn't for ROIs.\nThis tool should be used for ROIs",
QMessageBox.Yes)
return
data = self.surface_model.data(index, Qt.UserRole + 10)
name = self.surface_model.data(index, Qt.DisplayRole)
new_name = "edge_" + name
# detect the edges
new_data = surf_label_edge_detection(data,
self.surface_model.data(index.parent(), Qt.UserRole + 6).faces)
# save result as a new overlay
self.surface_model.add_item(index,
source=new_data.astype(int),
colormap=self.surface_model.data(index, Qt.UserRole + 3),
islabel=True,
name=new_name)
else:
return
def _roi_merge(self):
"""ROI merge dialog."""
new_dialog = ROIMergeDialog(self.volume_model)
new_dialog.exec_()
def _r2i(self):
"""ROI to gwmi dialog."""
new_dialog = Roi2gwmiDialog(self.volume_model)
new_dialog.exec_()
def _opening(self):
"""Opening Dialog which using the opening algorithm to process the image."""
new_dialog = OpenDialog(self.volume_model)
new_dialog.exec_()
def _voxelstats(self):
"""Voxel statistical analysis dialog."""
new_dialog = VoxelStatsDialog(self.volume_model, self)
new_dialog.show()
def _label_manage(self):
"""Label management dialog."""
self.label_manage_dialog = LabelManageDialog(self.label_configs,
self._list_view_model,
self._label_models,
self.label_config_dir,
self.label_config_suffix,
self)
self.label_manage_dialog.exec_()
def _ld_lbl(self):
"""Local label config file."""
file_name = QFileDialog.getOpenFileName(self,
'Load Label File',
QDir.currentPath(),
"Label files (*.lbl)")
if file_name:
label_config = LabelConfig(str(file_name), False)
self.volume_model.set_cur_label(label_config)
def _ld_glbl(self):
"""Local global label config file."""
file_name = QFileDialog.getOpenFileName(self,
'Load Label File',
QDir.currentPath(),
"Label files (*.lbl)")
if file_name:
label_config = LabelConfig(str(file_name), True)
self.volume_model.set_global_label(label_config)
def _grid_view(self):
"""Grid view option."""
self._actions['grid_view'].setEnabled(False)
self._actions['orth_view'].setEnabled(True)
self._actions['hand'].setEnabled(False)
self._actions['snapshot'].setEnabled(False)
self._actions['cursor'].trigger()
self.centralWidget().layout().removeWidget(self.volume_view)
self.volume_view.set_display_type('grid')
self.volume_model.scale_changed.disconnect()
self.volume_model.repaint_slices.disconnect()
self.volume_model.cross_pos_changed.disconnect(self.volume_view.update_cross_pos)
self.volume_view.deleteLater()
self._spinbox.setValue(100 * self.volume_model.get_scale_factor('grid'))
self.volume_view = GridView(self.volume_model, self.painter_status,
self._gridview_vertical_scrollbar_position)
self.centralWidget().layout().addWidget(self.volume_view)
def _orth_view(self):
"""Orth view option."""
self._actions['orth_view'].setEnabled(False)
self._actions['grid_view'].setEnabled(True)
self._actions['snapshot'].setEnabled(True)
self._actions['hand'].setEnabled(True)
self._actions['cursor'].trigger()
self._gridview_vertical_scrollbar_position = \
self.volume_view.get_vertical_srollbar_position()
self.centralWidget().layout().removeWidget(self.volume_view)
self.volume_view.set_display_type('orth')
self.volume_model.scale_changed.disconnect()
self.volume_model.repaint_slices.disconnect()
self.volume_model.cross_pos_changed.disconnect(self.volume_view.update_cross_pos)
self.volume_view.deleteLater()
self._spinbox.setValue(100 * self.volume_model.get_scale_factor('orth'))
self.volume_view = OrthView(self.volume_model, self.painter_status)
self.centralWidget().layout().addWidget(self.volume_view)
def _display_cross_hover(self):
"""Display the cross hover on the image."""
if self.volume_model._display_cross:
self.volume_model.set_cross_status(False)
self._actions['cross_hover_view'].setText('Enable cross hover')
self._actions['cross_hover_view'].setIcon(QIcon(os.path.join(self._icon_dir,'cross_hover_disable.png')))
else:
self.volume_model.set_cross_status(True)
self._actions['cross_hover_view'].setText('Disable cross hover')
self._actions['cross_hover_view'].setIcon(QIcon(os.path.join(self._icon_dir,'cross_hover_enable.png')))
def _reset_view(self):
"""Reset view parameters."""
if self.volume_view.display_type() == 'orth':
if not self.volume_model.get_scale_factor('orth') == \
self.default_orth_scale_factor:
self._spinbox.setValue(100 * self.default_orth_scale_factor)
self.volume_view.reset_view()
elif self.volume_view.display_type() == 'grid':
if not self.volume_model.get_scale_factor('grid') == \
self.default_grid_scale_factor:
self._spinbox.setValue(100 * self.default_grid_scale_factor)
def _binarization(self):
"""Image binarization dialog."""
if self.tabWidget.currentWidget() is self.list_view:
binarization_dialog = VolBinarizationDialog(self.volume_model)
elif self.tabWidget.currentWidget() is self.surface_tree_view:
binarization_dialog = SurfBinarizationDialog(self.surface_model)
else:
return
binarization_dialog.exec_()
def _binaryerosion(self):
"""Image binary erosion dialog."""
if self.tabWidget.currentWidget() is self.list_view:
binaryerosion_dialog = VolBinErosionDialog(self.volume_model)
elif self.tabWidget.currentWidget() is self.surface_tree_view:
binaryerosion_dialog = SurfBinErosionDialog(self.surface_model)
else:
return
binaryerosion_dialog.exec_()
def _binarydilation(self):
"""Image binarydilation dialog."""
if self.tabWidget.currentWidget() is self.list_view:
binarydilation_dialog = VolBinDilationDialog(self.volume_model)
elif self.tabWidget.currentWidget() is self.surface_tree_view:
binarydilation_dialog = SurfBinDilationDialog(self.surface_model)
else:
return
binarydilation_dialog.exec_()
def _greyerosion(self):
"""Image greyerosion dialog."""
greyerosiondialog = GreyerosionDialog(self.volume_model)
greyerosiondialog.exec_()
def _greydilation(self):
"""Image greydilation dialog."""
greydilation_dialog = GreydilationDialog(self.volume_model)
greydilation_dialog.exec_()
def _intersect(self):
"""Image intersect dialog."""
if self.tabWidget.currentWidget() is self.list_view:
intersect_dialog = VolIntersectDialog(self.volume_model)
elif self.tabWidget.currentWidget() is self.surface_tree_view:
intersect_dialog = SurfIntersectDialog(self.surface_model)
else:
return
intersect_dialog.exec_()
def _meants(self):
"""Image meants dialog."""
new_dialog = MeanTSDialog(self.volume_model)
new_dialog.exec_()
def _local_max(self):
"""Compute image local max value dialog."""
new_dialog = LocalMaxDialog(self.volume_model, self)
new_dialog.exec_()
def _inverse(self):
"""Inverse the given image."""
if self.tabWidget.currentWidget() is self.list_view:
index = self.volume_model.currentIndex()
data = self.volume_model.data(index, Qt.UserRole + 6)
name = self.volume_model.data(index, Qt.DisplayRole)
# inverse process
new_data = inverse_transformation(data)
new_name = 'inv_' + name
# save result as a new image
self.volume_model.addItem(new_data, None, new_name,
self.volume_model.data(index, Qt.UserRole + 11))
elif self.tabWidget.currentWidget() is self.surface_tree_view:
index = self.surface_model.current_index()
depth = self.surface_model.index_depth(index)
if depth != 2:
QMessageBox.warning(self,
'Warning!',
                                    'Get overlay failed!\nYou may not have selected any overlay!',
QMessageBox.Yes)
return
data = self.surface_model.data(index, Qt.UserRole + 10)
name = self.surface_model.data(index, Qt.DisplayRole)
new_data = inverse_transformation(data)
new_name = "inv_" + name
# save result as a new overlay
self.surface_model.add_item(index,
source=new_data,
name=new_name)
else:
return
def _smooth(self):
"""Image smooth dialog."""
new_dialog = SmoothingDialog(self.volume_model)
new_dialog.exec_()
def _prob_map(self):
"""Calculate probability map"""
dialog = SurfProbMapDialog(self.surface_model)
dialog.exec_()
def _concatenate(self):
dialog = SurfConcatenateDialog(self.surface_model)
dialog.exec_()
def _region_grow(self):
"""Image region grow dialog."""
# new_dialog = GrowDialog(self.volume_model, self)
new_dialog = VolumeRGDialog(self.volume_model)
new_dialog.exec_()
def _watershed(self):
"""Image watershed dialog."""
new_dialog = WatershedDialog(self.volume_model, self)
new_dialog.exec_()
def _slic(self):
"""Image supervoxel segmentation dialog."""
new_dialog = SLICDialog(self.volume_model, self)
new_dialog.exec_()
def _cluster(self):
"""Image cluster dialog."""
if self.tabWidget.currentWidget() is self.list_view:
cluster_dialog = VolClusterDialog(self.volume_model)
elif self.tabWidget.currentWidget() is self.surface_tree_view:
cluster_dialog = SurfClusterDialog(self.surface_model)
else:
return
cluster_dialog.exec_()
def _vol_func_module_set_enabled(self, status):
"""
        Set the enabled status for actions of the volume functional module.
"""
self._actions['meants'].setEnabled(status)
self._actions['voxelstats'].setEnabled(status)
self._actions['localmax'].setEnabled(status)
self._actions['smoothing'].setEnabled(status)
self._actions['atlas'].setEnabled(status)
self._actions['region_grow'].setEnabled(status)
self._actions['watershed'].setEnabled(status)
self._actions['slic'].setEnabled(status)
self._actions['opening'].setEnabled(status)
self._actions['greydilation'].setEnabled(status)
self._actions['greyerosion'].setEnabled(status)
self._actions['regular_roi'].setEnabled(status)
self._actions['regular_roi_from_csv'].setEnabled(status)
self._actions['r2i'].setEnabled(status)
self._actions['roi_merge'].setEnabled(status)
def _surf_func_module_set_enabled(self, status):
"""
        Set the enabled status for actions of the surface functional module.
"""
self._actions['scribing'].setEnabled(status)
self._actions['surf_region_grow'].setEnabled(status)
self._actions['concatenate'].setEnabled(status)
self._actions['probability_map'].setEnabled(status)
def _snapshot(self):
"""Capture images from OrthView."""
self.volume_view.save_image()
def set_save_dir(self, path):
self._save_dir = path
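# A minimal usage sketch for the main-window class above (hypothetical: the
# class name `MainWindow` and the file path are illustrative; assumes PyQt4
# under Python 2, as the code above does):
#     app = QApplication(sys.argv)
#     win = MainWindow()
#     win._add_volume_img('/path/to/brain.nii.gz')   # load a volume programmatically
#     win.show()
#     sys.exit(app.exec_())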
--- hexsha: 09abc457e5bd1caa1d8046d6ee92bbfdae5edefe | size: 1475 | ext: py | lang: Python | repo_path: backend/naki/naki/model/digital_item.py | repo_name: iimcz/emod | licenses: [MIT] ---
import colander
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import DateTime, Integer, Unicode, UnicodeText
from naki.model.meta import Base
class DigitalItem(Base):
__tablename__ = "tDigitalItem"
    id_item = Column('sID_Item', Unicode(64), primary_key=True, info={'colanderalchemy': {'missing': None}})
mime = Column('sMime', Unicode(64))
created = Column('dCreated', DateTime)
description = Column('sDescription', UnicodeText, info={'colanderalchemy': {'missing': ''}})
id_user = Column('sAuthor', Unicode(64))
rights = Column('sRights', Integer, info={'colanderalchemy': {'missing': 0}})
def __init__(self, id_item, mime, created, description, id_user, rights):
self.id_item = id_item
self.mime = mime
self.created = created
self.description = description
self.id_user = id_user
self.rights = rights
def get_dict(self):
return ({
'id_item': self.id_item,
'mime': self.mime,
'created': str(self.created),
'description': self.description,
'id_user': self.id_user,
'rights': self.rights,
})
def set_from_dict(self, d):
#self.id_item = d['id_item']
self.mime = d['mime']
#self.created = d['created']
self.description = d['description']
self.id_user = d['id_user']
self.rights = d['rights']
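# A minimal round-trip sketch for the model above (values are illustrative;
# assumes `from datetime import datetime` for the `created` field):
#     di = DigitalItem('item-1', 'image/png', datetime.utcnow(), 'desc', 'user-1', 0)
#     payload = di.get_dict()        # serialize, e.g. for an API response
#     payload['rights'] = 1
#     di.set_from_dict(payload)      # id_item and created stay untouched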
--- hexsha: 09ae724cdc803309af6a236723605a5ad5b9d098 | size: 4389 | ext: py | lang: Python | repo_path: z3/labeled_dice.py | repo_name: Wikunia/hakank | licenses: [MIT] | max_stars_count: 279 ---
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Labeled dice and Building block problems in Z3
#
# * Labeled dice
#
# From Jim Orlin 'Colored letters, labeled dice: a logic puzzle'
# http://jimorlin.wordpress.com/2009/02/17/colored-letters-labeled-dice-a-logic-puzzle/
# '''
# My daughter Jenn bought a puzzle book, and showed me a cute puzzle. There
# are 13 words as follows: BUOY, CAVE, CELT, FLUB, FORK, HEMP, JUDY,
# JUNK, LIMN, QUIP, SWAG, VISA, WISH.
#
# There are 24 different letters that appear in the 13 words. The question
# is: can one assign the 24 letters to 4 different cubes so that the
# four letters of each word appears on different cubes. (There is one
# letter from each word on each cube.) It might be fun for you to try
# it. I'll give a small hint at the end of this post. The puzzle was
# created by Humphrey Dudley.
# '''
#
# Also, see Jim Orlin's followup 'Update on Logic Puzzle':
# http://jimorlin.wordpress.com/2009/02/21/update-on-logic-puzzle/
#
#
# * Building Blocks puzzle (Dell Logic Puzzles) in MiniZinc.
#
# From http://brownbuffalo.sourceforge.net/BuildingBlocksClues.html
# """
# Each of four alphabet blocks has a single letter of the alphabet on each
# of its six sides. In all, the four blocks contain every letter but
# Q and Z. By arranging the blocks in various ways, you can spell all of
# the words listed below. Can you figure out how the letters are arranged
# on the four blocks?
#
# BAKE ONYX ECHO OVAL
#
# GIRD SMUG JUMP TORN
#
# LUCK VINY LUSH WRAP
# """
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
#
from __future__ import print_function
from z3_utils_hakank import *
def labeled_dice():
print("Labeled dice\n")
#
# data
#
n = 4
m = 24
A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, Y = (
list(range(m)))
letters = [A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, Y]
letters_s = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "Y"]
num_words = 13
words = [
[B,U,O,Y],
[C,A,V,E],
[C,E,L,T],
[F,L,U,B],
[F,O,R,K],
[H,E,M,P],
[J,U,D,Y],
[J,U,N,K],
[L,I,M,N],
[Q,U,I,P],
[S,W,A,G],
[V,I,S,A],
[W,I,S,H]
]
solve_it(n,m,letters,letters_s,num_words,words)
def building_blocks():
print("Building blocks\n")
n = 4
m = 24
A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, R, S, T, U, V, W, X, Y = (
list(range(m)))
letters = [A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, R, S, T, U, V, W, X, Y]
letters_s = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X","Y"]
num_words = 12
words = [
[B,A,K,E],
[O,N,Y,X],
[E,C,H,O],
[O,V,A,L],
[G,I,R,D],
[S,M,U,G],
[J,U,M,P],
[T,O,R,N],
[L,U,C,K],
[V,I,N,Y],
[L,U,S,H],
[W,R,A,P]
]
solve_it(n,m,letters,letters_s,num_words,words)
def solve_it(n,m,letters,letters_s,num_words,words):
sol = Solver()
#
# declare variables
#
dice = [makeIntVar(sol, "dice[%i]" % i, 0, n - 1) for i in range(m)]
# constraints
# the letters in a word must be on a different die
for i in range(num_words):
sol.add(Distinct([dice[words[i][j]] for j in range(n)]))
# there must be exactly 6 letters of each die
for i in range(n):
sol.add(Sum([If(dice[j] == i,1,0) for j in range(m)]) == 6)
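    # The Sum of 0/1 If-terms is Z3's usual cardinality encoding: it counts
    # how many of the m letters were assigned to die i and pins that count at 6.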
#
# solution and search
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
for d in range(n):
print("die %i:" % d, end=' ')
for i in range(m):
if mod.eval(dice[i]) == d:
                    print(letters_s[i], end=' ')  # print the letter name, not its index
print()
print("The words with the cube label:")
for i in range(num_words):
for j in range(n):
print("%s (%i)" % (letters_s[words[i][j]], mod.eval(dice[words[i][j]]).as_long()), end=' ')
print()
sol.add(Or([dice[i] != mod.eval(dice[i]) for i in range(m)]))
print()
print()
print("num_solutions:", num_solutions)
if __name__ == "__main__":
labeled_dice()
print("\n\n\n")
building_blocks()
--- hexsha: 09b071406342703275a6a5f8df9c8ce73299146c | size: 1602 | ext: py | lang: Python | repo_path: scripts/ai/mle/test_ai.py | repo_name: AlexGustafsson/word-frequencies | licenses: [Unlicense] ---
import pickle
import random
from argparse import ArgumentParser

# Requires NLTK to be installed:
# python3 -m pip install nltk
# python3 -c 'import nltk;nltk.download("punkt")'
# May be slow at first start due to NLTK preparing its dependencies
from nltk.tokenize.treebank import TreebankWordDetokenizer
from nltk.lm import MLE

detokenize = TreebankWordDetokenizer().detokenize


def generate_sentence(model: MLE, length: int, seed=None):
    # Draw a fresh seed on each call. A default such as random.randint(0, 1e10)
    # would be evaluated only once, at function definition time, and 1e10 is a
    # float, which recent Python versions reject in randint().
    if seed is None:
        seed = random.randint(0, 10 ** 10)
    content = []
    for token in model.generate(length, random_seed=seed):
        if token == '<s>':
            continue
        if token == '</s>':
            break
        content.append(token)
    return detokenize(content)


def main() -> None:
    """Main entrypoint."""
    # Create an argument parser for parsing CLI arguments
    parser = ArgumentParser(description="A tool to train an AI to predict the probability of a word in a sentence")
    # Add parameters for the server connection
    parser.add_argument("-i", "--input", required=True, type=str, help="The serialized model previously trained")
    parser.add_argument("-w", "--word", required=True, type=str, help="The word to check the probability for")
    parser.add_argument("-c", "--context", required=True, type=str, help="The context / sentence for the word")
    # Parse the arguments
    options = parser.parse_args()

    model = None
    with open(options.input, "rb") as file:
        model = pickle.load(file)

    print(model.logscore(options.word, options.context.split()))
    print(generate_sentence(model, 10))


if __name__ == '__main__':
    main()
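
# --- Illustrative aside (not part of the original script) --------------------
# A hypothetical sketch of how a model consumed by --input could have been
# trained and pickled with the same nltk.lm API. The two-sentence corpus is
# made up and only illustrates the shapes involved, not the real training data.
def train_sketch(order: int = 2) -> MLE:
    from nltk.lm.preprocessing import padded_everygram_pipeline
    from nltk.tokenize import word_tokenize
    corpus = [word_tokenize(s) for s in ("a small example .", "another small example .")]
    train_data, vocab = padded_everygram_pipeline(order, corpus)
    model = MLE(order)
    model.fit(train_data, vocab)
    return model

# with open("model.pkl", "wb") as f:   # hypothetical output path
#     pickle.dump(train_sketch(), f)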
| 32.693878
| 115
| 0.692884
| 210
| 1,602
| 5.214286
| 0.495238
| 0.024658
| 0.046575
| 0.052055
| 0.071233
| 0.071233
| 0
| 0
| 0
| 0
| 0
| 0.006197
| 0.194132
| 1,602
| 48
| 116
| 33.375
| 0.841983
| 0.189139
| 0
| 0
| 0
| 0
| 0.177156
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.285714
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09b38cadc8a5b66d765f9f62596709fa7325c773
| 7,529
|
py
|
Python
|
lib/common/render_utils.py
|
YuliangXiu/ICON
|
ece5a09aa2d56aec28017430e65a0352622a0f30
|
[
"Intel"
] | 486
|
2021-12-16T03:13:31.000Z
|
2022-03-30T04:26:48.000Z
|
lib/common/render_utils.py
|
YuliangXiu/ICON
|
ece5a09aa2d56aec28017430e65a0352622a0f30
|
[
"Intel"
] | 33
|
2021-12-30T07:28:10.000Z
|
2022-03-30T08:04:06.000Z
|
lib/common/render_utils.py
|
YuliangXiu/ICON
|
ece5a09aa2d56aec28017430e65a0352622a0f30
|
[
"Intel"
] | 38
|
2021-12-17T10:55:01.000Z
|
2022-03-30T23:25:39.000Z
|
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de

import torch
from torch import nn
import trimesh
import math
from typing import NewType

from pytorch3d.structures import Meshes
from pytorch3d.renderer.mesh import rasterize_meshes

Tensor = NewType('Tensor', torch.Tensor)


def solid_angles(points: Tensor,
                 triangles: Tensor,
                 thresh: float = 1e-8) -> Tensor:
    ''' Compute solid angle between the input points and triangles

        Follows the method described in:
        The Solid Angle of a Plane Triangle
        A. VAN OOSTEROM AND J. STRACKEE
        IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING,
        VOL. BME-30, NO. 2, FEBRUARY 1983

        Parameters
        -----------
            points: BxQx3
                Tensor of input query points
            triangles: BxFx3x3
                Target triangles
            thresh: float
                float threshold
        Returns
        -------
            solid_angles: BxQxF
                A tensor containing the solid angle between all query points
                and input triangles
    '''
    # Center the triangles on the query points. Size should be BxQxFx3x3
    centered_tris = triangles[:, None] - points[:, :, None, None]

    # BxQxFx3
    norms = torch.norm(centered_tris, dim=-1)

    # Should be BxQxFx3
    cross_prod = torch.cross(centered_tris[:, :, :, 1],
                             centered_tris[:, :, :, 2],
                             dim=-1)
    # Should be BxQxF
    numerator = (centered_tris[:, :, :, 0] * cross_prod).sum(dim=-1)
    del cross_prod

    dot01 = (centered_tris[:, :, :, 0] * centered_tris[:, :, :, 1]).sum(dim=-1)
    dot12 = (centered_tris[:, :, :, 1] * centered_tris[:, :, :, 2]).sum(dim=-1)
    dot02 = (centered_tris[:, :, :, 0] * centered_tris[:, :, :, 2]).sum(dim=-1)
    del centered_tris

    denominator = (norms.prod(dim=-1) + dot01 * norms[:, :, :, 2] +
                   dot02 * norms[:, :, :, 1] + dot12 * norms[:, :, :, 0])
    del dot01, dot12, dot02, norms

    # Should be BxQ
    solid_angle = torch.atan2(numerator, denominator)
    del numerator, denominator

    torch.cuda.empty_cache()

    return 2 * solid_angle


def winding_numbers(points: Tensor,
                    triangles: Tensor,
                    thresh: float = 1e-8) -> Tensor:
    ''' Uses winding_numbers to compute inside/outside

        Robust inside-outside segmentation using generalized winding numbers
        Alec Jacobson, Ladislav Kavan, Olga Sorkine-Hornung

        Fast Winding Numbers for Soups and Clouds, SIGGRAPH 2018
        Gavin Barill, Neil G. Dickson, Ryan Schmidt, David I.W. Levin
        and Alec Jacobson

        Parameters
        -----------
            points: BxQx3
                Tensor of input query points
            triangles: BxFx3x3
                Target triangles
            thresh: float
                float threshold
        Returns
        -------
            winding_numbers: BxQ
                A tensor containing the Generalized winding numbers
    '''
    # The generalized winding number is the sum of solid angles of the point
    # with respect to all triangles.
    return 1 / (4 * math.pi) * solid_angles(points, triangles,
                                            thresh=thresh).sum(dim=-1)


def batch_contains(verts, faces, points):
    B = verts.shape[0]
    N = points.shape[1]

    verts = verts.detach().cpu()
    faces = faces.detach().cpu()
    points = points.detach().cpu()

    contains = torch.zeros(B, N)

    for i in range(B):
        contains[i] = torch.as_tensor(
            trimesh.Trimesh(verts[i], faces[i]).contains(points[i]))

    return 2.0 * (contains - 0.5)


def dict2obj(d):
    # if isinstance(d, list):
    #     d = [dict2obj(x) for x in d]
    if not isinstance(d, dict):
        return d

    class C(object):
        pass

    o = C()
    for k in d:
        o.__dict__[k] = dict2obj(d[k])
    return o


def face_vertices(vertices, faces):
    """
    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3]
    :return: [batch size, number of faces, 3, 3]
    """
    bs, nv = vertices.shape[:2]
    bs, nf = faces.shape[:2]
    device = vertices.device
    faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) *
                     nv)[:, None, None]
    vertices = vertices.reshape((bs * nv, vertices.shape[-1]))

    return vertices[faces.long()]


class Pytorch3dRasterizer(nn.Module):
    """ Borrowed from https://github.com/facebookresearch/pytorch3d

    Notice:
        x,y,z are in image space, normalized
        can only render squared image now
    """

    def __init__(self, image_size=224):
        """
        use fixed raster_settings for rendering faces
        """
        super().__init__()
        raster_settings = {
            'image_size': image_size,
            'blur_radius': 0.0,
            'faces_per_pixel': 1,
            'bin_size': None,
            'max_faces_per_bin': None,
            'perspective_correct': True,
            'cull_backfaces': True,
        }
        raster_settings = dict2obj(raster_settings)
        self.raster_settings = raster_settings

    def forward(self, vertices, faces, attributes=None):
        fixed_vertices = vertices.clone()
        fixed_vertices[..., :2] = -fixed_vertices[..., :2]
        meshes_screen = Meshes(verts=fixed_vertices.float(),
                               faces=faces.long())
        raster_settings = self.raster_settings
        pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
            meshes_screen,
            image_size=raster_settings.image_size,
            blur_radius=raster_settings.blur_radius,
            faces_per_pixel=raster_settings.faces_per_pixel,
            bin_size=raster_settings.bin_size,
            max_faces_per_bin=raster_settings.max_faces_per_bin,
            perspective_correct=raster_settings.perspective_correct,
        )
        vismask = (pix_to_face > -1).float()
        D = attributes.shape[-1]
        attributes = attributes.clone()
        attributes = attributes.view(attributes.shape[0] * attributes.shape[1],
                                     3, attributes.shape[-1])
        N, H, W, K, _ = bary_coords.shape
        mask = pix_to_face == -1
        pix_to_face = pix_to_face.clone()
        pix_to_face[mask] = 0
        idx = pix_to_face.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, D)
        pixel_face_vals = attributes.gather(0, idx).view(N, H, W, K, 3, D)
        pixel_vals = (bary_coords[..., None] * pixel_face_vals).sum(dim=-2)
        pixel_vals[mask] = 0  # Replace masked values in output.
        pixel_vals = pixel_vals[:, :, :, 0].permute(0, 3, 1, 2)
        pixel_vals = torch.cat(
            [pixel_vals, vismask[:, :, :, 0][:, None, :, :]], dim=1)
        return pixel_vals
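
# --- Illustrative aside (not part of the original module) --------------------
# A tiny sanity check of winding_numbers() on a closed tetrahedron: the
# generalized winding number should be close to +/-1 (the sign depends on the
# face orientation convention) for an interior point and close to 0 for an
# exterior one. Shapes follow the docstrings above: points BxQx3, triangles
# BxFx3x3.
def _winding_sanity_check():
    verts = torch.tensor([[0., 0., 0.],
                          [1., 0., 0.],
                          [0., 1., 0.],
                          [0., 0., 1.]])
    faces = torch.tensor([[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]])
    triangles = verts[faces][None]              # 1 x 4 x 3 x 3
    points = torch.tensor([[[0.2, 0.2, 0.2],    # inside the tetrahedron
                            [2.0, 2.0, 2.0]]])  # well outside
    return winding_numbers(points, triangles)   # shape 1x2: ~(+/-1, ~0)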
| 33.914414
| 79
| 0.592376
| 919
| 7,529
| 4.723613
| 0.311208
| 0.045151
| 0.014513
| 0.003686
| 0.156876
| 0.130615
| 0.10182
| 0.096752
| 0.096752
| 0.075098
| 0
| 0.023652
| 0.298048
| 7,529
| 221
| 80
| 34.067873
| 0.79754
| 0.321424
| 0
| 0.037037
| 0
| 0
| 0.021409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064815
| false
| 0.009259
| 0.064815
| 0
| 0.212963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09b41443c1ba334ee6ad9dc77418ea29db20354e
| 456
|
py
|
Python
|
merge_string.py
|
mrillusi0n/compete
|
ac798e2b1ff27abddd8bebf113d079228f038e56
|
[
"MIT"
] | null | null | null |
merge_string.py
|
mrillusi0n/compete
|
ac798e2b1ff27abddd8bebf113d079228f038e56
|
[
"MIT"
] | null | null | null |
merge_string.py
|
mrillusi0n/compete
|
ac798e2b1ff27abddd8bebf113d079228f038e56
|
[
"MIT"
] | null | null | null |
######################### AABCAAADA
from collections import OrderedDict


def remove_duplicates(block):
    """
    >>> remove_duplicates('AAB')
    'AB'
    """
    freq = OrderedDict()
    for c in block:
        freq[c] = freq.get(c, 0) + 1
    return ''.join(freq.keys())


def solve(text, block_size):
    return '\n'.join(map(remove_duplicates,
                         [text[i:i + block_size] for i in range(0, len(text), block_size)]))


print(solve('AABCAAADA', 3))
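
# --- Illustrative aside (not part of the original solution) ------------------
# Since Python 3.7 plain dicts preserve insertion order, so the OrderedDict
# counting above can be replaced by dict.fromkeys when only the order of first
# appearance matters (the counts are never read).
def remove_duplicates_v2(block):
    return ''.join(dict.fromkeys(block))

assert remove_duplicates_v2('AAB') == 'AB'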
| 18.24
| 65
| 0.58114
| 59
| 456
| 4.389831
| 0.525424
| 0.185328
| 0.100386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011019
| 0.203947
| 456
| 24
| 66
| 19
| 0.702479
| 0.105263
| 0
| 0
| 0
| 0
| 0.03022
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0.1
| 0.5
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09b5d250f780316cd9c06c021e66be29bc76a8ed
| 884
|
py
|
Python
|
tests/views/test_is_component_field_model_or_unicorn_field.py
|
nerdoc/django-unicorn
|
e512b8f64f5c276a78127db9a05d9d5c042232d5
|
[
"MIT"
] | 1
|
2021-12-21T16:20:49.000Z
|
2021-12-21T16:20:49.000Z
|
tests/views/test_is_component_field_model_or_unicorn_field.py
|
teury/django-unicorn
|
5e9142b8a7e13b862ece419d567e805cc783b517
|
[
"MIT"
] | null | null | null |
tests/views/test_is_component_field_model_or_unicorn_field.py
|
teury/django-unicorn
|
5e9142b8a7e13b862ece419d567e805cc783b517
|
[
"MIT"
] | 1
|
2022-02-10T07:47:01.000Z
|
2022-02-10T07:47:01.000Z
|
from django_unicorn.components import UnicornView
from django_unicorn.views.utils import _is_component_field_model_or_unicorn_field
from example.coffee.models import Flavor


class TypeHintView(UnicornView):
    model: Flavor = None


class ModelInstanceView(UnicornView):
    model = Flavor()


def test_type_hint():
    component = TypeHintView(component_name="asdf", component_id="hjkl")
    name = "model"
    actual = _is_component_field_model_or_unicorn_field(component, name)
    assert actual
    assert component.model is not None
    assert type(component.model) == Flavor


def test_model_instance():
    component = ModelInstanceView(component_name="asdf", component_id="hjkl")
    name = "model"
    actual = _is_component_field_model_or_unicorn_field(component, name)
    assert actual
    assert component.model is not None
    assert type(component.model) == Flavor
| 27.625
| 81
| 0.7681
| 110
| 884
| 5.890909
| 0.3
| 0.067901
| 0.074074
| 0.097222
| 0.566358
| 0.566358
| 0.566358
| 0.512346
| 0.512346
| 0.512346
| 0
| 0
| 0.154977
| 884
| 31
| 82
| 28.516129
| 0.86747
| 0
| 0
| 0.47619
| 0
| 0
| 0.029412
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.095238
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09bb144937911126f0899a4f90a8ca7646246b73
| 431
|
py
|
Python
|
data_wrangling/data_manipulation/check_if_even.py
|
dkedar7/Notes
|
08a9e710a774fd46ec525e0041c1cbd67fbe6c20
|
[
"MIT"
] | 3
|
2021-05-28T09:00:56.000Z
|
2021-12-21T01:12:20.000Z
|
data_wrangling/data_manipulation/check_if_even.py
|
dkedar7/Notes
|
08a9e710a774fd46ec525e0041c1cbd67fbe6c20
|
[
"MIT"
] | null | null | null |
data_wrangling/data_manipulation/check_if_even.py
|
dkedar7/Notes
|
08a9e710a774fd46ec525e0041c1cbd67fbe6c20
|
[
"MIT"
] | null | null | null |
import pytest

testdata = [
    (2, True),
    (3, False),
    (4, True),
    (5, True)  # We expect this test to fail
]


def check_if_even(a):
    """
    Returns True if 'a' is an even number
    """
    return a % 2 == 0


@pytest.mark.parametrize('sample, expected_output', testdata)
def test_check_if_even(sample, expected_output):
    """
    Define test cases
    """
    assert check_if_even(sample) == expected_output
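
# --- Illustrative aside (not part of the original notes) ---------------------
# If the failing row is deliberate, pytest can mark it as an expected failure
# so the suite still passes overall. A sketch using pytest.param:
testdata_xfail = [
    (2, True),
    (3, False),
    (4, True),
    pytest.param(5, True, marks=pytest.mark.xfail(reason="5 is odd")),
]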
| 17.958333
| 61
| 0.612529
| 59
| 431
| 4.305085
| 0.576271
| 0.082677
| 0.129921
| 0.133858
| 0.244094
| 0.244094
| 0
| 0
| 0
| 0
| 0
| 0.01875
| 0.257541
| 431
| 23
| 62
| 18.73913
| 0.775
| 0.194896
| 0
| 0
| 0
| 0
| 0.073016
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09c0c79a0b5cfaa45266d9d9675a6a0f9435dae8
| 6,234
|
py
|
Python
|
orwell/agent/main.py
|
dchilot/agent-server-game-python
|
ce8db9560047a06960343cc66a9eddb11e77f5a1
|
[
"BSD-3-Clause"
] | null | null | null |
orwell/agent/main.py
|
dchilot/agent-server-game-python
|
ce8db9560047a06960343cc66a9eddb11e77f5a1
|
[
"BSD-3-Clause"
] | null | null | null |
orwell/agent/main.py
|
dchilot/agent-server-game-python
|
ce8db9560047a06960343cc66a9eddb11e77f5a1
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import sys
import socket

from cliff.app import App
from cliff.command import Command
from cliff.commandmanager import CommandManager


class RegisteredCommand(Command):
    def __init__(self, app, app_args):
        super(RegisteredCommand, self).__init__(app, app_args)

    @classmethod
    def register_to(klass, command_manager):
        command_manager.add_command(klass._command_name, klass)


class SingleCommand(RegisteredCommand):
    def take_action(self, parsed_args):
        self.app.send(self._command_name + ' ' + parsed_args.object[0])


class List(SingleCommand):
    "List something."
    log = logging.getLogger(__name__)
    port = None
    host = socket.gethostbyname(socket.getfqdn())

    def take_action(self, parsed_args):
        self.app.send(
            ' '.join((
                self._command_name,
                List.host,
                List.port)))
        message = self.app.receive()
        self.log.info(message)


class ListPlayer(List):
    "List all players."
    log = logging.getLogger(__name__)
    _command_name = 'list player'


class ListRobot(List):
    "List all robots."
    log = logging.getLogger(__name__)
    _command_name = 'list robot'


class Add(SingleCommand):
    "Add something."
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(Add, self).get_parser(prog_name)
        parser.add_argument(
            'object',
            nargs=1)
        return parser


class AddPlayer(Add):
    "Add a player."
    log = logging.getLogger(__name__)
    _command_name = 'add player'


class AddRobot(Add):
    "Add a robot."
    log = logging.getLogger(__name__)
    _command_name = 'add robot'


class Remove(SingleCommand):
    "Remove something."
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(Remove, self).get_parser(prog_name)
        parser.add_argument(
            'object',
            nargs=1)
        return parser


class RemovePlayer(Remove):
    "Remove a player."
    log = logging.getLogger(__name__)
    _command_name = 'remove player'


class RemoveRobot(Remove):
    "Remove a robot."
    log = logging.getLogger(__name__)
    _command_name = 'remove robot'


class Start(SingleCommand):
    "Start something."
    log = logging.getLogger(__name__)
    _command_name = 'start'

    def get_parser(self, prog_name):
        parser = super(Start, self).get_parser(prog_name)
        parser.add_argument(
            'object',
            nargs=1,
            choices=('game',))
        return parser


class Stop(SingleCommand):
    "Stop something."
    log = logging.getLogger(__name__)
    _command_name = 'stop'

    def get_parser(self, prog_name):
        parser = super(Stop, self).get_parser(prog_name)
        parser.add_argument(
            'object',
            nargs=1,
            choices=('application', 'game'))
        return parser


class AgentApp(App):
    log = logging.getLogger(__name__)

    def __init__(self):
        command = CommandManager('orwell.agent')
        super(AgentApp, self).__init__(
            description='Orwell agent.',
            version='0.0.1',
            command_manager=command,
        )
        Start.register_to(command)
        Stop.register_to(command)
        ListPlayer.register_to(command)
        ListRobot.register_to(command)
        AddPlayer.register_to(command)
        AddRobot.register_to(command)
        RemovePlayer.register_to(command)
        RemoveRobot.register_to(command)
        self._zmq_context = None
        self._zmq_publish_socket = None
        self._zmq_pull_socket = None

    def build_option_parser(
            self,
            description,
            version,
            argparse_kwargs=None):
        parser = super(AgentApp, self).build_option_parser(
            description,
            version,
            argparse_kwargs)
        parser.add_argument(
            '-p',
            '--port',
            type=int,
            default=9003,
            help='The port to send commands to.')
        parser.add_argument(
            '-a',
            '--address',
            type=str,
            default='127.0.0.1',
            help='The address to send commands to.')
        parser.add_argument(
            '-l',
            '--listen',
            type=int,
            default=9004,
            help='The port to listen to for replies.')
        return parser

    def initialize_app(self, argv):
        self.log.debug('initialize_app')
        import zmq
        self._zmq_context = zmq.Context()
        self.log.debug('created context = %s' % self._zmq_context)
        self._zmq_publish_socket = self._zmq_context.socket(zmq.PUB)
        self.log.debug(
            'created publish socket = %s' % self._zmq_publish_socket)
        self._zmq_publish_socket.setsockopt(zmq.LINGER, 1)
        self._zmq_publish_socket.connect("tcp://%s:%i" % (
            self.options.address,
            self.options.port))
        self._zmq_pull_socket = self._zmq_context.socket(zmq.PULL)
        self.log.debug('created pull socket = %s' % self._zmq_pull_socket)
        self._zmq_pull_socket.setsockopt(zmq.LINGER, 1)
        self._zmq_pull_socket.bind("tcp://0.0.0.0:%i" % self.options.listen)
        List.port = str(self.options.listen)
        import time
        time.sleep(0.001)

    def send(self, command):
        self.log.debug('send command "%s"' % command)
        self.log.debug('call socket.send("%s")' % command)
        # NOTE: under Python 3, pyzmq's send() expects bytes; there you would
        # use send_string()/recv_string() instead of send()/recv().
        self._zmq_publish_socket.send(command)

    def receive(self):
        self.log.debug('try to receive a message')
        message = self._zmq_pull_socket.recv()
        self.log.debug('received: %s', message)
        return message

    def prepare_to_run_command(self, cmd):
        self.log.debug('prepare_to_run_command %s', cmd.__class__.__name__)

    def clean_up(self, cmd, result, err):
        self.log.debug('clean_up %s', cmd.__class__.__name__)
        if err:
            self.log.debug('got an error: %s', err)


def main(argv=sys.argv[1:]):
    myapp = AgentApp()
    return myapp.run(argv)


if ("__main__" == __name__):
    sys.exit(main(sys.argv[1:]))  # pragma: no coverage
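
# --- Illustrative aside (not part of the original agent) ---------------------
# The agent PUBlishes commands (connecting to port 9003) and PULLs replies
# (binding port 9004), so a matching peer would SUBscribe for commands and
# PUSH replies back. A hypothetical echo peer, with ports mirroring the
# agent's defaults:
def _echo_peer_sketch():
    import zmq
    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.setsockopt(zmq.SUBSCRIBE, b'')   # receive every published command
    sub.bind('tcp://0.0.0.0:9003')       # agent's PUB connects here
    push = ctx.socket(zmq.PUSH)
    push.connect('tcp://127.0.0.1:9004')  # agent's PULL binds here
    command = sub.recv()                 # blocks until the agent sends
    push.send(b'ack: ' + command)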
| 26.193277
| 76
| 0.611806
| 722
| 6,234
| 4.984765
| 0.185596
| 0.033065
| 0.063351
| 0.076688
| 0.331759
| 0.315643
| 0.278411
| 0.19172
| 0.127258
| 0.106141
| 0
| 0.007539
| 0.276548
| 6,234
| 237
| 77
| 26.303797
| 0.790466
| 0.031601
| 0
| 0.24581
| 0
| 0
| 0.111862
| 0.003541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089385
| false
| 0
| 0.044693
| 0
| 0.374302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09c267a3cb1cc17f6f5bb5ef69492d09f87a64fa
| 1,475
|
py
|
Python
|
tests/test_models.py
|
jimimvp/CausalProb
|
900527725ad43eac258df2b16ef93fd1643deb3a
|
[
"MIT"
] | 3
|
2021-11-04T16:37:45.000Z
|
2022-03-08T10:24:19.000Z
|
tests/test_models.py
|
jimimvp/CausalProb
|
900527725ad43eac258df2b16ef93fd1643deb3a
|
[
"MIT"
] | 13
|
2021-11-07T11:11:54.000Z
|
2021-11-20T10:40:39.000Z
|
tests/test_models.py
|
jimimvp/CausalProb
|
900527725ad43eac258df2b16ef93fd1643deb3a
|
[
"MIT"
] | 1
|
2021-11-17T21:40:49.000Z
|
2021-11-17T21:40:49.000Z
|
from causalprob import CausalProb

import unittest
import jax.numpy as jnp
import numpy as np


class TestNFConfounderModel(unittest.TestCase):
    def test_is_inverse_function(self):
        from models.nf_confounder_model import define_model
        dim = 2
        model = define_model(dim=dim)
        cp = CausalProb(model=model)
        theta = {k: cp.init_params[k](i) for i, k in enumerate(cp.init_params)}
        u, v = cp.fill({k: cp.draw_u[k](1, theta, seed) for seed, k in enumerate(cp.draw_u)}, {}, theta, cp.draw_u.keys())

        for rv in cp.f:
            assert jnp.allclose(cp.finv[rv](cp.f[rv](u[rv], theta, v), theta, v), u[rv])

    def test_determinant(self):
        from models.nf_confounder_model import define_model
        dim = 2
        model = define_model(dim=dim)
        cp = CausalProb(model=model)
        theta = {k: cp.init_params[k](i) for i, k in enumerate(cp.init_params)}
        u, v = cp.fill({k: cp.draw_u[k](1, theta, seed) for seed, k in enumerate(cp.draw_u)}, {}, theta, cp.draw_u.keys())

        for rv in cp.ldij:
            assert jnp.allclose(
                jnp.round(cp.ldij[rv](v[rv], theta, v).squeeze(), 4),
                jnp.round(
                    jnp.log(
                        jnp.abs(
                            jnp.linalg.det(
                                cp.dfinvv_dv(rv, {k: _v.squeeze(0) for k, _v in v.items()}, theta)))), 4))
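
# --- Illustrative aside (not part of the original tests) ---------------------
# What test_determinant checks, in miniature: for an invertible map, the
# log-abs-determinant of its Jacobian can be recomputed directly from the
# matrix. A worked 2x2 affine example with jax (module-level helper, made up
# for illustration):
def _logdet_sketch():
    A = jnp.array([[2.0, 1.0],
                   [0.0, 3.0]])                      # Jacobian of x -> A @ x
    expected = jnp.log(jnp.abs(jnp.linalg.det(A)))   # log|2*3 - 1*0| = log 6
    sign, logabsdet = jnp.linalg.slogdet(A)          # numerically safer route
    assert jnp.allclose(expected, logabsdet)
    return expected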
| 39.864865
| 122
| 0.547119
| 213
| 1,475
| 3.671362
| 0.276995
| 0.046036
| 0.053708
| 0.071611
| 0.567775
| 0.567775
| 0.567775
| 0.567775
| 0.567775
| 0.567775
| 0
| 0.007035
| 0.325424
| 1,475
| 36
| 123
| 40.972222
| 0.778894
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09c9a15d0f7a17f53680be679c2a6066d5b21c97
| 1,335
|
py
|
Python
|
tools/prepare_data.py
|
xrick/CTC_DySpeechCommands
|
d92cb97f7344fb5acdb6aa3fc3dfb7c022fffc6e
|
[
"MIT"
] | 74
|
2018-05-05T18:43:28.000Z
|
2022-03-21T13:00:14.000Z
|
tools/prepare_data.py
|
xrick/CTC_DySpeechCommands
|
d92cb97f7344fb5acdb6aa3fc3dfb7c022fffc6e
|
[
"MIT"
] | 5
|
2018-07-20T16:18:57.000Z
|
2021-01-26T11:52:31.000Z
|
tools/prepare_data.py
|
xrick/CTC_DySpeechCommands
|
d92cb97f7344fb5acdb6aa3fc3dfb7c022fffc6e
|
[
"MIT"
] | 21
|
2018-06-18T07:21:19.000Z
|
2021-04-11T06:49:03.000Z
|
"""Downloads the training dataset and removes bad samples.
"""
import csv
import os
import urllib.request
import tarfile
import glob
DATA_URL = 'http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz'
TRAIN_DIR = '../dataset/train/audio/'
FILE_BAD = 'bad_samples.txt'
def maybe_download(data_url, dest_directory):
"""Download and extract data set tar file.
"""
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
print('Downloading %s ...' % filename)
filepath, _ = urllib.request.urlretrieve(data_url, filepath)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
print('Successfully unzipped %s' % filename)
def remove_bad(f_bad, train_dir):
"""Deletes bad samples in the dataset.
"""
num_bad = 0
with open(f_bad, 'r') as fp:
for wav in csv.reader(fp, delimiter=','):
try:
os.remove(train_dir + wav[0])
num_bad += 1
except FileNotFoundError:
pass
print('bad_training_samples removed: %d' % num_bad)
wav_paths = glob.glob(os.path.join(train_dir, '*', '*nohash*.wav'))
print('num_training_samples = %d' % len(wav_paths))
maybe_download(DATA_URL, TRAIN_DIR)
remove_bad(FILE_BAD, TRAIN_DIR)
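
# --- Illustrative aside (not part of the original script) --------------------
# maybe_download() trusts any file already on disk. A hedged sketch of an
# integrity check using only the standard library; the expected digest below
# is a placeholder, not the real checksum of the archive.
def sha256_of(path, chunk_size=1 << 20):
    import hashlib
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

# EXPECTED_SHA256 = '...'  # fill in from a trusted source
# assert sha256_of(archive_path) == EXPECTED_SHA256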
| 26.7
| 77
| 0.698876
| 191
| 1,335
| 4.691099
| 0.413613
| 0.053571
| 0.037946
| 0.044643
| 0.037946
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006272
| 0.164045
| 1,335
| 49
| 78
| 27.244898
| 0.796595
| 0.102622
| 0
| 0
| 0
| 0
| 0.18713
| 0.019475
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.03125
| 0.15625
| 0
| 0.21875
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09caab87d8b63d185ae16695cb5079d8b60078ed
| 4,232
|
py
|
Python
|
Dashboard_Relay/tests/unit/api/test_authorization.py
|
weiwa6/SecValidation
|
e899b7aa3f46ded3b39aeb6a1eeab95cc8dc21b5
|
[
"BSD-3-Clause"
] | null | null | null |
Dashboard_Relay/tests/unit/api/test_authorization.py
|
weiwa6/SecValidation
|
e899b7aa3f46ded3b39aeb6a1eeab95cc8dc21b5
|
[
"BSD-3-Clause"
] | null | null | null |
Dashboard_Relay/tests/unit/api/test_authorization.py
|
weiwa6/SecValidation
|
e899b7aa3f46ded3b39aeb6a1eeab95cc8dc21b5
|
[
"BSD-3-Clause"
] | null | null | null |
from http import HTTPStatus

from authlib.jose import jwt
from pytest import fixture

from .utils import get_headers
from api.errors import AUTH_ERROR


def routes():
    yield '/health'
    yield '/deliberate/observables'
    yield '/observe/observables'
    yield '/refer/observables'
    yield '/respond/observables'
    yield '/respond/trigger'


@fixture(scope='module', params=routes(), ids=lambda route: f'POST {route}')
def route(request):
    return request.param


@fixture(scope='module')
def wrong_jwt_structure():
    return 'wrong_jwt_structure'


@fixture(scope='module')
def wrong_payload_structure_jwt(client):
    header = {'alg': 'HS256'}
    payload = {'not_key': 'something'}
    secret_key = client.application.secret_key
    return jwt.encode(header, payload, secret_key).decode('ascii')


@fixture(scope='session')
def invalid_jwt(valid_jwt):
    header, payload, signature = valid_jwt.split('.')

    def jwt_decode(s: str) -> dict:
        from authlib.common.encoding import urlsafe_b64decode, json_loads
        return json_loads(urlsafe_b64decode(s.encode('ascii')))

    def jwt_encode(d: dict) -> str:
        from authlib.common.encoding import json_dumps, urlsafe_b64encode
        return urlsafe_b64encode(json_dumps(d).encode('ascii')).decode('ascii')

    payload = jwt_decode(payload)

    # Corrupt the valid JWT by tampering with its payload.
    payload['superuser'] = True

    payload = jwt_encode(payload)

    return '.'.join([header, payload, signature])


@fixture(scope='module')
def authorization_errors_expected_payload(route):
    def _make_payload_message(message):
        payload = {
            'errors': [{
                'code': AUTH_ERROR,
                'message': f'Authorization failed: {message}',
                'type': 'fatal'}]
        }
        return payload

    return _make_payload_message


def test_call_with_authorization_header_failure(
        route, client,
        authorization_errors_expected_payload
):
    response = client.post(route)
    assert response.status_code == HTTPStatus.OK
    assert response.json == authorization_errors_expected_payload(
        'Authorization header is missing'
    )


def test_call_with_wrong_authorization_type(
        route, client, valid_jwt,
        authorization_errors_expected_payload
):
    response = client.post(
        route, headers=get_headers(valid_jwt, auth_type='wrong_type')
    )
    assert response.status_code == HTTPStatus.OK
    assert response.json == authorization_errors_expected_payload(
        'Wrong authorization type'
    )


def test_call_with_wrong_jwt_structure(
        route, client, wrong_jwt_structure,
        authorization_errors_expected_payload
):
    response = client.post(route, headers=get_headers(wrong_jwt_structure))
    assert response.status_code == HTTPStatus.OK
    assert response.json == authorization_errors_expected_payload(
        'Wrong JWT structure'
    )


def test_call_with_jwt_encoded_by_wrong_key(
        route, client, invalid_jwt,
        authorization_errors_expected_payload
):
    response = client.post(route, headers=get_headers(invalid_jwt))
    assert response.status_code == HTTPStatus.OK
    assert response.json == authorization_errors_expected_payload(
        'Failed to decode JWT with provided key'
    )


def test_call_with_wrong_jwt_payload_structure(
        route, client, wrong_payload_structure_jwt,
        authorization_errors_expected_payload
):
    response = client.post(route,
                           headers=get_headers(wrong_payload_structure_jwt))
    assert response.status_code == HTTPStatus.OK
    assert response.json == authorization_errors_expected_payload(
        'Wrong JWT payload structure'
    )


def test_call_with_missed_secret_key(
        route, client, valid_jwt,
        authorization_errors_expected_payload
):
    right_secret_key = client.application.secret_key
    client.application.secret_key = None

    response = client.post(route, headers=get_headers(valid_jwt))

    client.application.secret_key = right_secret_key

    assert response.status_code == HTTPStatus.OK
    assert response.json == authorization_errors_expected_payload(
        '<SECRET_KEY> is missing'
    )
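
# --- Illustrative aside (not part of the original suite) ---------------------
# The suite relies on valid_jwt and client fixtures defined elsewhere
# (typically a conftest.py in this kind of layout). A hypothetical sketch of
# how such a token could be minted with the same authlib API the tests
# already import; the payload key name is an assumption, not taken from the
# project:
def make_valid_jwt(secret_key):
    header = {'alg': 'HS256'}
    payload = {'key': 'some-client-key'}  # assumed payload structure
    return jwt.encode(header, payload, secret_key).decode('ascii')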
| 27.660131
| 79
| 0.712193
| 493
| 4,232
| 5.821501
| 0.204868
| 0.086063
| 0.1223
| 0.154007
| 0.479443
| 0.416028
| 0.366551
| 0.366551
| 0.322648
| 0.303136
| 0
| 0.00324
| 0.197779
| 4,232
| 152
| 80
| 27.842105
| 0.842121
| 0.012287
| 0
| 0.268519
| 0
| 0
| 0.107707
| 0.005505
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.138889
| false
| 0
| 0.064815
| 0.018519
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09cbcab75f8e35ba54cb7a9b30b5581da605210d
| 1,562
|
py
|
Python
|
ops-implementations/ads-ml-service/app/gunicorn.init.py
|
IBM/open-prediction-service-hub
|
8b7db98f46a81b731d0dddfde8e3fb6f91ebc71a
|
[
"Apache-2.0"
] | 1
|
2021-09-14T18:40:33.000Z
|
2021-09-14T18:40:33.000Z
|
ops-implementations/ads-ml-service/app/gunicorn.init.py
|
IBM/open-prediction-service-hub
|
8b7db98f46a81b731d0dddfde8e3fb6f91ebc71a
|
[
"Apache-2.0"
] | 7
|
2021-04-23T13:41:39.000Z
|
2021-08-12T09:33:10.000Z
|
ops-implementations/ads-ml-service/app/gunicorn.init.py
|
IBM/open-prediction-service-hub
|
8b7db98f46a81b731d0dddfde8e3fb6f91ebc71a
|
[
"Apache-2.0"
] | 5
|
2020-12-10T14:27:23.000Z
|
2022-03-29T08:44:22.000Z
|
#!/usr/bin/env python3
#
# Copyright 2020 IBM
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.IBM Confidential
#
import os
from multiprocessing import cpu_count
TRUE = ('TRUE', 'True', 'true', '1')
use_ssl = True if os.getenv('ENABLE_SSL') in TRUE else False
settings = os.getenv('SETTINGS')
# Gunicorn config variables
workers = int(os.getenv('GUNICORN_WORKER_NUM')) \
    if os.getenv('GUNICORN_WORKER_NUM') and int(os.getenv('GUNICORN_WORKER_NUM')) > 0 \
    else cpu_count() * 2 + 1
# Gunicorn needs to store its temporary file in memory (e.g. /dev/shm)
worker_tmp_dir = '/dev/shm'
# Container schedulers typically expect logs to come out on stdout/stderr, thus gunicorn is configured to do so
log_file = '-'
ssl_version = 'TLSv1_2'
bind = ':8080'
ca_certs = f'{settings}/ca.crt' if use_ssl else None
certfile = f'{settings}/server.crt' if use_ssl else None
keyfile = f'{settings}/server.key' if use_ssl else None
timeout = int(os.getenv('GUNICORN_TIMEOUT')) \
    if os.getenv('GUNICORN_TIMEOUT') and int(os.getenv('GUNICORN_TIMEOUT')) > 0 \
    else 30
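
# --- Illustrative aside (not part of the original config) --------------------
# The two "positive int from env, else default" expressions above follow the
# same pattern; a small helper would make that intent explicit. Sketch only:
def _positive_int_env(name, default):
    raw = os.getenv(name)
    try:
        value = int(raw)
    except (TypeError, ValueError):
        return default
    return value if value > 0 else default

# workers = _positive_int_env('GUNICORN_WORKER_NUM', cpu_count() * 2 + 1)
# timeout = _positive_int_env('GUNICORN_TIMEOUT', 30)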
| 33.956522
| 111
| 0.734955
| 247
| 1,562
| 4.558704
| 0.518219
| 0.056838
| 0.085258
| 0.067496
| 0.171403
| 0.083481
| 0
| 0
| 0
| 0
| 0
| 0.01673
| 0.158131
| 1,562
| 45
| 112
| 34.711111
| 0.839544
| 0.50064
| 0
| 0
| 0
| 0
| 0.283465
| 0.055118
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09ce6912201c289c5c7f36b054105384590d7dc8
| 2,143
|
py
|
Python
|
graphics/place_camera.py
|
bdemin/M113_Visualization
|
bf863af9dfc2902ae9123afeae8d5bd413a4bedb
|
[
"MIT"
] | null | null | null |
graphics/place_camera.py
|
bdemin/M113_Visualization
|
bf863af9dfc2902ae9123afeae8d5bd413a4bedb
|
[
"MIT"
] | null | null | null |
graphics/place_camera.py
|
bdemin/M113_Visualization
|
bf863af9dfc2902ae9123afeae8d5bd413a4bedb
|
[
"MIT"
] | null | null | null |
import numpy as np


def place_camera(time, data, camera, camera_distance, view):
    # Define camera parameters
    camera.SetViewUp([0, 0, 1])

    if view == 1:
        # General view
        chs_pos = data[0][0].path_loc[time]  # Chassis CG @ time
        cam_d = 12  # [m]
        cam_h = 4.5  # [m]
        chs2cam = [2, -cam_d, cam_h]  # vector from chassis to camera position
        chs_fix = [0, 0, 0]
        camera_pos = chs_pos + chs2cam
        cam_focal_point = chs_pos

    elif view == 2:
        # Rear view
        chassis_pos = data[0][0].path_loc[time]  # Chassis CG @ time
        chs2cam = [-7, 0, -0.5]
        # camera_pos = chassis_pos + chs2cam
        # Cam direction is locked on the chassis
        chassis_dir = data[0][0].path_dir[time]
        cam_d = 10
        camera_pos = chassis_pos + [-cam_d * np.cos(chassis_dir[2]),
                                    -cam_d * np.sin(chassis_dir[2]),
                                    cam_d * np.sin(chassis_dir[1]) + 1.5]
        camera.Roll(np.rad2deg(chassis_dir[0]))
        cam_focal_point = chassis_pos

    elif view == 3:
        # Wheel view
        wheel_pos = data[1][7].path_loc[time]  # Wheel #7 CG @ time
        # Cam direction is locked on the wheel
        wheel_dir = data[1][7].path_dir[time]
        cam_d = 1.5
        camera_pos = wheel_pos + [cam_d * np.sin(wheel_dir[2]),
                                  -cam_d * np.cos(wheel_dir[2]),
                                  -np.sin(wheel_dir[0]) + 0.2]
        cam_focal_point = wheel_pos
        # camera_pos = wheel_pos + [0, -1.6, 0.1]

    elif view == 4:
        # Top view
        # NEED TO FIX
        cam_d = 10
        cam_focal_point = [0, 0, 0]
        camera_pos = [30, 4, 60]

    elif view == 5:
        # Cool side view test
        chassis_pos = data[0][0].path_loc[time]  # Chassis CG @ time
        chs2cam = [-7, 0, -0.5]
        camera_pos = chassis_pos + chs2cam
        # Cam direction is locked on the chassis
        chassis_dir = data[0][0].path_dir[time]
        cam_d = 7
        cam_focal_point = chassis_pos + [cam_d * np.sin(chassis_dir[2]),
                                         -cam_d * np.cos(chassis_dir[2]),
                                         -np.sin(chassis_dir[0]) + 0.2]

    # Place camera and set focal point:
    camera.SetPosition(camera_pos)
    camera.SetFocalPoint(cam_focal_point)
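
# --- Illustrative aside (not part of the original module) --------------------
# The rear view (view == 2) places the camera a fixed distance behind the
# chassis along its yaw direction. The same trigonometry in isolation, as a
# hypothetical standalone helper:
def chase_offset(yaw, distance):
    # Offset that puts the camera `distance` behind a body whose heading is
    # `yaw` (radians, about +Z), in the XY plane.
    return np.array([-distance * np.cos(yaw), -distance * np.sin(yaw), 0.0])

# e.g. a body heading along +X (yaw = 0) gets a camera at (-d, 0, 0):
assert np.allclose(chase_offset(0.0, 10.0), [-10.0, 0.0, 0.0])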
| 31.985075
| 133
| 0.585161
| 340
| 2,143
| 3.476471
| 0.2
| 0.023689
| 0.035533
| 0.042301
| 0.471235
| 0.380711
| 0.348562
| 0.329103
| 0.329103
| 0.308799
| 0
| 0.053665
| 0.286981
| 2,143
| 67
| 134
| 31.985075
| 0.719895
| 0.20532
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.026316
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09cf011d62fddefba9f4507356da24e66db71898
| 16,405
|
py
|
Python
|
src/tools/api_compiler/compiler.py
|
facade-technologies-inc/facile
|
4c9134dced71734641fed605e152880cd9ddefe3
|
[
"MIT"
] | 2
|
2020-09-17T20:51:18.000Z
|
2020-11-03T15:58:10.000Z
|
src/tools/api_compiler/compiler.py
|
facade-technologies-inc/facile
|
4c9134dced71734641fed605e152880cd9ddefe3
|
[
"MIT"
] | 97
|
2020-08-26T05:07:08.000Z
|
2022-03-28T16:01:49.000Z
|
src/tools/api_compiler/compiler.py
|
facade-technologies-inc/facile
|
4c9134dced71734641fed605e152880cd9ddefe3
|
[
"MIT"
] | null | null | null |
"""
..
/------------------------------------------------------------------------------\
| -- FACADE TECHNOLOGIES INC. CONFIDENTIAL -- |
|------------------------------------------------------------------------------|
| |
| Copyright [2019] Facade Technologies Inc. |
| All Rights Reserved. |
| |
| NOTICE: All information contained herein is, and remains the property of |
| Facade Technologies Inc. and its suppliers if any. The intellectual and |
| and technical concepts contained herein are proprietary to Facade |
| Technologies Inc. and its suppliers and may be covered by U.S. and Foreign |
| Patents, patents in process, and are protected by trade secret or copyright |
| law. Dissemination of this information or reproduction of this material is |
| strictly forbidden unless prior written permission is obtained from Facade |
| Technologies Inc. |
| |
\------------------------------------------------------------------------------/
This file contains the Compiler class - the part of Facile that interprets a user's
work in the gui, and converts it into the desired API.
"""
import os
import sys
import json
from subprocess import check_call, DEVNULL, STDOUT, check_output
from shutil import copyfile, rmtree
from PySide2.QtCore import QObject, Signal
from PySide2.QtWidgets import QApplication
import data.statemachine as sm
from data.compilationprofile import CompilationProfile
from tools.api_compiler.copy_file_manifest import compilation_copy_files
from libs.logging import compiler_logger as logger
from libs.logging import log_exceptions
import libs.env as env
from multiprocessing.pool import ThreadPool
curPath = os.path.abspath(os.path.join(env.FACILE_DIR, "tools/api_compiler/compiler.py"))
dir, filename = os.path.split(curPath)
def nongui(fun):
"""Decorator running the function in non-gui thread while
processing the gui events."""
def wrap(*args, **kwargs):
pool = ThreadPool(processes=1)
a_sync = pool.apply_async(fun, args, kwargs)
while not a_sync.ready():
a_sync.wait(0.01)
QApplication.processEvents()
return a_sync.get()
return wrap
class Compiler(QObject):
stepStarted = Signal(str)
stepComplete = Signal()
finished = Signal()
def __init__(self, compProf: 'CompilationProfile' = None) -> None:
"""
Initializes the compiler with required information.
:return: None
"""
logger.debug("Instantiating compiler")
QObject.__init__(self)
self.statem = sm.StateMachine.instance
self._compProf = compProf
self._name = self.statem._project.getName()
self._apiName = self.statem._project.getAPIName()
self._backend = self.statem._project.getBackend()
self._exeLoc = self.statem._project.getExecutableFile()
self._opts = compProf.compResOpts
self._apim = self.statem._project.getAPIModel()
self._tguim = self.statem._project.getTargetGUIModel()
# Save Folders
self._saveFolder = os.path.join(compProf.apiFolderDir, self._name + '_API_Files')
self._srcFolder = os.path.join(self._saveFolder, self._apiName)
self._docFolder = os.path.join(self._srcFolder, 'Documentation')
# Make all save folders if they don't exist
if not os.path.exists(self._saveFolder): # If the user enters a path that doesn't exist, it is created
os.mkdir(self._saveFolder) # TODO: Should notify them of this in compiler dialog
if not os.path.exists(self._srcFolder):
os.mkdir(self._srcFolder)
if not os.path.exists(self._docFolder):
os.mkdir(self._docFolder)
self._necessaryFiles = ['apicore.pyd']
# THIS IS WHEN OBFUSCATING ALL FILES INDEPENDENTLY
#
# if sys.executable.endswith('facile.exe'):
# self._necessaryFiles = [filepath + 'd' for tmp, filepath in compilation_copy_files]
#
# # baseapplication is out of place when we make facile into an executable
# for filepath in self._necessaryFiles:
# if filepath.endswith('baseapplication.pyd'):
# self._necessaryFiles.remove(filepath)
# self._necessaryFiles.append('baseapplication.pyd')
# break
#
# else:
# self._necessaryFiles = [filepath for tmp, filepath in compilation_copy_files]
@nongui
def _dev_generateAPICore(self):
"""
Makes the api core file and places it in facile's root directory
NOTE: Should only ever be called in a development setting, never by a facile executable.
"""
msg = 'Generating API core file, this will take a while'
logger.info(msg)
self.stepStarted.emit(msg)
os.chdir(os.path.abspath(os.path.join(env.FACILE_DIR, '..', 'scripts', 'obfuscation')))
exit_code = check_call([sys.executable, "obfuscate_files.py"], stdout=DEVNULL, stderr=STDOUT)
if exit_code != 0:
logger.critical("File compilation was unsuccessful, which will cause the API not to work.")
raise Exception("File compilation was unsuccessful, which will cause the API not to work.")
copyfile(os.path.abspath(os.path.join('compiled', 'apicore.pyd')),
os.path.join(env.FACILE_DIR, 'apicore.pyd'))
rmtree('compiled')
os.chdir(dir)
logger.info("Finished compiling api core and moving it to facile directory.")
self.stepComplete.emit()
def generateCustomApp(self) -> None:
"""
Creates the custom application class/file.
:return: None
"""
msg = "Generating custom application driver"
logger.info(msg)
self.stepStarted.emit(msg)
with open(os.path.join(self._srcFolder, "application.py"), "w+") as f:
# TODO: The Facade Tech watermark thing is a little intense when the user needs
# to use it for their own purposes and may want to share their generated API online.
# Could make a custom tag. I put the original in for the moment though.
logger.debug("Reading application-unfilled.py")
try:
with open(os.path.join(dir, 'application-template.py'), 'r') as g:
appStr = g.read()
except Exception as e:
appStr = 'There was an error generating your API.\n'
logger.exception(e)
logger.debug("Generating options set")
optStr = '{'
for opt in self._opts:
optStr += str(opt) + ', '
optStr = optStr[:-2] + '}'
logger.debug("Generating str of required compIDs")
alreadyWritten = []
aps, cas = self._apim.getActionsByType()
compIDs = '['
for action in cas:
alreadyWritten.append(action.getTargetComponent().getId())
compIDs += str(action.getTargetComponent().getId()) + ', '
# We also want the visibilitybehaviors' triggeractions' components' IDs
vbs = self._tguim.getVisibilityBehaviors()
for id in vbs:
vb = vbs[id]
name = vb.methodName
triggerAction = vb.getTriggerAction()
if name not in alreadyWritten and triggerAction is not None:
compIDs += str(triggerAction.getTargetComponent().getId()) + ', '
compIDs = compIDs[:-2] + ']' # remove the final ", " and close bracket
logger.debug("Format BaseApp superclass call with necessary info")
try:
appStr = appStr.format(exeLoc="'" + self._exeLoc + "'", options=optStr, name="'" + self._name + "'",
backend="'" + self._backend + "'", reqCompIDs=compIDs)
except Exception as e:
logger.exception(e)
logger.debug("Writing BaseApp")
f.write(appStr)
logger.debug("Writing methods generated from actions that are used in action pipelines.")
alreadyWritten = []
for action in cas:
alreadyWritten.append(action.getMethodName())
f.write(action.getMethod())
logger.debug("Writing methods generated from actions that are used by visibility behaviors.")
for id in vbs:
vb = vbs[id]
name = vb.methodName
triggerAction = vb.getTriggerAction()
if name not in alreadyWritten and triggerAction is not None:
f.write(triggerAction.getMethod())
logger.debug("Writing methods generated from action pipelines.")
for ap in aps:
f.write(ap.getMethod())
logger.info("Finished generating custom application driver.")
self.stepComplete.emit()
def copyNecessaryFiles(self) -> None:
"""
Adds all necessary files for compiler to work into created directory
:return: None
"""
self.stepStarted.emit("Copying necessary files")
# Only necessary when using multiple files
#
# make necessary directories before copying files
# targetDirs = ['data', 'data/tguim', 'tguiil', 'libs'] # 'data/apim',
# for tdir in targetDirs:
# tdir = os.path.join(self._srcFolder, tdir)
# if not os.path.exists(tdir):
# os.mkdir(tdir)
for path in self._necessaryFiles:
src = os.path.abspath(os.path.join(env.FACILE_SRC_DIR, path))
dest = os.path.abspath(os.path.join(self._srcFolder, path))
logger.info(f"Copying file: {src} -> {dest}")
try:
copyfile(src, dest)
except Exception as e:
logger.critical("Unable to copy file.")
logger.exception(e)
self.stepComplete.emit()
def saveTGUIM(self):
"""
Saves the tguim in the API folder. Saves project as well.
:return: None
"""
msg = "Saving target GUI model"
self.stepStarted.emit(msg)
logger.info(msg)
self.statem._project.save()
with open(os.path.join(self._srcFolder, "tguim.json"), "w+") as f:
f.write(json.dumps(self._tguim.asDict()))
self.stepComplete.emit()
def generateSetupFile(self):
"""
Generates the setup file for installing the API
"""
# Create setup.py so user can install install API as a package with pip.
msg = "Generating setup.py file"
self.stepStarted.emit(msg)
logger.info(msg)
setupTempFile = open(os.path.join(dir, "setup-template.txt"), 'r')
setupStr = setupTempFile.read().format(projectName=self.statem._project.getAPIName(),
projectVersion='0.1.0') # TODO Add versioning
setupTempFile.close()
setupFile = open(os.path.join(self._saveFolder, 'setup.py'), 'w')
setupFile.write(setupStr)
setupFile.close()
self.stepComplete.emit()
def generateInitFile(self):
"""
Generates the init file so the package can be installed as an API
"""
# Create __init__.py so API is a package.
msg = "Generating __init__.py file"
self.stepStarted.emit(msg)
logger.info(msg)
with open(os.path.join(dir, "__init__template.txt"), 'r') as initTempFile:
targetAppName = self.statem._project.getExecutableFile().split('/')[-1].split('.')[0] # '/app.exe' -> 'app'
targetAppName = targetAppName[0].upper() + targetAppName[1:] # 'app' -> 'App'
initStr = initTempFile.read().format(targetApplicationName=targetAppName)
with open(os.path.join(self._srcFolder, '__init__.py'), 'w') as initFile:
initFile.write(initStr)
self.stepComplete.emit()
def installAPI(self):
"""
Installs the generated API to PATH
"""
msg = "Installing as python package"
self.stepStarted.emit(msg)
logger.info(msg)
os.chdir(self._saveFolder)
os.system(self._compProf.interpExeDir + " -m pip install . 1>install.log 2>&1")
rmtree('setup.py') # Delete setup.py after it's used
logger.info("Finished installing python package")
self.stepComplete.emit()
def copyHelpFiles(self):
"""
Generates files that give the basic structure and outline of a functional script.
Will only write them if they do not yet exist, to avoid overwriting any existing work in the automate.py file.
"""
msg = "Copying help files"
self.stepStarted.emit(msg)
logger.info(msg)
if not os.path.exists(os.path.join(self._saveFolder, "automate.py")):
with open(os.path.join(self._saveFolder, "automate.py"), "w+") as f:
with open(os.path.join(dir, 'automate-template.txt'), 'r') as g:
autoStr = g.read()
targetAppName = self.statem._project.getExecutableFile().split('/')[-1].split('.')[0]
targetAppName = targetAppName[0].upper() + targetAppName[1:] # 'app' -> 'App'
f.write(autoStr.format(name=self._name, targetapp=targetAppName))
# Remove run script and rewrite every time so that interpreter gets written to it
if os.path.exists(os.path.join(self._saveFolder, "run-script.bat")):
os.remove(os.path.join(self._saveFolder, "run-script.bat"))
with open(os.path.join(self._saveFolder, "run-script.bat"), "w+") as f:
with open(os.path.join(dir, "run-script-template.bat"), 'r') as g:
rsStr = g.read()
f.write(rsStr.format(interpreterLocation=self._compProf.interpExeDir))
self.stepComplete.emit()
@nongui
def installRequirements(self):
"""
Installs the necessary requirements to the chosen python interpreter, if they aren't already installed.
"""
# Get currently installed packages in a list
current = check_output([self._compProf.interpExeDir, '-m', 'pip', 'freeze'])
installed = [r.decode().split('==')[0] for r in current.split()]
# Get necessary packages in a list
with open(os.path.join(dir, "api_requirements.txt"), 'r') as f:
reqFile = f.read()
required = [r.split('==')[0] for r in reqFile.split()]
# Check for each package and install the missing ones
diff = set(required) - set(installed)
for package in diff:
msg = "Installing package: " + package
self.stepStarted.emit(msg)
logger.info(msg)
check_call([self._compProf.interpExeDir, '-m', 'pip', 'install', package], stdout=DEVNULL, stderr=STDOUT)
self.stepComplete.emit()
@log_exceptions(logger=logger)
def compileAPI(self):
"""
Generates the functional API: the final result of compilation.
"""
logger.info("Compiling API")
self.installRequirements()
if not sys.executable.endswith('facile.exe'):
self._dev_generateAPICore()
self.copyNecessaryFiles()
self.saveTGUIM()
if self._compProf.installApi:
self.generateSetupFile()
self.generateInitFile() # We want this regardless of installing the api or not
self.generateCustomApp()
if self._compProf.installApi:
self.installAPI()
self.copyHelpFiles()
if not sys.executable.endswith('facile.exe'):
os.remove(os.path.join(env.FACILE_DIR, 'apicore.pyd'))
self.finished.emit()
logger.info("Finished compiling API")
| 40.208333
| 120
| 0.585431
| 1,801
| 16,405
| 5.262077
| 0.247085
| 0.024058
| 0.027435
| 0.019204
| 0.271288
| 0.22887
| 0.188773
| 0.128522
| 0.082093
| 0.048328
| 0
| 0.002521
| 0.298689
| 16,405
| 407
| 121
| 40.307125
| 0.821208
| 0.271381
| 0
| 0.255507
| 0
| 0
| 0.134869
| 0.010355
| 0
| 0
| 0
| 0.007371
| 0
| 1
| 0.057269
| false
| 0
| 0.061674
| 0
| 0.145374
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09d4d5139b90907a08147b1f476920cdd503f04c
| 15,484
|
py
|
Python
|
src/testing/functionaltests/webtest.py
|
pgecsenyi/piepy
|
37bf6cb5bc8c4f9da3f695216beda7353d79fb29
|
[
"MIT"
] | 1
|
2018-03-26T22:39:36.000Z
|
2018-03-26T22:39:36.000Z
|
src/testing/functionaltests/webtest.py
|
pgecsenyi/piepy
|
37bf6cb5bc8c4f9da3f695216beda7353d79fb29
|
[
"MIT"
] | null | null | null |
src/testing/functionaltests/webtest.py
|
pgecsenyi/piepy
|
37bf6cb5bc8c4f9da3f695216beda7353d79fb29
|
[
"MIT"
] | null | null | null |
"""
Web unit tests
"""
# pylint: disable=too-many-public-methods
import time
import unittest
import requests
from testing.communicationhelper import get_json, put_json
from testing.functions import are_expected_items_in_list, are_expected_kv_pairs_in_list, \
get_item_from_embedded_dictionary
from testing.servermanager import ServerManager
from testing.testhelper import TestHelper
from testing.videotestenvironment import VideoTestEnvironment
class WebTest(unittest.TestCase):
####################################################################################################################
# Initialization and cleanup.
####################################################################################################################
@classmethod
def setUpClass(cls):
# Set private static attributes.
cls._episode_title_id = 0
cls._file_id = 0
cls._language_id = 0
cls._main_executable = 'main.py'
cls._parent_id = 0
cls._playlist_id = 0
cls._quality_id = 0
# Create TestHelper.
cls._helper = TestHelper()
cls._helper.add_environment(VideoTestEnvironment())
# Create test configuration and files.
cls._helper.create_configuration()
cls._helper.create_files()
# Create Server Manager and start the server.
cls._server_manager = ServerManager(cls._main_executable, cls._helper.config_path)
cls._server_manager.start()
if not cls._server_manager.wait_for_initialization(cls._helper.test_service_base_url):
print('The service is unavailable.')
cls.tearDownClass()
@classmethod
def tearDownClass(cls):
cls._server_manager.stop()
cls._helper.clean()
####################################################################################################################
# Real test methods.
####################################################################################################################
def test_1_rebuild(self):
# Arrange.
rebuild_url = WebTest._helper.build_url('rebuild')
status_url = WebTest._helper.build_url('status')
# Act.
requests.get(rebuild_url)
# Wait until database is building. Poll status in every 2 seconds.
number_of_retries = 0
result = ''
while number_of_retries < 10:
data = get_json(status_url)
result = data['status']['synchronization']
if result == 'not running':
break
number_of_retries += 1
time.sleep(2)
# Assert.
self.assertEqual(result, 'not running', 'Rebuild failed.')
def test_2_categories(self):
# Arrange.
url = WebTest._helper.build_url('categories')
# Act.
data = get_json(url)
# Assert.
are_expected_items_in_list(self, data, 'categories')
are_expected_items_in_list(self, data['categories'], 'audio', 'image', 'video')
def test_3_video_languages(self):
# Arrange.
url = WebTest._helper.build_url('video/languages')
# Act.
data = get_json(url)
# Assert.
expected_languages = ['(Uncategorized)', 'English', 'Finnish', 'German', 'Greek', 'Hindi', 'Hungarian']
are_expected_items_in_list(self, data, 'languages')
are_expected_kv_pairs_in_list(self, data['languages'], 'language', expected_languages)
WebTest._language_id = get_item_from_embedded_dictionary(
data['languages'],
'language',
'Greek',
'id')
def test_4_video_qualities(self):
# Arrange.
url = WebTest._helper.build_url('video/qualities')
# Act.
data = get_json(url)
# Assert.
expected_qualities = ['(Uncategorized)', 'LQ', 'HQ', 'HD (720p)', 'HD (1080p)']
are_expected_items_in_list(self, data, 'qualities')
are_expected_kv_pairs_in_list(self, data['qualities'], 'quality', expected_qualities)
WebTest._quality_id = get_item_from_embedded_dictionary(
data['qualities'],
'quality',
'HD (720p)',
'id')
def test_5_01_video_titles(self):
# Arrange.
url = WebTest._helper.build_url('video/titles')
# Act.
data = get_json(url)
# Assert.
expected_titles = ['(Uncategorized)', 'Triple Payback', 'Battle of Impact', 'Double Prejudice',
'Screw driver 4 (1975)', 'Compressor Head (2014)', 'Family']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
WebTest._parent_id = get_item_from_embedded_dictionary(
data['titles'],
'title',
'Compressor Head (2014)',
'id')
def test_5_02_video_titles_by_l(self):
"""
Query video titles by language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?language={}'.format(WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Battle of Impact', 'Compressor Head (2014)']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_03_video_titles_by_p(self):
"""
Query video titles by parent.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?parent={}'.format(WebTest._parent_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = [
'Compressor Head [1x01] Variable Length Codes',
'Compressor Head [1x03] Markov Chain Compression',
'Compressor Head [1x01] Variable Length Codes']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_04_video_titles_by_q(self):
"""
Query video titles by quality.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?quality={}'.format(WebTest._quality_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Triple Payback', 'Compressor Head (2014)']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_05_video_titles_by_l_p(self):
"""
Query video titles by language and parent.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?language={}&parent={}'.format(
WebTest._language_id,
WebTest._parent_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head [1x01] Variable Length Codes']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_06_video_titles_by_l_q(self):
"""
Query video titles by language and quality.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?language={}&quality={}'.format(
WebTest._language_id,
WebTest._quality_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head (2014)']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_07_video_titles_by_p_q(self):
"""
Query video titles by parent and quality.
"""
# Arrange.
url = WebTest._helper.build_url(
'video/titles?parent={}&quality={}'.format(WebTest._parent_id, WebTest._quality_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head [1x01] Variable Length Codes']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
WebTest._episode_title_id = get_item_from_embedded_dictionary(
data['titles'],
'title',
'Compressor Head [1x01] Variable Length Codes',
'id')
def test_5_08_video_titles_by_sl(self):
"""
Query video titles by subtitle language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?subtitle={}'.format(WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head (2014)']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_09_video_titles_by_l_sl(self):
"""
Query video titles by language and subtitle language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?language={}&subtitle={}'.format(
WebTest._language_id,
WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head (2014)']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_10_video_titles_by_p_sl(self):
"""
Query video titles by parent and subtitle language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?parent={}&subtitle={}'.format(
WebTest._parent_id,
WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head [1x01] Variable Length Codes']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_11_video_titles_by_q_sl(self):
"""
Query video titles by quality and subtitle language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?quality={}&subtitle={}'.format(
WebTest._quality_id,
WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head (2014)']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_12_video_titles_by_l_p_sl(self):
"""
Query video titles by language, parent and subtitle language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?language={}&parent={}&subtitle={}'.format(
WebTest._language_id,
WebTest._parent_id,
WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head [1x01] Variable Length Codes']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_13_video_titles_by_l_q_sl(self):
"""
Query video titles by language, quality and subtitle language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?language={}&quality={}&subtitle={}'.format(
WebTest._language_id,
WebTest._quality_id,
WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head (2014)']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_5_14_video_titles_by_l_p_q_sl(self):
"""
Query video titles by language, parent, quality and subtitle language.
"""
# Arrange.
url = WebTest._helper.build_url('video/titles?language={}&parent={}&quality={}&subtitle={}'.format(
WebTest._language_id,
WebTest._parent_id,
WebTest._quality_id,
WebTest._language_id))
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Compressor Head [1x01] Variable Length Codes']
are_expected_items_in_list(self, data, 'titles')
are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
def test_6_search(self):
# Arrange.
url = WebTest._helper.build_url('search/family')
# Act.
data = get_json(url)
# Assert.
expected_titles = ['Family', 'Family [01] Intro']
are_expected_items_in_list(self, data, 'videos')
are_expected_kv_pairs_in_list(self, data['videos'], 'title', expected_titles)
def test_7_details(self):
# Arrange.
url = WebTest._helper.build_url('video/details/{}'.format(WebTest._episode_title_id))
# Act.
data = get_json(url)
# Assert.
are_expected_items_in_list(self, data['details'], 'id', 'files', 'subtitles', 'title')
self.assertEqual('Compressor Head [1x01] Variable Length Codes', data['details']['title'], 'Wrong title.')
are_expected_kv_pairs_in_list(
self,
data['details']['files'],
'language',
['Finnish', 'Greek', 'Greek'])
are_expected_kv_pairs_in_list(
self,
data['details']['files'],
'quality',
['HD (720p)', 'HD (720p)', 'LQ'])
are_expected_kv_pairs_in_list(
self,
data['details']['subtitles'],
'language',
['English', 'Greek', 'Greek', 'Hungarian'])
WebTest._file_id = data['details']['files'][0]['id']
def test_8_01_playlist_add(self):
# Arrange.
url = WebTest._helper.build_url('playlist/add')
payload = {
'title' : 'Test playlist',
'tracks': [
{'category' : 'video', 'file' : WebTest._file_id}]}
# Act.
data = put_json(url, payload)
# Assert.
self.assertEqual('Test playlist', data['playlist']['title'], 'Wrong title for the playlist.')
WebTest._playlist_id = data['playlist']['id']
def test_8_02_playlist_add_track(self):
# Arrange.
url = WebTest._helper.build_url('playlist/add-track')
payload = {'playlist' : WebTest._playlist_id, 'category' : 'video', 'file' : WebTest._file_id}
# Act.
data = put_json(url, payload)
# Assert.
self.assertEqual('video', data['track']['category'], 'Wrong category.')
self.assertEqual('Compressor Head [1x01] Variable Length Codes', data['track']['title'], 'Wrong title.')
def test_8_03_playlists(self):
# Arrange.
url = WebTest._helper.build_url('playlist/all')
# Act.
data = get_json(url)
# Assert.
self.assertNotEqual(None, data['playlists'], 'There are no playlists in the response.')
self.assertEqual(1, len(data['playlists']), 'Incorrect number of playlists.')
self.assertEqual('Test playlist', data['playlists'][0]['title'], 'Incorrect playlist title.')
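The helpers these tests lean on (get_json, put_json, are_expected_items_in_list, are_expected_kv_pairs_in_list, get_item_from_embedded_dictionary) are defined elsewhere in the suite. A minimal stdlib-only sketch of plausible implementations, inferred from the call sites rather than taken from the source:
import json
import urllib.request

def get_json(url):
    # GET a URL and decode the JSON body.
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read().decode('utf-8'))

def put_json(url, payload):
    # PUT a JSON payload and decode the JSON reply.
    request = urllib.request.Request(
        url,
        data=json.dumps(payload).encode('utf-8'),
        headers={'Content-Type': 'application/json'},
        method='PUT')
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read().decode('utf-8'))

def are_expected_items_in_list(test, data, *keys):
    # Assert that every expected key is present in the response object.
    for key in keys:
        test.assertIn(key, data, 'Missing key: {}'.format(key))

def are_expected_kv_pairs_in_list(test, items, key, expected_values):
    # Assert that the values under `key` match the expected multiset.
    test.assertEqual(sorted(expected_values), sorted(item[key] for item in items))

def get_item_from_embedded_dictionary(items, match_key, match_value, wanted_key):
    # Return items[i][wanted_key] for the first element whose match_key equals match_value.
    for item in items:
        if item[match_key] == match_value:
            return item[wanted_key]
    return None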
| 32.529412 | 120 | 0.58822 | 1,703 | 15,484 | 5.018203 | … | 1 | 0 |
09d79f0d227847749db1ddb7eb6acbb60326e8b8 | 862 | py | Python
10_Name_Card_Detection/pytorch-faster-rcnn/lib/datasets/factory.py | ZeroWeight/Pattern-Recognize | ce18ab7d218840978f546a94d02d4183c9dc1aac | ["MIT"]
4 | 2018-07-30T01:46:22.000Z | 2019-04-09T12:23:52.000Z | null | null | null | 1 | 2020-02-25T05:09:06.000Z | 2020-02-25T05:09:06.000Z
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
from datasets.name_card import name_card
import numpy as np
__sets = {}
for split in ['trainval', 'test']:
name = 'name_card_real_{}'.format(split)
__sets[name] = (lambda split=split: name_card(split,'NameCardReal'))
__sets['name_card_fake_train'] = (lambda: name_card('trainval','NameCardFake'))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return list(__sets.keys())
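A short usage sketch for the factory above (assumes the repo's datasets package is importable; the printed list is illustrative):
print(list_imdbs())  # e.g. ['name_card_real_trainval', 'name_card_real_test', 'name_card_fake_train']
imdb = get_imdb('name_card_real_test')  # instantiates name_card('test', 'NameCardReal')
# get_imdb('typo') would raise KeyError('Unknown dataset: typo')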
| 29.724138 | 79 | 0.612529 | 106 | 862 | 4.754717 | … | 1 | 0 |
09dbc482da6f2620a0ec95d44dab6ffbe0c052f9 | 4,439 | py | Python
monopoly.py | michaelhutton/monopoly | d3adcf524dfb015dbdaaadf905ca8cc4396fde3e | ["MIT"]
null | null | null | null | null | null | null | null | null
import random
squares = [
"Go",
"Mediterranean Ave.",
"Community Chest",
"Baltic Ave.",
"Income Tax",
"Reading Railroad",
"Oriental Ave.",
"Chance",
"Vermont Ave.",
"Connecticut Ave.",
"Jail",
"St. Charles Place",
"Electric Company",
"States Ave.",
"Virginia Ave.",
"Pennsylvania Railroad",
"St. James Place",
"Community Chest",
"Tennessee Ave.",
"New York Ave.",
"Free Parking",
"Kentucky Ave.",
"Chance",
"Indiana Ave.",
"Illinois Ave.",
"B. & O. Railroad",
"Atlantic Ave.",
"Ventnor Ave.",
"Water Works",
"Marvin Gardens",
"Go To Jail",
"Pacific Ave.",
"North Carolina Ave.",
"Community Chest",
"Pennsylvania Ave.",
"Short Line Railroad",
"Chance",
"Park Place",
"Luxury Tax",
"Boardwalk"
]
SQUARES_LENGTH = len(squares)
chance_cards = [
"Advance to Go",
"Advance to Illinois Ave.",
"Advance to St. Charles Place",
"Advance token to nearest Utility",
"Advance token to the nearest Railroad",
"Bank pays you dividend of $50",
"Get out of Jail Free Card",
"Go Back 3 Spaces",
"Go to Jail",
"Make general repairs on all your property",
"Pay poor tax of $15",
"Take a trip to Reading Railroad",
"Take a walk on the Boardwalk",
"You have been elected Chairman of the Board",
"Your building loan matures - Collect $150"
]
community_chest_cards = [
"Advance to Go",
"Bank error in your favor - Collect $200",
"Doctor's fees - Pay $50",
"From sale of stock you get $50",
"Get Out of Jail Free Card",
"Go to Jail",
"Grand Opera Night - Collect $50 from every player for opening night seats",
"Holiday Fund matures - Receive $100",
"Income tax refund - Collect $20",
"Life insurance matures - Collect $100",
"Pay hospital fees of $100",
"Pay school fees of $150",
"Receive $25 consultancy fee",
"You are assessed for street repairs - $40 per house - $115 per hotel",
"You have won second prize in a beauty contest - Collect $10",
"You inherit $100"
]
def roll_dice():
return [random.randint(1,6),random.randint(1,6)]
def pick_card(player, deck):
# Take a random card from either the chance or cc deck
# and return players new position
last_card = len(deck)-1
choice = random.randint(0,last_card)
card = deck[choice]
print("Started at: " + str(player["pos"]))
if(card == "Advance to Go"):
player["pos"] = 0
elif(card == "Advance to Illinois Ave."):
player["pos"] = 24
elif(card == "Advance to St. Charles Place"):
player["pos"] = 11
elif(card == "Advance token to nearest Utility"):
if(player["pos"] == 7):
player["pos"] = 12 # Electric Company
else: # Pos 22 and 36 go to the same place
player["pos"] = 28 # Water Works
elif(card == "Advance token to the nearest Railroad"):
if(player["pos"] == 7):
player["pos"] = 5 # Reading
elif(player["pos"] == 22):
player["pos"] = 25 # B and O
elif(player["pos"] == 36):
player["pos"] = 35 # Short Line
elif(card == "Go Back 3 Spaces"):
player["pos"] = player["pos"] - 3
elif(card == "Go to Jail"):
player["pos"] = 10
player["in_jail"] = True
elif(card == "Take a trip to Reading Railroad"):
player["pos"] = 5
elif(card == "Take a walk on the Boardwalk"):
player["pos"] = 39
print("Received card: " + card)
print("Ended at: " + str(player["pos"]))
return player
player1 = {
"pos": 0,
"doubles_in_a_row": 0,
"in_jail": False
}
for turn in range(1,100):
dice = roll_dice()
print(dice)
if(dice[0] == dice[1]):
player1["doubles_in_a_row"] = player1["doubles_in_a_row"] + 1
else:
player1["doubles_in_a_row"] = 0
# TODO: if the player has rolled 3 doubles, go to jail!
player1["pos"] = (player1["pos"] + dice[0] + dice[1]) % SQUARES_LENGTH
# TODO: Check if its a go to jail space
if(squares[player1["pos"]] == "Chance"):
print("chance!")
print(player1)
pick_card(player1, chance_cards)
print(player1)
if(squares[player1["pos"]] == "Community Chest"):
print("CC!")
pick_card(player1, community_chest_cards)
print("Turn " + str(turn) + ": " + squares[player1["pos"]])
| 29.593333 | 80 | 0.581437 | 586 | 4,439 | 4.354949 | … | 1 | 0 |
09ddae526c3cd9bcfe820b2b4ae3706b5e1e7c32 | 7,769 | py | Python
coinzdense/app.py | pibara/coinzdense-python | f051770b71fa0afe935eb0d2079dab21eea9432d | ["BSD-3-Clause"]
null | null | null | null | null | null | null | null | null
#!/usr/bin/python3
from coinzdense.signing import SigningKey as _SigningKey
from coinzdense.validation import ValidationEnv as _ValidationEnv
from coinzdense.wallet import create_wallet as _create_wallet
from coinzdense.wallet import open_wallet as _open_wallet
def _keys_per_signature(hashlen, otsbits):
return 2*(((hashlen*8-1) // otsbits)+1)
def _sub_sub_keyspace_usage(hashlen, otsbits, height):
return 1 + _keys_per_signature(hashlen, otsbits) * (1 << height)
def _sub_keyspace_usage(hashlen, otsbits, heights):
usage = _sub_sub_keyspace_usage(hashlen, otsbits,heights[0])
if len(heights) > 1:
usage += (1 << heights[0]) * _sub_keyspace_usage(hashlen, otsbits, heights[1:])
return usage
def _keyspace_usage(hashlen, otsbits, keyspace):
usage = (1 << sum(keyspace[0]["heights"])) + _sub_keyspace_usage(hashlen, otsbits, keyspace[0]["heights"])
if len(keyspace) > 1:
usage += (1 << keyspace[0]["reserve"]) * _keyspace_usage(hashlen, otsbits, keyspace[1:])
return usage
class KeySpace:
def __init__(self, hashlen, otsbits, keyspace, offset=0, size=1<<64, state=None):
self.hashlen = hashlen
self.otsbits = otsbits
self.keyspace = keyspace
if state is None:
self.state = {}
self.state["offset"] = offset
self.state["stack"] = size
reserve_bits = keyspace[0].get("reserve", None)
if reserve_bits is None:
self.state["heap_start"] = offset
self.state["heap"] = offset
self.state["has_reserved"] = False
self.state["reserved_heap_start"] = offset
self.state["reserved_heap"] = offset
else:
reserved = (1 << reserve_bits) * _keyspace_usage(hashlen, otsbits, keyspace[1:])
self.state["heap_start"] = offset + reserved
self.state["heap"] = offset + reserved
self.state["has_reserved"] = True
self.state["reserved_heap_start"] = offset
self.state["reserved_heap"] = offset
self.state["own_offset"] = self.state["heap"]
self.state["heap"] += (1 << sum(keyspace[0]["heights"])) + _sub_keyspace_usage(hashlen, otsbits, keyspace[0]["heights"])
else:
self.state = state
def own_offset(self):
return self.state["own_offset"]
def allocate_subspace(self):
        keyspace_size = _keyspace_usage(self.hashlen, self.otsbits, self.keyspace[1:])
self.state["stack"] -= keyspace_size
return KeySpace(self.hashlen, self.otsbits, self.keyspace[1:], self.state["stack"], keyspace_size)
def get_state(self):
return self.state
class BlockChainEnv:
def __init__(self, conf):
assert "appname" in conf, "Please run coinzdense-lint on your blockchain RC"
assert "hashlen" in conf, "Please run coinzdense-lint on your blockchain RC"
assert "otsbits" in conf, "Please run coinzdense-lint on your blockchain RC"
assert "keyspace" in conf, "Please run coinzdense-lint on your blockchain RC"
self.appname = conf["appname"]
self.hashlen = conf["hashlen"]
self.otsbits = conf["otsbits"]
self.keyspace = conf["keyspace"]
if "hierarchy" in conf:
self.hierarchy = conf["hierarchy"]
else:
self.hierarchy = {}
if "sub_path" in conf:
self.subpath = conf["sub_path"]
else:
self.subpath = []
assert isinstance(self.appname, str), "Please run coinzdense-lint on your blockchain RC"
assert isinstance(self.hashlen, int), "Please run coinzdense-lint on your blockchain RC"
assert isinstance(self.otsbits, int), "Please run coinzdense-lint on your blockchain RC"
assert isinstance(self.keyspace, list), "Please run coinzdense-lint on your blockchain RC"
assert isinstance(self.hierarchy, dict), "Please run coinzdense-lint on your blockchain RC"
assert isinstance(self.subpath, list), "Please run coinzdense-lint on your blockchain RC"
assert self.hashlen > 15
assert self.hashlen < 65
assert self.otsbits > 3
assert self.otsbits < 17
self.depth = 0
self._check_hierarchy()
for idx, val in enumerate(self.keyspace):
assert isinstance(val, dict), "Please run coinzdense-lint on your blockchain RC"
total_height = 0
assert "heights" in val, "Please run coinzdense-lint on your blockchain RC"
assert isinstance(val["heights"], list), "Please run coinzdense-lint on your blockchain RC"
assert len(val["heights"]) > 1, "Please run coinzdense-lint on your blockchain RC"
assert len(val["heights"]) < 33, "Please run coinzdense-lint on your blockchain RC"
for idx2,height in enumerate(val["heights"]):
assert isinstance(height, int), "Please run coinzdense-lint on your blockchain RC"
assert height > 2, "Please run coinzdense-lint on your blockchain RC"
assert height < 17, "Please run coinzdense-lint on your blockchain RC"
total_height += height
if idx < len(self.keyspace) -1:
assert "reserve" in val, "Please run coinzdense-lint on your blockchain RC"
assert isinstance(val["reserve"], int), "Please run coinzdense-lint on your blockchain RC"
assert val["reserve"] > 1, "Please run coinzdense-lint on your blockchain RC"
assert val["reserve"] < total_height - 1, "Please run coinzdense-lint on your blockchain RC"
else:
assert "reserve" not in val, "Please run coinzdense-lint on your blockchain RC"
for subpath_part in self.subpath:
assert isinstance(subpath_part, str), "Please run coinzdense-lint on your blockchain RC"
total = _keyspace_usage(self.hashlen, self.otsbits, self.keyspace)
assert total.bit_length() < 65, "Please run coinzdense-lint on your blockchain RC"
def _check_hierarchy(self, sub_hierarchy=None, depth=0):
if sub_hierarchy is not None:
my_hierarchy = sub_hierarchy
else:
my_hierarchy = self.hierarchy
my_depth = depth + 1
if my_depth > self.depth:
self.depth = my_depth
for key, val in my_hierarchy.items():
assert isinstance(val, dict), "Please run coinzdense-lint on your blockchain RC"
self._check_hierarchy(val, my_depth)
def __getitem__(self, key):
if key in self.hierarchy:
subconf = {}
subconf["appname"] = self.appname
subconf["hashlen"] = self.hashlen
subconf["otsbits"] = self.otsbits
subconf["keyspace"] = self.keyspace[1:]
subconf["hierarchy"] = self.hierarchy[key]
subconf["sub_path"] = self.subpath[:] + [key]
return BlockChainEnv(subconf)
else:
raise KeyError("No sub-key hierarchy named " + key)
def get_signing_key(self, wallet, idx=0, idx2=0, backup=None):
path = [self.appname] + self.subpath
return _SigningKey(self.hashlen, self.otsbits, self.keyspace, path, self.hierarchy, wallet, idx, idx2, backup)
def get_validator(self):
path = [self.appname] + self.subpath
return _ValidationEnv(self.hashlen, self.otsbits, self.keyspace, path, self.hierarchy)
def create_wallet(self, salt, key, password):
path = [self.appname] + self.subpath
return _create_wallet(salt, key, password, path)
def open_wallet(self, wdata, password):
path = [self.appname] + self.subpath
return _open_wallet(wdata, password, path)
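A minimal construction sketch, using a hand-written config dict in place of a real blockchain RC file; the values are illustrative and chosen only to satisfy the assertions above:
conf = {
    "appname": "demo-app",  # hypothetical application name
    "hashlen": 16,          # must satisfy 15 < hashlen < 65
    "otsbits": 4,           # must satisfy 3 < otsbits < 17
    "keyspace": [
        {"heights": [3, 3], "reserve": 2},  # every level but the last needs a reserve
        {"heights": [3, 3]},                # last level: no reserve allowed
    ],
}
env = BlockChainEnv(conf)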
| 50.122581 | 132 | 0.635603 | 945 | 7,769 | 5.10582 | … | 1 | 0 |
09de00e54d3860203b7729e1854754335ac141d7 | 1,296 | py | Python
src/asyncdataflow/inspector.py | tomaszkingukrol/async-data-flow | 1572ef101cb0e6a0f27a77401538a4620ee9939f | ["Apache-2.0"]
null | null | null | null | null | null | null | null | null
from collections.abc import Iterable
from typing import Callable, Tuple
import inspect
from .definition import DataFlowInspector
from .exceptions import DataFlowFunctionArgsError, DataFlowNotCallableError, DataFlowEmptyError, DataFlowNotTupleError
class DataFlowInspect(DataFlowInspector):
    ''' Inspect the functions defined in a DataFlow.
    '''
def check_dataflow_args(self, dataflow: tuple):
if isinstance(dataflow, tuple):
if dataflow:
for task in dataflow:
if isinstance(task, Iterable):
self.check_dataflow_args(task)
elif isinstance(task, Callable):
_check_positional_or_keyword_args(task)
else:
raise DataFlowNotCallableError(task)
else:
raise DataFlowEmptyError()
else:
raise DataFlowNotTupleError(dataflow)
def _check_positional_or_keyword_args(func: Callable) -> bool:
''' Check that function has only POSITIONAL_OR_KEYWORD arguments.
'''
inspect_args = inspect.signature(func).parameters.values()
for arg in inspect_args:
if str(arg.kind) != 'POSITIONAL_OR_KEYWORD':
raise DataFlowFunctionArgsError(func.__name__, arg)
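A small usage sketch (the task function is hypothetical, and it assumes DataFlowInspector imposes no further abstract requirements):
def fetch(a, b):  # positional-or-keyword arguments only: accepted
    return {"a": a, "b": b}

inspector = DataFlowInspect()
inspector.check_dataflow_args((fetch,))          # passes silently
# inspector.check_dataflow_args(())              # would raise DataFlowEmptyError
# inspector.check_dataflow_args("not-a-tuple")   # would raise DataFlowNotTupleError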
| 35.027027 | 118 | 0.655864 | 120 | 1,296 | 6.883333 | … | 1 | 0 |
09e5ab892fd8685aedec11f8378615ed2931fa1c | 891 | py | Python
processing_pipeline/extractionless_registration.py | SijRa/Brain-Image-Analysis-using-Deep-Learning | a35411bda6e39eff57f715a695b7fb6a30997706 | ["MIT"]
2 | 2022-01-04T16:54:20.000Z | 2022-01-24T03:01:14.000Z | null | null | null | 1 | 2020-07-05T09:30:11.000Z | 2020-07-05T09:30:11.000Z
from ants import registration, image_read, image_write, resample_image, crop_image
from os import listdir
mri_directory = "ADNI_baseline_raw/"
template_loc = "MNI152_2009/mni_icbm152_t1_tal_nlin_sym_09a.nii"
template = image_read(template_loc)
template = resample_image(template, (192, 192, 160), True, 4)
#template = crop_image(template)
for scan in listdir(mri_directory):
    scan_id = scan.split('.')[0]
    filename = "ADNI_original_registered/" + scan_id + ".nii"
img_path = mri_directory + scan
image = image_read(img_path, reorient=True)
if image.shape[1] != 192:
print("- Resampling -")
image = resample_image(image, (192, 192, 160), True, 4)
registered_dict = registration(fixed=template, moving=image, type_of_transform="SyNRA")
#img = crop_image(registered_dict['warpedmovout'])
image_write(registered_dict['warpedmovout'], filename=filename)
print("Registered:",scan)
| 40.5 | 89 | 0.751964 | 122 | 891 | 5.213115 | … | 1 | 0 |
09e7e9329ecb594a1ce5f26cf6f1dcdac3d78aef | 15,237 | py | Python
sp_api/api/finances/models/shipment_item.py | lionsdigitalsolutions/python-amazon-sp-api | 7374523ebc65e2e01e37d03fc4009a44fabf2c3b | ["MIT"]
null | null | null | null | null | null | null | null | null
# coding: utf-8
"""
Selling Partner API for Finances
The Selling Partner API for Finances helps you obtain financial information relevant to a seller's business. You can obtain financial events for a given order, financial event group, or date range without having to wait until a statement period closes. You can also obtain financial event groups for a given date range. # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ShipmentItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'seller_sku': 'str',
'order_item_id': 'str',
'order_adjustment_item_id': 'str',
'quantity_shipped': 'int',
'item_charge_list': 'ChargeComponentList',
'item_charge_adjustment_list': 'ChargeComponentList',
'item_fee_list': 'FeeComponentList',
'item_fee_adjustment_list': 'FeeComponentList',
'item_tax_withheld_list': 'TaxWithheldComponentList',
'promotion_list': 'PromotionList',
'promotion_adjustment_list': 'PromotionList',
'cost_of_points_granted': 'Currency',
'cost_of_points_returned': 'Currency'
}
attribute_map = {
'seller_sku': 'SellerSKU',
'order_item_id': 'OrderItemId',
'order_adjustment_item_id': 'OrderAdjustmentItemId',
'quantity_shipped': 'QuantityShipped',
'item_charge_list': 'ItemChargeList',
'item_charge_adjustment_list': 'ItemChargeAdjustmentList',
'item_fee_list': 'ItemFeeList',
'item_fee_adjustment_list': 'ItemFeeAdjustmentList',
'item_tax_withheld_list': 'ItemTaxWithheldList',
'promotion_list': 'PromotionList',
'promotion_adjustment_list': 'PromotionAdjustmentList',
'cost_of_points_granted': 'CostOfPointsGranted',
'cost_of_points_returned': 'CostOfPointsReturned'
}
def __init__(self, seller_sku=None, order_item_id=None, order_adjustment_item_id=None, quantity_shipped=None, item_charge_list=None, item_charge_adjustment_list=None, item_fee_list=None, item_fee_adjustment_list=None, item_tax_withheld_list=None, promotion_list=None, promotion_adjustment_list=None, cost_of_points_granted=None, cost_of_points_returned=None): # noqa: E501
"""ShipmentItem - a model defined in Swagger""" # noqa: E501
self._seller_sku = None
self._order_item_id = None
self._order_adjustment_item_id = None
self._quantity_shipped = None
self._item_charge_list = None
self._item_charge_adjustment_list = None
self._item_fee_list = None
self._item_fee_adjustment_list = None
self._item_tax_withheld_list = None
self._promotion_list = None
self._promotion_adjustment_list = None
self._cost_of_points_granted = None
self._cost_of_points_returned = None
self.discriminator = None
if seller_sku is not None:
self.seller_sku = seller_sku
if order_item_id is not None:
self.order_item_id = order_item_id
if order_adjustment_item_id is not None:
self.order_adjustment_item_id = order_adjustment_item_id
if quantity_shipped is not None:
self.quantity_shipped = quantity_shipped
if item_charge_list is not None:
self.item_charge_list = item_charge_list
if item_charge_adjustment_list is not None:
self.item_charge_adjustment_list = item_charge_adjustment_list
if item_fee_list is not None:
self.item_fee_list = item_fee_list
if item_fee_adjustment_list is not None:
self.item_fee_adjustment_list = item_fee_adjustment_list
if item_tax_withheld_list is not None:
self.item_tax_withheld_list = item_tax_withheld_list
if promotion_list is not None:
self.promotion_list = promotion_list
if promotion_adjustment_list is not None:
self.promotion_adjustment_list = promotion_adjustment_list
if cost_of_points_granted is not None:
self.cost_of_points_granted = cost_of_points_granted
if cost_of_points_returned is not None:
self.cost_of_points_returned = cost_of_points_returned
@property
def seller_sku(self):
"""Gets the seller_sku of this ShipmentItem. # noqa: E501
The seller SKU of the item. The seller SKU is qualified by the seller's seller ID, which is included with every call to the Selling Partner API. # noqa: E501
:return: The seller_sku of this ShipmentItem. # noqa: E501
:rtype: str
"""
return self._seller_sku
@seller_sku.setter
def seller_sku(self, seller_sku):
"""Sets the seller_sku of this ShipmentItem.
The seller SKU of the item. The seller SKU is qualified by the seller's seller ID, which is included with every call to the Selling Partner API. # noqa: E501
:param seller_sku: The seller_sku of this ShipmentItem. # noqa: E501
:type: str
"""
self._seller_sku = seller_sku
@property
def order_item_id(self):
"""Gets the order_item_id of this ShipmentItem. # noqa: E501
An Amazon-defined order item identifier. # noqa: E501
:return: The order_item_id of this ShipmentItem. # noqa: E501
:rtype: str
"""
return self._order_item_id
@order_item_id.setter
def order_item_id(self, order_item_id):
"""Sets the order_item_id of this ShipmentItem.
An Amazon-defined order item identifier. # noqa: E501
:param order_item_id: The order_item_id of this ShipmentItem. # noqa: E501
:type: str
"""
self._order_item_id = order_item_id
@property
def order_adjustment_item_id(self):
"""Gets the order_adjustment_item_id of this ShipmentItem. # noqa: E501
An Amazon-defined order adjustment identifier defined for refunds, guarantee claims, and chargeback events. # noqa: E501
:return: The order_adjustment_item_id of this ShipmentItem. # noqa: E501
:rtype: str
"""
return self._order_adjustment_item_id
@order_adjustment_item_id.setter
def order_adjustment_item_id(self, order_adjustment_item_id):
"""Sets the order_adjustment_item_id of this ShipmentItem.
An Amazon-defined order adjustment identifier defined for refunds, guarantee claims, and chargeback events. # noqa: E501
:param order_adjustment_item_id: The order_adjustment_item_id of this ShipmentItem. # noqa: E501
:type: str
"""
self._order_adjustment_item_id = order_adjustment_item_id
@property
def quantity_shipped(self):
"""Gets the quantity_shipped of this ShipmentItem. # noqa: E501
The number of items shipped. # noqa: E501
:return: The quantity_shipped of this ShipmentItem. # noqa: E501
:rtype: int
"""
return self._quantity_shipped
@quantity_shipped.setter
def quantity_shipped(self, quantity_shipped):
"""Sets the quantity_shipped of this ShipmentItem.
The number of items shipped. # noqa: E501
:param quantity_shipped: The quantity_shipped of this ShipmentItem. # noqa: E501
:type: int
"""
self._quantity_shipped = quantity_shipped
@property
def item_charge_list(self):
"""Gets the item_charge_list of this ShipmentItem. # noqa: E501
:return: The item_charge_list of this ShipmentItem. # noqa: E501
:rtype: ChargeComponentList
"""
return self._item_charge_list
@item_charge_list.setter
def item_charge_list(self, item_charge_list):
"""Sets the item_charge_list of this ShipmentItem.
:param item_charge_list: The item_charge_list of this ShipmentItem. # noqa: E501
:type: ChargeComponentList
"""
self._item_charge_list = item_charge_list
@property
def item_charge_adjustment_list(self):
"""Gets the item_charge_adjustment_list of this ShipmentItem. # noqa: E501
:return: The item_charge_adjustment_list of this ShipmentItem. # noqa: E501
:rtype: ChargeComponentList
"""
return self._item_charge_adjustment_list
@item_charge_adjustment_list.setter
def item_charge_adjustment_list(self, item_charge_adjustment_list):
"""Sets the item_charge_adjustment_list of this ShipmentItem.
:param item_charge_adjustment_list: The item_charge_adjustment_list of this ShipmentItem. # noqa: E501
:type: ChargeComponentList
"""
self._item_charge_adjustment_list = item_charge_adjustment_list
@property
def item_fee_list(self):
"""Gets the item_fee_list of this ShipmentItem. # noqa: E501
:return: The item_fee_list of this ShipmentItem. # noqa: E501
:rtype: FeeComponentList
"""
return self._item_fee_list
@item_fee_list.setter
def item_fee_list(self, item_fee_list):
"""Sets the item_fee_list of this ShipmentItem.
:param item_fee_list: The item_fee_list of this ShipmentItem. # noqa: E501
:type: FeeComponentList
"""
self._item_fee_list = item_fee_list
@property
def item_fee_adjustment_list(self):
"""Gets the item_fee_adjustment_list of this ShipmentItem. # noqa: E501
:return: The item_fee_adjustment_list of this ShipmentItem. # noqa: E501
:rtype: FeeComponentList
"""
return self._item_fee_adjustment_list
@item_fee_adjustment_list.setter
def item_fee_adjustment_list(self, item_fee_adjustment_list):
"""Sets the item_fee_adjustment_list of this ShipmentItem.
:param item_fee_adjustment_list: The item_fee_adjustment_list of this ShipmentItem. # noqa: E501
:type: FeeComponentList
"""
self._item_fee_adjustment_list = item_fee_adjustment_list
@property
def item_tax_withheld_list(self):
"""Gets the item_tax_withheld_list of this ShipmentItem. # noqa: E501
:return: The item_tax_withheld_list of this ShipmentItem. # noqa: E501
:rtype: TaxWithheldComponentList
"""
return self._item_tax_withheld_list
@item_tax_withheld_list.setter
def item_tax_withheld_list(self, item_tax_withheld_list):
"""Sets the item_tax_withheld_list of this ShipmentItem.
:param item_tax_withheld_list: The item_tax_withheld_list of this ShipmentItem. # noqa: E501
:type: TaxWithheldComponentList
"""
self._item_tax_withheld_list = item_tax_withheld_list
@property
def promotion_list(self):
"""Gets the promotion_list of this ShipmentItem. # noqa: E501
:return: The promotion_list of this ShipmentItem. # noqa: E501
:rtype: PromotionList
"""
return self._promotion_list
@promotion_list.setter
def promotion_list(self, promotion_list):
"""Sets the promotion_list of this ShipmentItem.
:param promotion_list: The promotion_list of this ShipmentItem. # noqa: E501
:type: PromotionList
"""
self._promotion_list = promotion_list
@property
def promotion_adjustment_list(self):
"""Gets the promotion_adjustment_list of this ShipmentItem. # noqa: E501
:return: The promotion_adjustment_list of this ShipmentItem. # noqa: E501
:rtype: PromotionList
"""
return self._promotion_adjustment_list
@promotion_adjustment_list.setter
def promotion_adjustment_list(self, promotion_adjustment_list):
"""Sets the promotion_adjustment_list of this ShipmentItem.
:param promotion_adjustment_list: The promotion_adjustment_list of this ShipmentItem. # noqa: E501
:type: PromotionList
"""
self._promotion_adjustment_list = promotion_adjustment_list
@property
def cost_of_points_granted(self):
"""Gets the cost_of_points_granted of this ShipmentItem. # noqa: E501
:return: The cost_of_points_granted of this ShipmentItem. # noqa: E501
:rtype: Currency
"""
return self._cost_of_points_granted
@cost_of_points_granted.setter
def cost_of_points_granted(self, cost_of_points_granted):
"""Sets the cost_of_points_granted of this ShipmentItem.
:param cost_of_points_granted: The cost_of_points_granted of this ShipmentItem. # noqa: E501
:type: Currency
"""
self._cost_of_points_granted = cost_of_points_granted
@property
def cost_of_points_returned(self):
"""Gets the cost_of_points_returned of this ShipmentItem. # noqa: E501
:return: The cost_of_points_returned of this ShipmentItem. # noqa: E501
:rtype: Currency
"""
return self._cost_of_points_returned
@cost_of_points_returned.setter
def cost_of_points_returned(self, cost_of_points_returned):
"""Sets the cost_of_points_returned of this ShipmentItem.
:param cost_of_points_returned: The cost_of_points_returned of this ShipmentItem. # noqa: E501
:type: Currency
"""
self._cost_of_points_returned = cost_of_points_returned
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ShipmentItem, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShipmentItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
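A brief round-trip sketch for the generated model (field values are invented for illustration):
item = ShipmentItem(seller_sku='SKU-123', quantity_shipped=2)
d = item.to_dict()
print(d['seller_sku'], d['quantity_shipped'])  # SKU-123 2
# __eq__ compares the underlying attribute dicts, so an identically
# constructed instance is equal:
print(item == ShipmentItem(seller_sku='SKU-123', quantity_shipped=2))  # True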
| 35.270833 | 377 | 0.671458 | 1,881 | 15,237 | 5.1042 | … | 1 | 0 |
09e89b2450d77d8cea8acdf70dfa8deb4095af90 | 3,370 | py | Python
my_plugins/youcompleteme/python/ycm/tests/diagnostic_interface_test.py | VirtualLG/vimrc | 33f961b0e465b852753479bc4aa0a32a6ff017cf | ["MIT"]
null | null | null | null | null | null | null | null | null
# Copyright (C) 2015-2018 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm import diagnostic_interface
from ycm.tests.test_utils import VimBuffer, MockVimModule, MockVimBuffers
from hamcrest import assert_that, contains_exactly, has_entries, has_item
from unittest import TestCase
MockVimModule()
def SimpleDiagnosticToJson( start_line, start_col, end_line, end_col ):
return {
'kind': 'ERROR',
'location': { 'line_num': start_line, 'column_num': start_col },
'location_extent': {
'start': {
'line_num': start_line,
'column_num': start_col
},
'end': {
'line_num': end_line,
'column_num': end_col
}
},
'ranges': [
{
'start': {
'line_num': start_line,
'column_num': start_col
},
'end': {
'line_num': end_line,
'column_num': end_col
}
}
]
}
def YcmTextPropertyTupleMatcher( start_line, start_col, end_line, end_col ):
return has_item( contains_exactly(
start_line,
start_col,
'YcmErrorProperty',
has_entries( { 'end_col': end_col, 'end_lnum': end_line } ) ) )
class DiagnosticInterfaceTest( TestCase ):
def test_ConvertDiagnosticToTextProperties( self ):
for diag, contents, result in [
# Error in middle of the line
[
SimpleDiagnosticToJson( 1, 16, 1, 23 ),
[ 'Highlight this error please' ],
YcmTextPropertyTupleMatcher( 1, 16, 1, 23 )
],
# Error at the end of the line
[
SimpleDiagnosticToJson( 1, 16, 1, 21 ),
[ 'Highlight this warning' ],
YcmTextPropertyTupleMatcher( 1, 16, 1, 21 )
],
[
SimpleDiagnosticToJson( 1, 16, 1, 19 ),
[ 'Highlight unicøde' ],
YcmTextPropertyTupleMatcher( 1, 16, 1, 19 )
],
# Non-positive position
[
SimpleDiagnosticToJson( 0, 0, 0, 0 ),
[ 'Some contents' ],
YcmTextPropertyTupleMatcher( 1, 1, 1, 1 )
],
[
SimpleDiagnosticToJson( -1, -2, -3, -4 ),
[ 'Some contents' ],
YcmTextPropertyTupleMatcher( 1, 1, 1, 1 )
],
]:
with self.subTest( diag = diag, contents = contents, result = result ):
current_buffer = VimBuffer( 'foo', number = 1, contents = [ '' ] )
target_buffer = VimBuffer( 'bar', number = 2, contents = contents )
with MockVimBuffers( [ current_buffer, target_buffer ],
[ current_buffer, target_buffer ] ):
actual = diagnostic_interface._ConvertDiagnosticToTextProperties(
target_buffer.number,
diag )
print( actual )
assert_that( actual, result )
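The module defines no entry point of its own; under YCM's harness the TestCase is collected automatically, but a standalone run could be sketched as:
# Standalone invocation sketch:
#   python -m unittest diagnostic_interface_test
# or, appended to the file:
if __name__ == '__main__':
    import unittest
    unittest.main()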
| 32.403846 | 77 | 0.625223 | 373 | 3,370 | 5.50134 | … | 1 | 0 |
09edfb321e8839956c0dd18d657713402150647f | 2,043 | py | Python
examples/design_studies/ihm_fingergait/check_progress.py | cbteeple/somo | 53a1a94f7d9d624bc4c43e582c80f24a0e98df24 | ["MIT"]
null | null | null | null | null | null | null | null | null
# Be sure to run this file from the "palm_sweeps" folder
# cd examples/palm_sweeps
import os
import sys
from datetime import datetime
path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, path)
from somo.sweep import iter_utils
config_file = "sweeps/grid_diam_height.yaml"
todo_file = "_runs_todo.yaml"
num_files_per_folder_end = 5
num_files_per_folder_start = 1
time_per_run = 25 # seconds
avg_size = 40 # MB
parallel_cores = 4
# Get data from config files
config = iter_utils.load_yaml(config_file)
todo = iter_utils.load_yaml(todo_file)
total_runs = len(todo)
# Calculate the time
total_time_min = (time_per_run / 60.0) * total_runs / parallel_cores
total_time_hr = total_time_min / 60.0
total_time_day = total_time_hr / 24.0
# Calculate total data size
total_size_GB = float(avg_size) * total_runs / 1000.0
# Calculate the percent complete
folder_to_count = iter_utils.get_group_folder(config)
cpt = sum([len(files) for r, d, files in os.walk(folder_to_count)])
total_files_expected_end = total_runs * num_files_per_folder_end
total_files_expected_start = total_runs * num_files_per_folder_start
progress = (cpt - total_files_expected_start) / (
total_files_expected_end - total_files_expected_start
)
eta_min = total_time_min * (1.0 - progress)
eta_hr = eta_min / 60.0
eta_day = eta_hr / 24.0
# Print info
print("")
print("Current time: " + datetime.now().strftime("%H:%M:%S"))
print("=================================")
print("Number of runs to complete: %d" % (total_runs))
print(
"Estimated total data saved @ %0.1f MB per run: %0.2f GB"
% (avg_size, total_size_GB)
)
print(
"Estimated total time @ %0.1f sec per run with %d cores: %0.1f min, %0.2f hrs, %0.3f days"
% (time_per_run, parallel_cores, total_time_min, total_time_hr, total_time_day)
)
print("---------------------------------")
print("Percent Complete: %0.3f %%" % (progress * 100))
print(
"Estimated time left: %0.1f min, %0.2f hrs, %0.3f days" % (eta_min, eta_hr, eta_day)
)
print("")
| 29.185714 | 94 | 0.708762 | 335 | 2,043 | 4.008955 | … | 1 | 0 |
09f226a5810e82fde46ce6d76eb7db7321ca355b | 3,998 | py | Python
Projects/Project 1/Handin/program.py | ymirthor/T-215-STY1 | b888da1e88c5aa16eac03353f525e9e0b9d901df | ["MIT"]
null | null | null | null | null | null | null | null | null
from collections import deque as LL
class Process:
def __init__(self, parent, priority):
self.state = 1 # State: 1=ready / 0=blocked
self.parent = parent
self.children = LL()
self.resources = LL()
self.priority = priority
self.blocked_on = None
class Resource:
def __init__(self):
self.state = 1 # State: 1=ready / 0=allocated
self.waitlist = LL()
class PCB:
def __init__(self, size=16):
self.size = size # Nr of processses in PCB
self.priorities = 3 # Nr of priorties for RL
self.resources = 4 # Nr of resources for RCB
self.RL = [LL() for _ in range(3)] # RL with n priorities
self.RCB = [Resource() for _ in range(4)] # RCB with n resources
self.PCB = [None] * self.size # Empty PCB
self.running = 0 # Running process, starts on 0
self.PCB[0] = Process(None, 0)
self.RL[0].append(0)
def create(self, priority):
for idx, process in enumerate(self.PCB):
            if process is None:
self.PCB[idx] = Process(parent=self.running, priority=priority)
self.PCB[self.running].children.append(idx)
self.RL[priority].append(idx)
self.scheduler()
return f'process {idx} created'
def scheduler(self):
for priority in reversed(self.RL):
if priority:
self.running = priority[0]
break
def _destroy_recur(self, index):
count = 1
# Recur destroy children
for child in list(self.PCB[index].children):
count += self._destroy_recur(child)
# Release all resources
for resource in list(self.PCB[index].resources):
self.release(resource, index)
# Remove from ready list or from waitlist
try:
pri = self.PCB[index].priority
self.RL[pri].remove(index)
except ValueError:
resource = self.PCB[index].blocked_on
self.RCB[resource].waitlist.remove(index)
# Remove parent
parent = self.PCB[self.PCB[index].parent]
parent.children.remove(index)
self.PCB[index] = None
return count
def destroy(self, index):
count = self._destroy_recur(index)
self.scheduler()
return f'{count} processes destroyed'
def timeout(self):
i = self.running
ready_list = self.RL[self.PCB[i].priority]
ready_list.remove(i)
ready_list.append(i)
self.scheduler()
return f'process {self.running} running'
def request(self, index_resource):
resource = self.RCB[index_resource]
running_process = self.PCB[self.running]
if index_resource in running_process.resources:
return f'process {self.running} already has resource'
ready_list = self.RL[running_process.priority]
if resource.state == 1:
resource.state = 0
running_process.resources.append(index_resource)
return f'resource {index_resource} allocated'
else:
running_process.state = 0
running_process.blocked_on = index_resource
ready_list.remove(self.running)
resource.waitlist.append(self.running)
self.scheduler()
return f'process {self.running} blocked'
def release(self, index_resource, index=None):
curr_process = self.PCB[index or self.running]
resource = self.RCB[index_resource]
curr_process.resources.remove(index_resource)
if len(resource.waitlist) == 0:
resource.state = 1
else:
index_process = resource.waitlist.popleft()
process = self.PCB[index_process]
self.RL[process.priority].append(index_process)
process.state = 1
process.resources.append(index_resource)
return f'resource {index_resource} released'
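A usage sketch exercising the PCB above (return values shown in comments, traced by hand):
pcb = PCB()
print(pcb.create(1))   # 'process 1 created'; process 1 preempts process 0
print(pcb.create(2))   # 'process 2 created'; created by process 1, now running
print(pcb.request(0))  # 'resource 0 allocated' to process 2
print(pcb.timeout())   # 'process 2 running' (alone at priority 2)
print(pcb.destroy(1))  # '2 processes destroyed' (process 1 and its child 2)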
| 35.070175 | 79 | 0.598049 | 477 | 3,998 | 4.907757 | … | 1 | 0 |
09f240acbe9b8fa80d51945cdcc670845719d41c | 2,394 | py | Python
pg_methods/interfaces/state_processors.py | zafarali/policy-gradient-methods | f0d83a80ddc772dcad0c851aac9bfd41d436c274 | ["MIT"]
28 | 2018-06-12T21:37:20.000Z | 2021-12-27T15:13:14.000Z | 3 | 2018-05-10T16:33:05.000Z | 2018-06-19T18:17:37.000Z | 7 | 2018-05-08T04:13:21.000Z | 2021-04-02T12:31:55.000Z
import gym
import torch
import numpy as np
from pg_methods.interfaces import common_interfaces as common
class SimpleStateProcessor(common.Interface):
"""
Allows one to interface states between a single instance of gym
"""
def __init__(self, environment_observation_space, one_hot=False, use_cuda=False, normalize=False):
self.observation_space = environment_observation_space
if isinstance(environment_observation_space, gym.spaces.Box):
            # continuous environment
self.continous = True
self.state_size = environment_observation_space.shape
if len(self.state_size) == 1:
self.state_size = self.state_size[0]
self.one_hot = False
self.normalize = False
else:
self.continous = False
self.one_hot = one_hot
if self.one_hot:
self.state_size = environment_observation_space.n
self.normalize = False
self.max_obs = environment_observation_space.n
else:
self.normalize = normalize
self.state_size = 1
self.max_obs = environment_observation_space.n
self.use_cuda = use_cuda
def state2pytorch(self, state_idx):
if self.one_hot and not self.continous:
state = np.zeros(self.state_size)
            state[state_idx] = 1
state = torch.from_numpy(state.reshape(1, -1))
if self.use_cuda:
return state.float().cuda()
else:
return state.float()
else:
state = None
if not self.continous:
state = torch.from_numpy(np.array([state_idx]).reshape(1, -1))
else:
state = torch.from_numpy(np.array(state_idx).reshape(1, -1))
if self.normalize:
state = state / self.max_obs
if self.use_cuda:
return state.float().cuda()
else:
return state.float()
def pytorch2state(self, tensor):
if self.continous:
return common.pytorch2list(tensor)
else:
list_state = list(map(int, common.pytorch2list(tensor)))
if self.state_size == 1:
return list_state[0]
else:
return list_state
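A quick sketch with a discrete observation space (requires gym and torch; values traced by hand):
space = gym.spaces.Discrete(4)
proc = SimpleStateProcessor(space, normalize=True)
state = proc.state2pytorch(2)  # FloatTensor of shape (1, 1) holding 2/4 = 0.5
print(proc.state_size)         # 1 (scalar index representation)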
| 36.272727 | 102 | 0.575188 | 269 | 2,394 | 4.921933 | … | 1 | 0 |
09f69dea9d9541fb1a471fe9f8d7ffca1d756933 | 3,935 | py | Python
tests/test_emlib.py | mjpekala/faster-membranes | f203fc8608603bc7b16a1abeac324d52e9dfe96a | ["Apache-2.0"]
null | null | null | null | null | null | null | null | null
"""Unit test for emlib.py
To run:
PYTHONPATH=../src python test_emlib.py
"""
__author__ = "Mike Pekala"
__copyright__ = "Copyright 2015, JHU/APL"
__license__ = "Apache 2.0"
import unittest
import numpy as np
from sklearn.metrics import precision_recall_fscore_support as smetrics
import emlib
class TestEmlib(unittest.TestCase):
def test_metrics(self):
Y = np.random.randint(0,2,size=(2,5,5))
Yhat = np.random.randint(0,2,size=(2,5,5))
C,acc,prec,recall,f1 = emlib.metrics(Y, Yhat, display=False)
prec2, recall2, f12, supp = smetrics(np.reshape(Y, (Y.size,)),
np.reshape(Yhat, (Yhat.size,)))
self.assertAlmostEqual(prec, prec2[1])
self.assertAlmostEqual(recall, recall2[1])
self.assertAlmostEqual(f1, f12[1])
def test_mirror_edges(self):
X = np.random.rand(10,3,3);
b = 2 # b := border size
Xm = emlib.mirror_edges(X,b)
# make sure the result has the proper size
assert(Xm.shape[0] == X.shape[0]);
assert(Xm.shape[1] == X.shape[1]+2*b);
assert(Xm.shape[2] == X.shape[2]+2*b);
# make sure the data looks reasonable
self.assertTrue(np.all(Xm[:,:,b-1] == Xm[:,:,b]))
self.assertTrue(np.all(Xm[:, b:-b, b:-b] == X))
def test_interior_pixel_generator(self):
b = 10 # b := border size
Z = np.zeros((2,100,100), dtype=np.int32)
for idx, pct in emlib.interior_pixel_generator(Z,b,30):
Z[idx[:,0],idx[:,1],idx[:,2]] += 1
self.assertTrue(np.all(Z[:,b:-b,b:-b]==1))
Z[:,b:-b,b:-b] = 0
self.assertTrue(np.all(Z==0))
def test_stratified_interior_pixel_generator(self):
b = 10 # b := border size
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# For a 50/50 split of pixels in the interior, the generator
# should reproduce the entire interior.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Y = np.zeros((2,100,100))
Y[:,0:50,:] = 1
Z = np.zeros(Y.shape)
for idx,pct in emlib.stratified_interior_pixel_generator(Y,b,30):
Z[idx[:,0],idx[:,1],idx[:,2]] += 1
self.assertTrue(np.all(Z[:,b:-b,b:-b]==1))
Z[:,b:-b,b:-b] = 0
self.assertTrue(np.all(Z==0))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# For a random input, should see a 50/50 split of class
# labels, but not necessarily hit the entire interior.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Y = np.random.rand(2,100,100) > 0.5
nOne=0; nZero=0;
for idx,pct in emlib.stratified_interior_pixel_generator(Y,b,30):
slices = idx[:,0]; rows = idx[:,1]; cols = idx[:,2]
nOne += np.sum(Y[slices,rows,cols] == 1)
nZero += np.sum(Y[slices,rows,cols] == 0)
self.assertTrue(nOne == nZero)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# For an input tensor with "no-ops", the sampler should only
# return pixels with a positive or negative label.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Y = np.zeros((2,100,100))
Y[:,0:20,0:20] = 1
Y[:,50:70,50:70] = -1
Z = np.zeros(Y.shape)
nPos=0; nNeg=0; nTotal=0;
for idx,pct in emlib.stratified_interior_pixel_generator(Y,0,10,omitLabels=[0]):
slices = idx[:,0]; rows = idx[:,1]; cols = idx[:,2]
Z[slices,rows,cols] = Y[slices,rows,cols]
nPos += np.sum(Y[slices,rows,cols] == 1)
nNeg += np.sum(Y[slices,rows,cols] == -1)
nTotal += len(slices)
self.assertTrue(nPos == 20*20*2);
self.assertTrue(nNeg == 20*20*2);
self.assertTrue(nTotal == 20*20*2*2);
self.assertTrue(np.all(Y == Z))
if __name__ == "__main__":
unittest.main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 33.067227 | 88 | 0.519695 | 550 | 3,935 | 3.634545 | … | 1 | 0 |
09f91afeaca4a61947c025a6985fde971a2433a0 | 727 | py | Python
app/core/bluetooth/models.py | FHellmann/MLWTF | 582c3505d638907a848d5a6c739ee99981300f17 | ["Apache-2.0"]
null | null | null | null | null | null | null | null | null
#!/usr/bin/python
"""
Author: Fabio Hellmann <info@fabio-hellmann.de>
"""
from attr import s, ib
from attr.validators import instance_of
@s(frozen=True)
class BLEDevice(object):
"""
Device MAC address (as a hex string separated by colons).
"""
addr = ib(validator=instance_of(str), type=str)
"""
The name which is set
"""
name = ib(validator=instance_of(str), type=str)
"""
Received Signal Strength Indication for the last received broadcast from the device. This is an integer value
measured in dB, where 0 dB is the maximum (theoretical) signal strength, and more negative numbers indicate a
weaker signal.
"""
rssi = ib(validator=instance_of(int), type=int)
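# A minimal construction sketch (hypothetical values); attrs validates the
# declared types on construction, and the instance is immutable (frozen=True):
#   device = BLEDevice(addr='00:11:22:33:44:55', name='HeartRateMonitor', rssi=-67)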
| 27.961538
| 114
| 0.68088
| 103
| 727
| 4.76699
| 0.640777
| 0.081466
| 0.11609
| 0.12831
| 0.126273
| 0.126273
| 0.126273
| 0
| 0
| 0
| 0
| 0.001748
| 0.213205
| 727
| 25
| 115
| 29.08
| 0.856643
| 0.167813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09f949d20672656308f4b25b2fb52c7d29555163
| 1,511
|
py
|
Python
|
Algorithms_medium/1102. Path With Maximum Minimum Value.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | 4
|
2020-08-11T20:45:15.000Z
|
2021-03-12T00:33:34.000Z
|
Algorithms_medium/1102. Path With Maximum Minimum Value.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
Algorithms_medium/1102. Path With Maximum Minimum Value.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
"""
1102. Path With Maximum Minimum Value
Medium
Given a matrix of integers A with R rows and C columns, find the maximum score of a path starting at [0,0] and ending at [R-1,C-1].
The score of a path is the minimum value in that path. For example, the value of the path 8 → 4 → 5 → 9 is 4.
A path moves some number of times from one visited cell to any neighbouring unvisited cell in one of the 4 cardinal directions (north, east, west, south).
Example 1:
Input: [[5,4,5],[1,2,6],[7,4,6]]
Output: 4
Explanation:
The path with the maximum score is highlighted in yellow.
Example 2:
Input: [[2,2,1,2,2,2],[1,2,2,2,1,2]]
Output: 2
Example 3:
Input: [[3,4,6,3,4],[0,2,1,1,7],[8,8,3,2,7],[3,2,4,9,8],[4,1,2,0,0],[4,6,5,4,3]]
Output: 3
Note:
1 <= R, C <= 100
0 <= A[i][j] <= 10^9
"""
import heapq
from typing import List

class Solution:
def maximumMinimumPath(self, A: List[List[int]]) -> int:
dire = [(0, 1), (1, 0), (0, -1), (-1, 0)]
R, C = len(A), len(A[0])
maxHeap = [(-A[0][0], 0, 0)]
seen = [[0 for _ in range(C)] for _ in range(R)]
while maxHeap:
val, x, y = heapq.heappop(maxHeap)
            # seen[x][y] = 1  # marking on pop got TLE; mark on push instead (see below)
if x == R - 1 and y == C - 1: return -val
for dx, dy in dire:
nx, ny = x + dx, y + dy
if 0 <= nx < R and 0 <= ny < C and not seen[nx][ny]:
seen[nx][ny] = 1 # passed
heapq.heappush(maxHeap, (max(val, -A[nx][ny]), nx, ny))
return -1
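
# A quick smoke test against Example 1 from the problem statement above
# (assumes this file is run directly rather than on the LeetCode judge):
if __name__ == "__main__":
    assert Solution().maximumMinimumPath([[5, 4, 5], [1, 2, 6], [7, 4, 6]]) == 4
    print("Example 1 passed: maximum score is 4")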
| 26.982143
| 154
| 0.536069
| 287
| 1,511
| 2.825784
| 0.337979
| 0.014797
| 0.011097
| 0.014797
| 0.014797
| 0.014797
| 0.014797
| 0.014797
| 0
| 0
| 0
| 0.093809
| 0.294507
| 1,511
| 56
| 155
| 26.982143
| 0.664165
| 0.547981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09f9da8e8fb3a2cb6c40b0627a6fdbf5844460e0
| 1,436
|
py
|
Python
|
tests/extractor/test_factory.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | null | null | null |
tests/extractor/test_factory.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | null | null | null |
tests/extractor/test_factory.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import data_pipeline.db.factory as db_factory
import data_pipeline.extractor.factory as extractor_factory
import tests.unittest_utils as utils
import data_pipeline.constants.const as const
from pytest_mock import mocker
from data_pipeline.db.exceptions import UnsupportedDbTypeError
@pytest.fixture()
def setup(tmpdir, mocker):
mockargv_config = utils.get_default_argv_config(tmpdir)
mockargv = mocker.Mock(**mockargv_config)
pc_config = {'insert.return_value': None, 'update.return_value': None}
mock_pc = mocker.Mock(**pc_config)
af_config = {'build_process_control.return_value': mock_pc}
mock_audit_factory = mocker.Mock(**af_config)
utils.mock_build_kafka_producer(mocker)
yield (mockargv, mock_audit_factory)
@pytest.mark.parametrize("dbtype, expect_class", [
(const.ORACLE, "OracleCdcExtractor"),
(const.MSSQL, "MssqlCdcExtractor"),
])
def test_build(dbtype, expect_class, setup):
(mockargv, mock_audit_factory) = setup
mode = const.CDCEXTRACT
db = db_factory.build(dbtype)
extractor = extractor_factory.build(mode, db, mockargv, mock_audit_factory)
assert type(extractor).__name__ == expect_class
def test_build_unsupported(setup):
(mockargv, mock_audit_factory) = setup
with pytest.raises(ImportError):
db = db_factory.build("AnUnsupportedDatabase")
        extractor = extractor_factory.build(
            const.CDCEXTRACT, db, mockargv, mock_audit_factory)
| 34.190476
| 79
| 0.766017
| 181
| 1,436
| 5.78453
| 0.337017
| 0.051576
| 0.091691
| 0.114613
| 0.114613
| 0.064947
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137187
| 1,436
| 41
| 80
| 35.02439
| 0.845036
| 0
| 0
| 0.0625
| 0
| 0
| 0.103064
| 0.038301
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.09375
| false
| 0
| 0.25
| 0
| 0.34375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09fa1379267ff36d7eaf0c8f04ba9a7c23bd124b
| 3,424
|
py
|
Python
|
suremco/tracker.py
|
modsim/SurEmCo
|
71fc0cfc62f8733de93ee2736421574a154e3db3
|
[
"BSD-2-Clause"
] | null | null | null |
suremco/tracker.py
|
modsim/SurEmCo
|
71fc0cfc62f8733de93ee2736421574a154e3db3
|
[
"BSD-2-Clause"
] | null | null | null |
suremco/tracker.py
|
modsim/SurEmCo
|
71fc0cfc62f8733de93ee2736421574a154e3db3
|
[
"BSD-2-Clause"
] | null | null | null |
# SurEmCo - C++ tracker wrapper
import ctypes
from enum import IntEnum
import sys
import os
import numpy
import numpy.ctypeslib
class Tracker(object):
class Mode(IntEnum):
MOVING = 0
STATIC = 1
class Strategy(IntEnum):
BRUTE_FORCE = 0
KD_TREE = 1
track_input_type = {'dtype': [
('x', 'float64'),
('y', 'float64'),
('precision', 'float64'),
('frame', 'int64'),
('index', 'intp'),
('label', 'int64'),
('square_displacement', 'float64')
]}
debug = False
def __init__(self, debug=False):
self.debug = debug
file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'_tracker.' + ('so' if sys.platform == 'linux' else 'dll'))
old_cwd = os.getcwd()
os.chdir(os.path.dirname(file))
_track_so = ctypes.CDLL(file)
os.chdir(old_cwd)
_track_so.track.argtypes = (
numpy.ctypeslib.ndpointer(**self.track_input_type), # , flags='C_CONTIGUOUS'),
ctypes.c_size_t,
ctypes.c_float,
ctypes.c_int32,
ctypes.c_int32,
ctypes.c_int32
)
_track_so.track.restype = None
_track_so.msd.argtypes = (
numpy.ctypeslib.ndpointer(**self.track_input_type), # , flags='C_CONTIGUOUS'),
ctypes.c_size_t,
ctypes.c_float,
ctypes.c_float
)
_track_so.msd.restype = ctypes.c_float
self._track_so = _track_so
self._track = _track_so.track
self._msd = _track_so.msd
if self.debug:
_track_so.getBuildDate.restype = ctypes.c_char_p
# noinspection PyProtectedMember
print("Loaded %s compiled at %s" % (_track_so._name, _track_so.getBuildDate().decode(),))
def track(self, transfer, maximum_displacement=1.0, memory=0, mode=None, strategy=None):
if mode is None:
mode = self.Mode.MOVING
if strategy is None:
strategy = self.Strategy.BRUTE_FORCE
if len(transfer) == 0:
raise RuntimeError('Empty data!')
if self.debug:
from tempfile import NamedTemporaryFile
with NamedTemporaryFile(prefix='track_dataset', delete=False) as tf:
transfer.tofile(tf)
print("track(\"%s\", %d, %f, %d, %d, %d)" % (
tf.name, len(transfer), maximum_displacement, memory, mode, strategy
))
return self._track(transfer, len(transfer), maximum_displacement, memory, mode, strategy)
def msd(self, transfer, micron_per_pixel=1.0, frames_per_second=1.0):
# the MSD calculation was not thoroughly verified
if len(transfer) == 0:
raise RuntimeError('Empty data!')
return self._msd(transfer, len(transfer), micron_per_pixel, frames_per_second)
def __del__(self):
if not self.debug:
return
# noinspection PyProtectedMember
_handle = self._track_so._handle
del self._track_so
if sys.platform == 'linux':
dl = ctypes.CDLL('libdl.so')
dl.dlclose.argtypes = [ctypes.c_void_p]
dl.dlclose(_handle)
# elif # handle windows?
@classmethod
def empty_track_input_type(cls, count):
return numpy.zeros(count, **cls.track_input_type)
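
# A minimal usage sketch (assumes the compiled _tracker shared library is
# present next to this module; the column values are hypothetical):
#   tracker = Tracker()
#   data = Tracker.empty_track_input_type(1000)
#   data['x'], data['y'] = xs, ys      # localisation coordinates
#   data['frame'] = frames             # frame index per localisation
#   tracker.track(data, maximum_displacement=2.0, memory=1)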
| 29.517241
| 101
| 0.580023
| 396
| 3,424
| 4.775253
| 0.323232
| 0.051824
| 0.037017
| 0.015865
| 0.231095
| 0.209942
| 0.194606
| 0.143839
| 0.101534
| 0.101534
| 0
| 0.013036
| 0.305491
| 3,424
| 115
| 102
| 29.773913
| 0.78217
| 0.061916
| 0
| 0.166667
| 0
| 0
| 0.065876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059524
| false
| 0
| 0.083333
| 0.011905
| 0.25
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09fb1d9a7c357f2bc49fb2f43274b073bfff333e
| 4,026
|
py
|
Python
|
foreign_languages/anderson.py
|
ds-modules/NESTUD-190A
|
54ca1cd9a8f369f48946147f72377f874738f7d5
|
[
"MIT"
] | 6
|
2017-11-06T03:18:12.000Z
|
2019-10-02T19:41:06.000Z
|
foreign_languages/anderson.py
|
admndrsn/NESTUD-190A
|
54ca1cd9a8f369f48946147f72377f874738f7d5
|
[
"MIT"
] | null | null | null |
foreign_languages/anderson.py
|
admndrsn/NESTUD-190A
|
54ca1cd9a8f369f48946147f72377f874738f7d5
|
[
"MIT"
] | 2
|
2018-02-09T01:04:58.000Z
|
2019-06-19T17:45:34.000Z
|
from IPython.core.display import display, HTML
import translation
class translate(object):
id_start = 0
def __init__(self, column_types, language_to='en'):
self.num_of_columns = len(column_types) + 1
column_types.insert(0, 'original text')
self.column_types = column_types
self.language_to = language_to
        self.funcs = {'original text': self.original_text_pls, 'translate': self.translate_pls,
                      'parts of speech': self.polyglot_pos, 'language': self.polyglot_languages}
self.header = {'original text':'Original Text:', 'translate':'Translation:',
'parts of speech':'Parts of Speech:', 'language':'Language(s) Detected:'}
self.fonttype = 'Courier New'
self.additionalcss = ''
    # these are the per-column functions that build the body content;
    # each takes the raw text and returns that column's content
    def translate_pls(self, txt):
return translation.bing(txt, dst = self.language_to)
def original_text_pls(self, txt):
return txt
def parts_of_speech_pls(self, txt):
import nltk
tokenized = nltk.word_tokenize(txt)
return nltk.pos_tag(tokenized)
def polyglot_languages(self, txt):
from polyglot.detect import Detector
langs = Detector(txt, quiet=True).languages
selected_items = [(x.name, x.confidence) for x in langs]
# converting to readable from Detector objects
stringy_list = ['Name: ' + str(x) + ' Confidence: ' +str(y) for x,y in selected_items]
return '<br><br>'.join(stringy_list)
def polyglot_pos(self, txt):
from polyglot.text import Text
return Text(txt).pos_tags
# make a function for part of speech counts
# and names in the text
# maybe name counts
# make it so that we can try different translating services
# a google integration may be necessary :(
# incrementing the ids so that the css of a new one doesn't change an old one
def increment_ids(self):
strt_id = translate.id_start
translate.id_start += self.num_of_columns
return range(strt_id, strt_id + self.num_of_columns)
# creating the divs and the content that will go in them
def create_body(self, id_numbers, txt):
# setting up all of the divs that will be there
base_column = '<div id="{}">{}<br>{}</div>'
blank_divs = base_column * self.num_of_columns
# calling the functions specified in our constructor on our body of text
content = [self.funcs[col](txt) for col in self.column_types]
headers = [self.header[col] for col in self.column_types]
# zipping them together so we can make a string in the correct order, then flattening
nested_order = list(zip(id_numbers, headers, content))
unnested = [item for sublist in nested_order for item in sublist]
return '<div id="wrapper">' + blank_divs.format(*(unnested)) + '</div>'
def create_css(self, id_numbers):
# picking alternating colors for columns
clrs = ['#e6f3f7', 'lightgray']
def alternate():
while True:
yield 0
yield 1
gen = alternate()
clr_list = [clrs[next(gen)] for i in range(self.num_of_columns)]
# width evenly divided by number of columns
width = "width:{}%;".format(str(100 / self.num_of_columns))
# setting up for all different css that will be there
base_css = "#{} {{background-color: {};" + width + "float:left;padding: .5vw;border-right: solid black 1.5px;}}"
blank_csss = base_css * self.num_of_columns
# zipping them together so we can make a string in the correct order, then flattening
nested_order = list(zip(id_numbers, clr_list))
unnested = [item for sublist in nested_order for item in sublist]
final_css = blank_csss.format(*(unnested))
wrapper = "{} #wrapper {{width:100%;clear:both;display: flex;font-family:{};}}".format(self.additionalcss, self.fonttype)
return wrapper + final_css
def create(self, initial_text):
id_list = self.increment_ids()
string_ids = ['d'+str(x) for x in id_list]
display(HTML('<style>{}</style> <body>{}</body>'.format(self.create_css(string_ids), self.create_body(string_ids, initial_text))))
# Add a return statement so that the values are accessible
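
# A minimal usage sketch (hypothetical input; requires the translation and
# polyglot packages plus an IPython display context):
#   t = translate(['translate', 'language'], language_to='en')
#   t.create('Hola, mundo')  # renders original text, translation and detected language side by side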
| 31.209302
| 132
| 0.721063
| 611
| 4,026
| 4.607201
| 0.327332
| 0.025577
| 0.02238
| 0.039787
| 0.13357
| 0.120071
| 0.10373
| 0.10373
| 0.10373
| 0.10373
| 0
| 0.00507
| 0.167163
| 4,026
| 128
| 133
| 31.453125
| 0.834477
| 0.23696
| 0
| 0.03125
| 0
| 0
| 0.15498
| 0.024902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171875
| false
| 0
| 0.078125
| 0.03125
| 0.40625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61cb92b7eff849f550f556cfcf71f302f039dac7
| 1,315
|
py
|
Python
|
landdox/core.py
|
natefduncan/landdox
|
58908554034577cc20c6f89ee6056da90cbfbd4e
|
[
"MIT"
] | 1
|
2019-12-13T16:19:56.000Z
|
2019-12-13T16:19:56.000Z
|
landdox/core.py
|
natefduncan/landdox
|
58908554034577cc20c6f89ee6056da90cbfbd4e
|
[
"MIT"
] | null | null | null |
landdox/core.py
|
natefduncan/landdox
|
58908554034577cc20c6f89ee6056da90cbfbd4e
|
[
"MIT"
] | null | null | null |
import requests
import json
import pandas as pd
import os
from .endpoints import *
class Client:
endpoints = {
"contacts" : contacts,
"leases" : leases,
"units" : units,
"wells" : wells,
"custom" : custom,
"tracts" : tracts,
"payments" : payments
}
def __init__(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
self.authorize()
def __getattr__(self, name):
endpoint = self.endpoints.get(name)
endpoint.access_token = self.access_token
return endpoint
def authorize(self):
payload = {
"client_id" : self.client_id,
"client_secret" : self.client_secret,
"audience" : "api.landdox.com",
"grant_type" : "client_credentials"
}
url = "https://landdox.auth0.com/oauth/token"
response = requests.post(url, data=payload)
if response.status_code != 200:
raise ValueError("{error}".format(error=response))
else:
response = response.json()
self.access_token = response.get("access_token")
self.expires_in = response.get("expires_in")
self.expires_in = response.get("token_type")
| 26.3
| 62
| 0.580989
| 137
| 1,315
| 5.364964
| 0.416058
| 0.068027
| 0.04898
| 0.073469
| 0.157823
| 0.092517
| 0.092517
| 0
| 0
| 0
| 0
| 0.004415
| 0.311027
| 1,315
| 49
| 63
| 26.836735
| 0.806843
| 0
| 0
| 0
| 0
| 0
| 0.14688
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.128205
| 0
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61cd1b7623e09e8563a60f3d87a7caf270f2faa2
| 589
|
py
|
Python
|
src/signalplotter/qt/makePyUI.py
|
jowanpittevils/Databasemanager_Signalplotter
|
993152ad15793054df2acf386eb1c9a76610b789
|
[
"BSD-3-Clause"
] | null | null | null |
src/signalplotter/qt/makePyUI.py
|
jowanpittevils/Databasemanager_Signalplotter
|
993152ad15793054df2acf386eb1c9a76610b789
|
[
"BSD-3-Clause"
] | null | null | null |
src/signalplotter/qt/makePyUI.py
|
jowanpittevils/Databasemanager_Signalplotter
|
993152ad15793054df2acf386eb1c9a76610b789
|
[
"BSD-3-Clause"
] | null | null | null |
#%%
def makeUI(uiNames):
    import sys, os, subprocess
    print('Check the cwd first; it must be at .../SignalPlotter/qt.')
    print(os.getcwd())
    p0 = os.path.dirname(sys.executable)
    for uiName in uiNames:
        print('===== for: ', uiName, ' ======')
        p1 = '"' + os.path.join(p0, 'Scripts', 'pyuic5.exe') + '" '
        p1 += ' -x "' + uiName + '.ui"'
        p1 += ' -o "' + uiName + '.py"'
        print(p1)
        res = subprocess.call(p1) != 0
print('Done.')
print('Is there any error: ', res)
uiNames = ['plotter_uiDesign']
makeUI(uiNames)
# %%
| 21.035714
| 69
| 0.50764
| 68
| 589
| 4.382353
| 0.647059
| 0.087248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02148
| 0.288625
| 589
| 27
| 70
| 21.814815
| 0.689737
| 0.008489
| 0
| 0
| 0
| 0
| 0.266323
| 0.036082
| 0.058824
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.176471
| 0.352941
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61cea84c27bf7df9b0289ed47ffee2781ddbdc17
| 3,148
|
py
|
Python
|
mpcontribs-users/mpcontribs/users/swf/pre_submission.py
|
josuav1/MPContribs
|
3cbf0e83ba6cd749dd4fc988c9f6ad076b05f935
|
[
"MIT"
] | 1
|
2019-07-03T04:38:58.000Z
|
2019-07-03T04:38:58.000Z
|
mpcontribs-users/mpcontribs/users/swf/pre_submission.py
|
josuav1/MPContribs
|
3cbf0e83ba6cd749dd4fc988c9f6ad076b05f935
|
[
"MIT"
] | null | null | null |
mpcontribs-users/mpcontribs/users/swf/pre_submission.py
|
josuav1/MPContribs
|
3cbf0e83ba6cd749dd4fc988c9f6ad076b05f935
|
[
"MIT"
] | 1
|
2019-07-03T04:39:04.000Z
|
2019-07-03T04:39:04.000Z
|
from mpcontribs.config import mp_level01_titles
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.utils import clean_value, get_composition_from_string
from mpcontribs.users.utils import duplicate_check
def round_to_100_percent(number_set, digit_after_decimal=1):
unround_numbers = [
x / float(sum(number_set)) * 100 * 10**digit_after_decimal
for x in number_set
]
decimal_part_with_index = sorted([
(index, unround_numbers[index] % 1)
for index in range(len(unround_numbers))
], key=lambda y: y[1], reverse=True)
remainder = 100 * 10**digit_after_decimal - sum(map(int, unround_numbers))
index = 0
while remainder > 0:
unround_numbers[decimal_part_with_index[index][0]] += 1
remainder -= 1
index = (index + 1) % len(number_set)
return [int(x)/float(10**digit_after_decimal) for x in unround_numbers]
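# A quick worked example of the largest-remainder rounding above: each value
# is scaled to tenths of a percent, floored, and the leftover tenths go to
# the entries with the largest fractional parts, so the result sums to 100:
#   round_to_100_percent([1, 1, 1])  ->  [33.4, 33.3, 33.3]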
@duplicate_check
def run(mpfile, **kwargs):
import pymatgen
import pandas as pd
from mpcontribs.users.swf.rest.rester import SwfRester
# load data from google sheet
google_sheet = mpfile.document[mp_level01_titles[0]].pop('google_sheet')
google_sheet += '/export?format=xlsx'
df_dct = pd.read_excel(google_sheet, sheet_name=None)
# rename sheet columns
elements = ['Fe', 'V', 'Co']
df_dct['IP Energy Product'].columns = ['IP_Energy_product'] + elements
df_dct['total'].columns = elements
df_dct['MOKE'].columns = elements + ['thickness', 'MOKE_IP_Hc']
df_dct['VSM'].columns = elements + ['thickness', 'VSM_IP_Hc']
df_dct['formula'].columns = elements
df_dct['Kondorsky'].columns = ['angle', 'Kondorsky_Model', 'Experiment']
# round all compositions to 100%
for sheet, df in df_dct.items():
if sheet != 'Kondorsky':
for idx, row in df.iterrows():
df.loc[idx:idx, elements] = round_to_100_percent(row[elements])
row5 = df_dct['formula'].iloc[0]
formula5 = get_composition_from_string(
pymatgen.Composition(10*row5).formula.replace(' ', '')
)
dct = dict((k, clean_value(v, '%')) for k,v in row5.to_dict().items())
mpfile.add_hierarchical_data({'data': dct}, identifier=formula5)
mpfile.add_data_table(
formula5, df_dct['Kondorsky'], name='Angular Dependence of Switching Field'
)
for sheet, df in df_dct.items():
if sheet == 'formula' or sheet == 'Kondorsky' or sheet == 'total':
continue
for idx, row in df.iterrows():
composition = pymatgen.Composition(row[elements]*10)
formula = get_composition_from_string(composition.formula.replace(' ', ''))
dct = dict((k, clean_value(v, '%')) for k,v in row[elements].to_dict().items())
mpfile.add_hierarchical_data({'data': dct}, identifier=formula)
columns = [x for x in row.index if x not in elements]
if columns:
data = row[columns].round(decimals=1)
dct = dict((k, clean_value(v)) for k,v in data.to_dict().items())
mpfile.add_hierarchical_data({'data': dct}, identifier=formula)
| 43.123288
| 91
| 0.656607
| 422
| 3,148
| 4.699052
| 0.303318
| 0.027736
| 0.034291
| 0.036309
| 0.228946
| 0.216339
| 0.195159
| 0.169945
| 0.169945
| 0.140696
| 0
| 0.019013
| 0.21474
| 3,148
| 72
| 92
| 43.722222
| 0.783172
| 0.025095
| 0
| 0.098361
| 0
| 0
| 0.086162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.114754
| 0
| 0.163934
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61cf7342efb940a3f5d7c9b44e90c3d3f4d12610
| 21,205
|
py
|
Python
|
src/trails/flow_model.py
|
BenDickens/trails
|
a89a1a901c7be38cdcb7a59339587e518ab8f14d
|
[
"MIT"
] | 4
|
2020-09-14T07:20:19.000Z
|
2021-04-22T14:23:04.000Z
|
src/trails/flow_model.py
|
BenDickens/trails
|
a89a1a901c7be38cdcb7a59339587e518ab8f14d
|
[
"MIT"
] | 5
|
2021-03-17T17:02:27.000Z
|
2021-08-31T10:09:38.000Z
|
src/trails/flow_model.py
|
BenDickens/trails
|
a89a1a901c7be38cdcb7a59339587e518ab8f14d
|
[
"MIT"
] | 3
|
2020-09-07T07:35:28.000Z
|
2021-04-22T14:23:39.000Z
|
import os,sys
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import pygeos
from osgeo import gdal
from tqdm import tqdm
import igraph as ig
import contextily as ctx
from rasterstats import zonal_stats
import time
import pylab as pl
from IPython import display
import seaborn as sns
import subprocess
import shutil
from multiprocessing import Pool,cpu_count
import pathlib
code_path = (pathlib.Path(__file__).parent.absolute())
gdal.SetConfigOption("OSM_CONFIG_FILE", os.path.join(code_path,'..','..',"osmconf.ini"))
from shapely.wkb import loads
data_path = os.path.join('..','data')
from simplify import *
from extract import railway,ferries,mainRoads,roads
from population_OD import create_bbox,create_grid
pd.options.mode.chained_assignment = None
def closest_node(node, nodes):
"""[summary]
Args:
node ([type]): [description]
nodes ([type]): [description]
Returns:
[type]: [description]
"""
dist_2 = np.sum((nodes - node)**2, axis=1)
return np.argmin(dist_2)
def load_network(osm_path,mainroad=True):
"""[summary]
Args:
osm_path ([type]): [description]
mainroad (bool, optional): [description]. Defaults to True.
Returns:
[type]: [description]
"""
if mainroad:
df = mainRoads(osm_path)
else:
df = roads(osm_path)
net = Network(edges=df)
net = clean_roundabouts(net)
net = split_edges_at_nodes(net)
net = add_endpoints(net)
net = add_ids(net)
net = add_topology(net)
net = drop_hanging_nodes(net)
net = merge_edges(net)
net = reset_ids(net)
net = add_distances(net)
net = merge_multilinestrings(net)
net = fill_attributes(net)
net = add_travel_time(net)
return net
def make_directed(edges):
save_edges = []
for ind,edge in edges.iterrows():
if edge.oneway == 'yes':
save_edges.append(edge)
else:
edge.oneway = 'yes'
edge.lanes = np.round(edge.lanes/2,0)
save_edges.append(edge)
edge2 = edge.copy()
from_id = edge.from_id
to_id = edge.to_id
edge2.from_id = to_id
edge2.to_id = from_id
save_edges.append(edge2)
new_edges = pd.DataFrame(save_edges).reset_index(drop=True)
new_edges.id = new_edges.index
return new_edges
def get_gdp_values(gdf,data_path):
"""[summary]
Args:
gdf ([type]): [description]
Returns:
[type]: [description]
"""
world_pop = os.path.join(data_path,'global_gdp','GDP_2015.tif')
gdf['geometry'] = gdf.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
gdp = list(item['sum'] for item in zonal_stats(gdf.geometry,world_pop,
stats="sum"))
gdp = [x if x is not None else 0 for x in gdp]
gdf['geometry'] = pygeos.from_shapely(gdf.geometry)
return gdp
def country_grid_gdp_filled(trans_network,country,data_path,rough_grid_split=100,from_main_graph=False):
"""[summary]
Args:
trans_network ([type]): [description]
rough_grid_split (int, optional): [description]. Defaults to 100.
Returns:
[type]: [description]
"""
    if from_main_graph:
node_df = trans_network.copy()
envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
height = np.sqrt(pygeos.area(envelop)/rough_grid_split)
else:
node_df = trans_network.nodes.copy()
node_df.geometry,approximate_crs = convert_crs(node_df)
envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
height = np.sqrt(pygeos.area(envelop)/rough_grid_split)
gdf_admin = pd.DataFrame(create_grid(create_bbox(node_df),height),columns=['geometry'])
#load data and convert to pygeos
country_shape = gpd.read_file(os.path.join(data_path,'GADM','gadm36_levels.gpkg'),layer=0)
country_shape = pd.DataFrame(country_shape.loc[country_shape.GID_0==country])
country_shape.geometry = pygeos.from_shapely(country_shape.geometry)
gdf_admin = pygeos.intersection(gdf_admin,country_shape.geometry)
gdf_admin = gdf_admin.loc[~pygeos.is_empty(gdf_admin.geometry)]
gdf_admin['centroid'] = pygeos.centroid(gdf_admin.geometry)
gdf_admin['km2'] = area(gdf_admin)
gdf_admin['gdp'] = get_gdp_values(gdf_admin,data_path)
gdf_admin = gdf_admin.loc[gdf_admin.gdp > 0].reset_index()
gdf_admin['gdp_area'] = gdf_admin.gdp/gdf_admin['km2']
return gdf_admin
def convert_crs(gdf,current_crs="epsg:4326"):
"""[summary]
Args:
gdf ([type]): [description]
Returns:
[type]: [description]
"""
if current_crs == "epsg:4326":
lat = pygeos.geometry.get_y(pygeos.centroid(gdf['geometry'].iloc[0]))
lon = pygeos.geometry.get_x(pygeos.centroid(gdf['geometry'].iloc[0]))
# formula below based on :https://gis.stackexchange.com/a/190209/80697
approximate_crs = "epsg:" + str(int(32700-np.round((45+lat)/90,0)*100+np.round((183+lon)/6,0)))
else:
approximate_crs = "epsg:4326"
#from pygeos/issues/95
geometries = gdf['geometry']
coords = pygeos.get_coordinates(geometries)
transformer=pyproj.Transformer.from_crs(current_crs, approximate_crs,always_xy=True)
new_coords = transformer.transform(coords[:, 0], coords[:, 1])
result = pygeos.set_coordinates(geometries.copy(), np.array(new_coords).T)
return result,approximate_crs
def area(gdf,km=True):
"""[summary]
Args:
gdf ([type]): [description]
km (bool, optional): [description]. Defaults to True.
Returns:
[type]: [description]
"""
if km:
return pygeos.area(convert_crs(gdf)[0])/1e6
else:
return pygeos.area(convert_crs(gdf)[0])
def get_basetable(country,data_path):
io_data_path = os.path.join(data_path,'country_IO_tables')
df = pd.read_csv(os.path.join(io_data_path,'IO_{}_2015_BasicPrice.txt'.format(country)),
sep='\t', skiprows=1,header=[0,1,2],index_col = [0,1,2,3],
skipfooter=2617,engine='python')
basetable = df.iloc[:26,:26]
return basetable.astype(int)
def create_OD(gdf_admin,country_name,data_path):
"""[summary]
Args:
gdf_admin ([type]): [description]
country_name ([type]): [description]
Returns:
[type]: [description]
"""
# create list of sectors
sectors = [chr(i).upper() for i in range(ord('a'),ord('o')+1)]
# add a region column if not existing yet.
if 'NAME_1' not in gdf_admin.columns:
gdf_admin['NAME_1'] = ['reg'+str(x) for x in list(gdf_admin.index)]
# prepare paths to downscale a country. We give a country its own directory
# to allow for multiple unique countries running at the same time
downscale_basepath = os.path.join(code_path,'..','..','downscale_od')
downscale_countrypath = os.path.join(code_path,'..','..','run_downscale_od_{}'.format(country_name))
# copy downscaling method into the country directory
shutil.copytree(downscale_basepath,downscale_countrypath)
# save national IO table as basetable for downscaling
get_basetable(country_name,data_path).to_csv(os.path.join(downscale_countrypath,'basetable.csv'),
sep=',',header=False,index=False)
# create proxy table with GDP values per region/area
proxy_reg = pd.DataFrame(gdf_admin[['NAME_1','gdp_area']])
proxy_reg['year'] = 2016
proxy_reg = proxy_reg[['year','NAME_1','gdp_area']]
proxy_reg.columns = ['year','id','gdp_area']
proxy_reg.to_csv(os.path.join(downscale_countrypath,'proxy_reg.csv'),index=False)
indices = pd.DataFrame(sectors,columns=['sector'])
indices['name'] = country_name
indices = indices.reindex(['name','sector'],axis=1)
indices.to_csv(os.path.join(downscale_countrypath,'indices.csv'),index=False,header=False)
# prepare yaml file
yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "r")
list_of_lines = yaml_file.readlines()
list_of_lines[6] = ' - id: {}\n'.format(country_name)
list_of_lines[8] = ' into: [{}] \n'.format(','.join(['reg'+str(x) for x in list(gdf_admin.index)]))
yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "w")
yaml_file.writelines(list_of_lines)
yaml_file.close()
# run libmrio
p = subprocess.Popen([os.path.join(downscale_countrypath,'mrio_disaggregate'), 'settings_basic.yml'],
cwd=os.path.join(downscale_countrypath))
p.wait()
# create OD matrix from libmrio results
OD = pd.read_csv(os.path.join(downscale_countrypath,'output.csv'),header=None)
OD.columns = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
OD.index = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
OD = OD.groupby(level=0,axis=0).sum().groupby(level=0,axis=1).sum()
OD = (OD*5)/365
OD_dict = OD.stack().to_dict()
gdf_admin['import'] = list(OD.sum(axis=1))
gdf_admin['export'] = list(OD.sum(axis=0))
gdf_admin = gdf_admin.rename({'NAME_1': 'name'}, axis='columns')
# and remove country folder again to avoid clutter in the directory
shutil.rmtree(downscale_countrypath)
return OD,OD_dict,sectors,gdf_admin
def prepare_network_routing(transport_network):
"""[summary]
Args:
transport_network ([type]): [description]
Returns:
[type]: [description]
"""
gdf_roads = make_directed(transport_network.edges)
gdf_roads = gdf_roads.rename(columns={"highway": "infra_type"})
gdf_roads['GC'] = gdf_roads.apply(gc_function,axis=1)
gdf_roads['max_flow'] = gdf_roads.apply(set_max_flow,axis=1)
gdf_roads['flow'] = 0
gdf_roads['wait_time'] = 0
return gdf_roads
def create_graph(gdf_roads):
"""[summary]
Args:
gdf_roads ([type]): [description]
Returns:
[type]: [description]
"""
gdf_in = gdf_roads.reindex(['from_id','to_id'] + [x for x in list(gdf_roads.columns) if x not in ['from_id','to_id']],axis=1)
g = ig.Graph.TupleList(gdf_in.itertuples(index=False), edge_attrs=list(gdf_in.columns)[2:],directed=True)
sg = g.clusters().giant()
gdf_in.set_index('id',inplace=True)
return sg,gdf_in
def nearest_network_node_list(gdf_admin,gdf_nodes,sg):
"""[summary]
Args:
gdf_admin ([type]): [description]
gdf_nodes ([type]): [description]
sg ([type]): [description]
Returns:
[type]: [description]
"""
gdf_nodes = gdf_nodes.loc[gdf_nodes.id.isin(sg.vs['name'])]
gdf_nodes.reset_index(drop=True,inplace=True)
nodes = {}
for admin_ in gdf_admin.itertuples():
nodes[admin_.name] = gdf_nodes.iloc[pygeos.distance((admin_.centroid),gdf_nodes.geometry).idxmin()].id
return nodes
def set_max_flow(segment):
"""[summary]
Args:
segment ([type]): [description]
Returns:
[type]: [description]
"""
empty_trip_correction = 0.7 #available capacity for freight reduces
# standard lane capacity = 1000 passenger vehicles per lane per hour
# trunk and motorway correct by factor 4
# primary correct by factor 2
# secondary correct by factor 1
# tertiary correct factor 0.5
# other roads correct factor 0.5
# passenger vehicle equvalent for trucks: 3.5
# average truck load: 8 tonnes
# 30 % of trips are empty
# median value per ton: 2,000 USD
# median truck value: 8*2000 = 16,000 USD
standard_max_flow = 1000/3.5*16000*empty_trip_correction
if (segment.infra_type == 'trunk') | (segment.infra_type == 'trunk_link'):
return standard_max_flow*4
elif (segment.infra_type == 'motorway') | (segment.infra_type == 'motorway_link'):
return standard_max_flow*4
elif (segment.infra_type == 'primary') | (segment.infra_type == 'primary_link'):
return standard_max_flow*2
elif (segment.infra_type == 'secondary') | (segment.infra_type == 'secondary_link'):
return standard_max_flow*1
elif (segment.infra_type == 'tertiary') | (segment.infra_type == 'tertiary_link'):
return standard_max_flow*0.5
else:
return standard_max_flow*0.5
def gc_function(segment):
"""[summary]
Args:
segment ([type]): [description]
Returns:
[type]: [description]
"""
# GC = α ∗ WaitT + β ∗ TrvlT + μ ∗ Trate + γ ∗ stddev
Wait_time = 0
if segment.infra_type in ['primary','primary_link']:
Trate = 0.5
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
elif segment.infra_type in ['secondary','secondary_link']:
Trate = 1
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
elif segment.infra_type in ['tertiary','tertiary_link']:
Trate = 1.5
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
else:
Trate = 2
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
def update_gc_function(segment):
"""[summary]
Args:
segment ([type]): [description]
Returns:
[type]: [description]
"""
# GC = α ∗ WaitT + β ∗ TrvlT + μ ∗ Trate + γ ∗ stddev
if segment['flow'] > segment['max_flow']:
segment['wait_time'] += 1
elif segment['wait_time'] > 0:
        segment['wait_time'] -= 1
else:
segment['wait_time'] = 0
if segment['infra_type'] in ['primary','primary_link']:
Trate = 0.5
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
elif segment['infra_type'] in ['secondary','secondary_link']:
Trate = 1
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
elif segment['infra_type'] in ['tertiary','tertiary_link']:
Trate = 1.5
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
else:
Trate = 2
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
def run_flow_analysis(country,transport_network,gdf_admin,OD_dict,notebook=False):
"""[summary]
Args:
transport_network ([type]): [description]
gdf_admin ([type]): [description]
Returns:
[type]: [description]
"""
plt.rcParams['figure.figsize'] = [5, 5]
gdf_roads = prepare_network_routing(transport_network)
sg,gdf_in = create_graph(gdf_roads)
nearest_node = nearest_network_node_list(gdf_admin,transport_network.nodes,sg)
dest_nodes = [sg.vs['name'].index(nearest_node[x]) for x in list(nearest_node.keys())]
    # this is where the iterative flow assignment happens
iterator = 0
optimal = False
max_iter = 100
save_fits = []
if not notebook:
        plt.ion()  # interactive mode, so the figure can update between iterations
# run flow optimization model
    while not optimal:
#update cost function per segment, dependent on flows from previous iteration.
sg.es['GC'] = [(lambda segment: update_gc_function(segment))(segment) for segment in list(sg.es)]
sg.es['flow'] = 0
#(re-)assess shortest paths between all regions
for admin_orig in (list(gdf_admin.name)):
paths = sg.get_shortest_paths(sg.vs[sg.vs['name'].index(nearest_node[admin_orig])],dest_nodes,weights='GC',output="epath")
for path,admin_dest in zip(paths,list(gdf_admin.name)):
flow_value = OD_dict[(admin_orig,admin_dest)]
sg.es[path]['flow'] = [x + flow_value for x in sg.es[path]['flow']]
fitting_edges = (sum([x<y for x,y in zip(sg.es['flow'],sg.es['max_flow'])])/len(sg.es))
save_fits.append(fitting_edges)
        # if at least 99% of roads are below max flow, we call it good enough
if (sum([x<y for x,y in zip(sg.es['flow'],sg.es['max_flow'])])/len(sg.es)) > 0.99:
optimal = True
iterator += 1
# when running the code in a notebook, the figure updates instead of a new figure each iteration
if notebook:
pl.plot(save_fits)
display.display(pl.gcf())
display.clear_output(wait=True)
else:
plt.plot(save_fits)
plt.xlabel('# iteration')
plt.ylabel('Share of edges below maximum flow')
plt.show()
            plt.pause(0.0001)  # brief pause so the figure refreshes
if iterator == max_iter:
break
# save output
plt.savefig(os.path.join(code_path,'..','..','figures','{}_flow_modelling.png'.format(country)))
gdf_in['flow'] = pd.DataFrame(sg.es['flow'],columns=['flow'],index=sg.es['id'])
gdf_in['max_flow'] = pd.DataFrame(sg.es['max_flow'],columns=['max_flow'],index=sg.es['id'])
gdf_in['wait_time'] = pd.DataFrame(sg.es['wait_time'],columns=['wait_time'],index=sg.es['id'])
gdf_in['overflow'] = gdf_in['flow'].div(gdf_in['max_flow'])
return gdf_in
def plot_OD_matrix(OD):
"""[summary]
Args:
OD ([type]): [description]
"""
plt.rcParams['figure.figsize'] = [20, 15]
sns.heatmap(OD,vmin=0,vmax=1e5,cmap='Reds')
def plot_results(gdf_in):
"""[summary]
Args:
gdf_in ([type]): [description]
"""
gdf_in['geometry'] = gdf_in.geometry.apply(lambda x : loads(pygeos.to_wkb(x)))
gdf_plot = gpd.GeoDataFrame(gdf_in)
gdf_plot.crs = 4326
gdf_plot = gdf_plot.to_crs(3857)
plt.rcParams['figure.figsize'] = [20, 10]
fig, axes = plt.subplots(1, 2)
for iter_,ax in enumerate(axes.flatten()):
if iter_ == 0:
gdf_plot.loc[gdf_plot.flow>1].plot(ax=ax,column='flow',legend=False,cmap='Reds',linewidth=3) #loc[gdf_plot.flow>1]
ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite,zoom=15)
ax.set_axis_off()
ax.set_title('Flows along the network')
else:
pd.DataFrame(gdf_in.loc[gdf_in.max_flow>1].groupby(
'infra_type').sum()['distance']/gdf_in.groupby('infra_type').sum()['distance']).dropna().sort_values(
by='distance',ascending=False).plot(type='bar',color='red',ax=ax)
ax.set_ylabel('Percentage of edges > max flow')
ax.set_xlabel('Road type')
#plt.show(block=True)
def country_run(country,data_path=os.path.join('C:\\','Data'),plot=False,save=True):
"""[summary]
Args:
country ([type]): [description]
plot (bool, optional): [description]. Defaults to True.
"""
osm_path = os.path.join(data_path,'country_osm','{}.osm.pbf'.format(country))
transport_network = load_network(osm_path)
print('NOTE: Network created')
gdf_roads = prepare_network_routing(transport_network)
sg = create_graph(gdf_roads)[0]
main_graph = pd.DataFrame(list(sg.es['geometry']),columns=['geometry'])
gdf_admin = country_grid_gdp_filled(main_graph,country,data_path,rough_grid_split=100,from_main_graph=True)
print('NOTE: GDP values extracted')
# OD,OD_dict,sectors,gdf_admin = create_OD(gdf_admin,country,data_path)
# print('NOTE: OD created')
# gdf_out = run_flow_analysis(country,transport_network,gdf_admin,OD_dict)
# print('NOTE: Flow analysis finished')
# if save:
# gdf_admin['geometry'] = gdf_admin.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
# gdf_out = gdf_out.loc[~gdf_out.max_flow.isna()].reset_index(drop=True)
# gdf_out_save = gdf_out.copy()
# gdf_out_save['geometry'] = gdf_out_save.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
# gpd.GeoDataFrame(gdf_admin.drop('centroid',axis=1)).to_file(
# os.path.join(code_path,'..','..','data',
# '{}.gpkg'.format(country)),layer='grid',driver='GPKG')
# gpd.GeoDataFrame(gdf_out_save).to_file(os.path.join('..','..','data',
# '{}.gpkg'.format(country)),layer='network',driver='GPKG')
# if plot:
# plot_results(gdf_out)
if __name__ == '__main__':
#country_run(sys.argv[1],os.path.join('C:\\','Data'),plot=False)
#country_run(sys.argv[1],os.path.join(code_path,'..','..','Data'),plot=False)
#data_path = os.path.join('C:\\','Data')
if (len(sys.argv) > 1) & (len(sys.argv[1]) == 3):
country_run(sys.argv[1])
elif (len(sys.argv) > 1) & (len(sys.argv[1]) > 3):
glob_info = pd.read_excel(os.path.join('/scistor','ivm','eks510','projects','trails','global_information.xlsx'))
glob_info = glob_info.loc[glob_info.continent==sys.argv[1]]
countries = list(glob_info.ISO_3digit)
if len(countries) == 0:
print('FAILED: Please write the continents as follows: Africa, Asia, Central-America, Europe, North-America,Oceania, South-America')
with Pool(cpu_count()) as pool:
pool.map(country_run,countries,chunksize=1)
else:
print('FAILED: Either provide an ISO3 country name or a continent name')
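
# Minimal invocation sketches (assume the OSM extract exists under
# <data_path>/country_osm/<ISO3>.osm.pbf, as expected by country_run above):
#   python flow_model.py NLD      # run a single ISO3 country
#   python flow_model.py Europe   # run every country on that continent in parallel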
| 35.400668
| 145
| 0.636831
| 2,919
| 21,205
| 4.438164
| 0.188421
| 0.029641
| 0.019298
| 0.022076
| 0.343342
| 0.274334
| 0.209031
| 0.183404
| 0.163952
| 0.149904
| 0
| 0.021345
| 0.217873
| 21,205
| 599
| 146
| 35.400668
| 0.759301
| 0.212497
| 0
| 0.126183
| 0
| 0.003155
| 0.107006
| 0.005622
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059937
| false
| 0
| 0.07571
| 0
| 0.223975
| 0.012618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61cffba0eebf31780c12f21faf032f94e065f6a5
| 1,238
|
py
|
Python
|
offsite/core/utils.py
|
wh1te909/backup-offsite
|
694f773583eb825b44ff20c51598ac9e1106cd32
|
[
"MIT"
] | 4
|
2021-01-20T15:45:35.000Z
|
2021-07-09T02:15:31.000Z
|
offsite/core/utils.py
|
wh1te909/backup-offsite
|
694f773583eb825b44ff20c51598ac9e1106cd32
|
[
"MIT"
] | 6
|
2020-08-02T23:31:07.000Z
|
2021-09-22T19:19:50.000Z
|
offsite/core/utils.py
|
wh1te909/backup-offsite
|
694f773583eb825b44ff20c51598ac9e1106cd32
|
[
"MIT"
] | null | null | null |
from channels.auth import AuthMiddlewareStack
from knox.auth import TokenAuthentication
from django.contrib.auth.models import AnonymousUser
from channels.db import database_sync_to_async
@database_sync_to_async
def get_user(access_token):
try:
auth = TokenAuthentication()
token = access_token.decode().split("access_token=")[1]
user = auth.authenticate_credentials(token.encode())
except Exception:
return AnonymousUser()
else:
return user[0]
class KnoxAuthMiddlewareInstance:
"""
https://github.com/django/channels/issues/1399
"""
def __init__(self, scope, middleware):
self.middleware = middleware
self.scope = dict(scope)
self.inner = self.middleware.inner
async def __call__(self, receive, send):
q = self.scope["query_string"]
self.scope["user"] = await get_user(q)
inner = self.inner(self.scope)
return await inner(receive, send)
class KnoxAuthMiddleware:
def __init__(self, inner):
self.inner = inner
def __call__(self, scope):
return KnoxAuthMiddlewareInstance(scope, self)
KnoxAuthMiddlewareStack = lambda inner: KnoxAuthMiddleware(AuthMiddlewareStack(inner))
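
# A minimal wiring sketch (assumes a standard channels routing module;
# websocket_urlpatterns is hypothetical):
#   from channels.routing import ProtocolTypeRouter, URLRouter
#   application = ProtocolTypeRouter({
#       'websocket': KnoxAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
#   })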
| 25.265306
| 86
| 0.697092
| 136
| 1,238
| 6.132353
| 0.411765
| 0.064748
| 0.046763
| 0.045564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006129
| 0.209208
| 1,238
| 48
| 87
| 25.791667
| 0.845761
| 0.037157
| 0
| 0
| 0
| 0
| 0.02466
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.133333
| 0.033333
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d14e7bc92cdd86e7f3f92f3039ee396ac2a457
| 6,841
|
py
|
Python
|
unik/indexing.py
|
balbasty/unik
|
7b8b2a0989495eec7bc0db6c672ce904cbcb1063
|
[
"MIT"
] | null | null | null |
unik/indexing.py
|
balbasty/unik
|
7b8b2a0989495eec7bc0db6c672ce904cbcb1063
|
[
"MIT"
] | null | null | null |
unik/indexing.py
|
balbasty/unik
|
7b8b2a0989495eec7bc0db6c672ce904cbcb1063
|
[
"MIT"
] | null | null | null |
"""Access / change tensor shape."""
import tensorflow as tf
import numpy as np
from .magik import tensor_compat
from .alloc import zeros_like
from .types import has_tensor, as_tensor, cast, dtype
from .shapes import shape, reshape, flatten, transpose, unstack
from ._math_for_indexing import cumprod, minimum, maximum
from ._utils import pop
@tensor_compat
def gather(input, indices, validate_indices=None,
axis=None, batch_dims=0, name=None):
"""Gather / Take values from a tensor / array along an axis."""
if tf.is_tensor(input) or tf.is_tensor(indices) \
or tf.is_tensor(axis) or tf.is_tensor(batch_dims):
return tf.gather(input, indices, validate_indices,
axis, batch_dims, name)
else:
if batch_dims > 0:
raise NotImplementedError()
return np.take(input, indices, axis=axis, mode='raise')
@tensor_compat
def scatter(indices, updates, *args, **kwargs):
"""Scatter `updates` at `indices` into a tensor.
Signatures
----------
scatter(indices, updates, shape, mode='new', axis=0, name=None)
scatter(indices, updates, input, mode, axis=0, name=None)
Parameters
----------
indices - (*ind_shape, L) tensor_like[int]
ND-indices in which to place the `updates`. The last dimension
maps to dimensions of the output tensor.
updates - (*up_shape, *slice_shape) tensor_like or scalar
Values to place in the tensor.
shape - vector_like[int], if mode == 'new'
Shape of the output tensor.
input - (*shape) tensor_like, if mode != 'new'
Tensor in which to place `updates`.
mode - {'new', 'update', 'add', 'sub', 'min', 'max'}, default='new'
Scatter mode.
name - str, optional
A name for the operation.
Returns
-------
output - (*shape) tensor or array
Tensor with updated values.
"""
# Parse arguments
args = list(args)
kwargs = dict(kwargs)
mode = pop(args, 1) if len(args) > 1 else kwargs.pop('mode', 'new')
if mode == 'new':
input = []
_shape = pop(args, 0) if len(args) > 0 else kwargs.pop('shape', None)
else:
input = pop(args, 0) if len(args) > 0 else kwargs.pop('input', None)
_shape = shape(input)
name = pop(args, 0) if len(args) > 0 else kwargs.pop('name', None)
# Ensure tensors
if has_tensor([indices, updates, _shape, input], 'tf'):
updates = as_tensor(updates, 'tf')
indices = as_tensor(indices, 'tf')
elif has_tensor([indices, updates, _shape, input], 'np'):
updates = as_tensor(updates, 'np')
indices = as_tensor(indices, 'np')
else:
updates = as_tensor(updates)
indices = as_tensor(indices)
if mode == 'new':
# Mode new: allocate tensor and populate
        if has_tensor([indices, updates, _shape], 'tf'):
            return tf.scatter_nd(indices, updates, _shape, name=name)
else:
# np.put works with linear indices only.
# NOTE: with this implementation, ind_shape and up_shape
# must be exactly equal, not just broadcastable.
output = zeros_like(updates, shape=_shape)
indices = reshape(indices, [-1, shape(indices)[-1]])
indices = sub2ind(transpose(indices), _shape)
updates = flatten(updates)
np.put(output, indices, updates)
return output
else:
if has_tensor([indices, updates, input], 'tf'):
if mode == 'update':
scatter_fn = tf.tensor_scatter_nd_update
elif mode == 'add':
scatter_fn = tf.tensor_scatter_nd_add
elif mode == 'sub':
scatter_fn = tf.tensor_scatter_nd_sub
elif mode == 'min':
scatter_fn = tf.tensor_scatter_nd_min
elif mode == 'max':
scatter_fn = tf.tensor_scatter_nd_max
else:
raise ValueError('Unknown operation {}'.format(mode))
updates = cast(updates, dtype(input))
return scatter_fn(input, indices, updates, name=name)
else:
            # If mode != 'update', this is equivalent to:
            # 0) the left-hand side is the input tensor
            # 1) generate the right-hand side using scatter with mode 'new'
            # 2) apply op(LHS, RHS)
if mode == 'update':
output = input.copy()
indices = reshape(indices, [-1, shape(indices)[-1]])
indices = sub2ind(transpose(indices), _shape)
updates = flatten(updates)
np.put(output, indices, updates)
return output
elif mode == 'add':
op = lambda x, y: x + y
elif mode == 'sub':
op = lambda x, y: x - y
elif mode == 'min':
op = lambda x, y: minimum(x, y)
elif mode == 'max':
op = lambda x, y: maximum(x, y)
else:
raise ValueError('Unknown operation {}'.format(mode))
updates = scatter(indices, updates, shape=_shape, mode='new')
return op(input, updates)
@tensor_compat
def sub2ind(subs, shape):
"""Convert sub indices (i, j, k) into linear indices.
The rightmost dimension is the most rapidly changing one
-> if shape == [D, H, W], the strides are therefore [H*W, W, 1]
Parameters
----------
subs : (D, *shape) tensor_like
List of sub-indices. The first dimension is the number of dimension.
Each element should have the same number of elements and shape.
shape : (D,) vector_like
Size of each dimension. Its length should be the same as the
first dimension of ``subs``.
Returns
-------
ind : (*shape) tensor or array
Linear indices
"""
*subs, ind = unstack(subs)
stride = cumprod(shape[1:], reverse=True)
for i, s in zip(subs, stride):
ind = ind + as_tensor(i) * s
return ind
@tensor_compat
def where(cond, x=None, y=None, name=None):
"""Select values from two tensors based on a condition."""
if has_tensor([cond, x, y], 'tf'):
return tf.where(cond, x, y, name)
else:
if x is None and y is None:
return np.where(cond)
else:
return np.where(cond, x, y)
@tensor_compat
def boolean_mask(input, mask, axis=0, name='boolean_mask'):
"""Gather elements from a tensor / array using a mask."""
input = as_tensor(input)
if has_tensor([input, mask], 'tf'):
return tf.boolean_mask(input, mask, axis=axis, name=name)
else:
axis = axis or 0
slices = (slice(None, None),) * axis + (mask,) + (Ellipsis,)
return input.__getitem__(slices)
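
# A quick worked example of sub2ind above (row-major strides, so for shape
# [4, 5] the linear index of (i, j) is i*5 + j):
#   sub2ind([[0, 1], [2, 3]], [4, 5])  ->  [2, 8]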
| 35.262887
| 77
| 0.582517
| 874
| 6,841
| 4.45881
| 0.217391
| 0.04311
| 0.029253
| 0.021812
| 0.220683
| 0.185014
| 0.126508
| 0.126508
| 0.089556
| 0.089556
| 0
| 0.005427
| 0.299664
| 6,841
| 193
| 78
| 35.445596
| 0.807973
| 0.2887
| 0
| 0.357143
| 0
| 0
| 0.030647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044643
| false
| 0
| 0.071429
| 0
| 0.232143
| 0.008929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d192d69ecdae0462072ff12464ac90f01f69d0
| 1,478
|
py
|
Python
|
aleph/views/alerts_api.py
|
adikadashrieq/aleph
|
acc03197c10e511a279ae3a05120187223f173d2
|
[
"MIT"
] | 1
|
2019-06-18T21:35:59.000Z
|
2019-06-18T21:35:59.000Z
|
aleph/views/alerts_api.py
|
heartofstone/aleph
|
d66b6615d2bfa10c291c63754f53b468de8bebde
|
[
"MIT"
] | null | null | null |
aleph/views/alerts_api.py
|
heartofstone/aleph
|
d66b6615d2bfa10c291c63754f53b468de8bebde
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, request
from aleph.core import db
from aleph.model import Alert
from aleph.search import DatabaseQueryResult
from aleph.views.forms import AlertSchema
from aleph.views.serializers import AlertSerializer
from aleph.views.util import require, obj_or_404
from aleph.views.util import parse_request
from aleph.views.context import tag_request
blueprint = Blueprint('alerts_api', __name__)
@blueprint.route('/api/2/alerts', methods=['GET'])
def index():
require(request.authz.logged_in)
query = Alert.by_role_id(request.authz.id)
result = DatabaseQueryResult(request, query)
return AlertSerializer.jsonify_result(result)
@blueprint.route('/api/2/alerts', methods=['POST', 'PUT'])
def create():
require(request.authz.session_write)
data = parse_request(AlertSchema)
alert = Alert.create(data, request.authz.id)
db.session.commit()
tag_request(alert_id=alert.id)
return AlertSerializer.jsonify(alert)
@blueprint.route('/api/2/alerts/<int:alert_id>', methods=['GET'])
def view(alert_id):
require(request.authz.logged_in)
alert = obj_or_404(Alert.by_id(alert_id, role_id=request.authz.id))
return AlertSerializer.jsonify(alert)
@blueprint.route('/api/2/alerts/<int:alert_id>', methods=['DELETE'])
def delete(alert_id):
require(request.authz.session_write)
alert = obj_or_404(Alert.by_id(alert_id, role_id=request.authz.id))
alert.delete()
db.session.commit()
return ('', 204)
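
# A minimal registration sketch (hypothetical app factory; aleph's real
# wiring may differ):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(blueprint)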
| 31.446809
| 71
| 0.750338
| 207
| 1,478
| 5.198068
| 0.2657
| 0.066915
| 0.065056
| 0.066915
| 0.472119
| 0.288104
| 0.230483
| 0.230483
| 0.230483
| 0.230483
| 0
| 0.012327
| 0.121786
| 1,478
| 46
| 72
| 32.130435
| 0.816641
| 0
| 0
| 0.277778
| 0
| 0
| 0.075101
| 0.037889
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.25
| 0
| 0.472222
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d210a06894e407303586520efa2e44fe445461
| 11,283
|
py
|
Python
|
run.py
|
Acforest/LogPrompt
|
199766cea9988bc6e8b1c71352b090da68bbb71d
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
Acforest/LogPrompt
|
199766cea9988bc6e8b1c71352b090da68bbb71d
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
Acforest/LogPrompt
|
199766cea9988bc6e8b1c71352b090da68bbb71d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to train and evaluate either a regular supervised model or a PET/iPET model on
one of the supported tasks and datasets.
"""
import os
import log
import pet
import torch
import argparse
from pet.config import load_configs
from pet.tasks import PROCESSORS, UNLABELED_SET, TRAIN_SET, DEV_SET, TEST_SET, METRICS, DEFAULT_METRICS, load_examples
from pet.utils import eq_div
from pet.wrapper import WRAPPER_TYPES, MODEL_CLASSES
logger = log.get_logger('root')
def main():
parser = argparse.ArgumentParser(description="Command line interface for PET/iPET")
# Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data directory, which should contain the data files for the task")
parser.add_argument("--model_type", default=None, type=str, required=True, choices=MODEL_CLASSES.keys(),
help="The type of the pretrained language model to use")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to the pre-trained model or shortcut name")
parser.add_argument("--task_name", default=None, type=str, required=True, choices=PROCESSORS.keys(),
help="The name of the task to train/evaluate on")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written")
# PET-specific optional parameters
parser.add_argument("--wrapper_type", default="mlm", choices=WRAPPER_TYPES,
help="The wrapper type. Set this to 'mlm' for a masked language model like BERT or to 'plm' "
"for a permuted language model like XLNet")
parser.add_argument("--pattern_ids", default=[0], type=int, nargs='+',
help="The ids of the PVPs to be used")
parser.add_argument("--lm_training", action='store_true',
help="Whether to use language modeling as auxiliary task")
parser.add_argument("--alpha", default=0.9999, type=float,
help="Weighting term for the auxiliary language modeling task")
parser.add_argument("--temperature", default=2, type=float,
help="Temperature used for combining PVPs")
parser.add_argument("--verbalizer_file", default=None,
help="The path to a file to override default verbalizers")
parser.add_argument("--reduction", default='wmean', choices=['wmean', 'mean'],
help="Reduction strategy for merging predictions from multiple PET models. Select either "
"uniform weighting (mean) or weighting based on train set accuracy (wmean)")
parser.add_argument("--decoding_strategy", default='default', choices=['default', 'ltr', 'parallel'],
help="The decoding strategy with multiple masks")
parser.add_argument("--no_distillation", action='store_true',
help="If set to true, no distillation is performed")
parser.add_argument("--repetitions", default=3, type=int,
help="The number of times to repeat training and testing with different seeds")
parser.add_argument("--max_seq_length", default=256, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation")
parser.add_argument("--per_gpu_unlabeled_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for auxiliary language modeling examples")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass")
parser.add_argument("--num_train_epochs", default=3, type=float,
help="Total number of training epochs to perform")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform, override num_train_epochs")
# Other optional parameters
parser.add_argument("--train_examples", default=-1, type=int,
help="The total number of train examples to use, where -1 equals all examples")
parser.add_argument("--eval_examples", default=-1, type=int,
help="The total number of evaluation examples to use, where -1 equals all examples")
parser.add_argument("--dev_examples", default=-1, type=int,
help="The total number of development examples to use, where -1 equals all examples")
parser.add_argument("--unlabeled_examples", default=-1, type=int,
help="The total number of unlabeled examples to use, where -1 equals all examples")
parser.add_argument("--split_examples_evenly", action='store_true',
help="If true, train examples are not chosen randomly, but split evenly across all labels")
parser.add_argument("--cache_dir", default="pretrained", type=str,
help="Where to store the pre-trained models downloaded")
parser.add_argument("--learning_rate", default=1e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--early_stop_epochs", default=5, type=int,
help="Threshold epochs for early stop")
parser.add_argument("--logging_steps", type=int, default=50,
help="Log every X updates steps")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument("--overwrite_output_dir", action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument("--seed", type=int, default=42,
help="random seed for initialization")
parser.add_argument("--do_train", action='store_true',
help="Whether to perform training")
parser.add_argument("--do_eval", action='store_true',
help="Whether to perform evaluation")
parser.add_argument("--priming", action='store_true',
help="Whether to use priming for evaluation")
parser.add_argument("--eval_set", choices=['dev', 'test'], default='test',
help="Whether to perform evaluation on the dev set or the test set")
parser.add_argument("--embed_size", default=128, type=int, help="The embedding size of prompt")
parser.add_argument("--prompt_encoder_type", type=str, default="lstm", choices=['lstm', 'mlp', 'manual'],
help="The type of encoder")
parser.add_argument("--eval_every_step", default=20, type=int, help="Evaluate between two `eval_every_step` steps")
args = parser.parse_args()
logger.info("Parameters: {}".format(args))
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) \
and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    assert args.do_train or args.do_eval, "At least one of `do_train` or `do_eval` must be set"
# Setup CUDA, GPU & distributed training
args.device = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
    args.n_gpu = torch.cuda.device_count() if args.device == "cuda" else 0  # report 0 GPUs when forced onto CPU
# Prepare task
if args.task_name not in PROCESSORS:
raise ValueError("Task '{}' not found".format(args.task_name))
processor = PROCESSORS[args.task_name]()
args.label_list = processor.get_labels()
train_ex_per_label, eval_ex_per_label, dev_ex_per_label = None, None, None
train_ex, eval_ex, dev_ex = args.train_examples, args.eval_examples, args.dev_examples
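    # With --split_examples_evenly, turn the requested totals into per-label
    # quotas; eq_div splits a total as evenly as possible across the labels.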
if args.split_examples_evenly:
train_ex_per_label = eq_div(args.train_examples, len(args.label_list)) if args.train_examples != -1 else -1
eval_ex_per_label = eq_div(args.eval_examples, len(args.label_list)) if args.eval_examples != -1 else -1
dev_ex_per_label = eq_div(args.dev_examples, len(args.label_list)) if args.dev_examples != -1 else -1
train_ex, eval_ex, dev_ex = None, None, None
eval_set = TEST_SET if args.eval_set == 'test' else DEV_SET
train_data = load_examples(
args.task_name, args.data_dir, TRAIN_SET, num_examples=train_ex, num_examples_per_label=train_ex_per_label)
eval_data = load_examples(
args.task_name, args.data_dir, eval_set, num_examples=eval_ex, num_examples_per_label=eval_ex_per_label)
dev_data = load_examples(
args.task_name, args.data_dir, DEV_SET, num_examples=dev_ex, num_examples_per_label=dev_ex_per_label)
unlabeled_data = load_examples(
args.task_name, args.data_dir, UNLABELED_SET, num_examples=args.unlabeled_examples)
args.metrics = METRICS.get(args.task_name, DEFAULT_METRICS)
pet_model_cfg, pet_train_cfg, pet_eval_cfg = load_configs(args)
pet.train_pet(train_data=train_data,
eval_data=eval_data,
dev_data=dev_data,
unlabeled_data=unlabeled_data,
model_config=pet_model_cfg,
train_config=pet_train_cfg,
eval_config=pet_eval_cfg,
do_train=args.do_train,
do_eval=args.do_eval,
pattern_ids=args.pattern_ids,
output_dir=args.output_dir,
repetitions=args.repetitions,
reduction=args.reduction,
no_distillation=args.no_distillation,
seed=args.seed)
if __name__ == "__main__":
main()
| 58.46114
| 119
| 0.660197
| 1,498
| 11,283
| 4.781709
| 0.215621
| 0.056541
| 0.106799
| 0.02122
| 0.226721
| 0.17744
| 0.160128
| 0.107078
| 0.107078
| 0.074131
| 0
| 0.006516
| 0.238323
| 11,283
| 192
| 120
| 58.765625
| 0.826972
| 0.070637
| 0
| 0
| 0
| 0.006667
| 0.329542
| 0.016821
| 0
| 0
| 0
| 0
| 0.006667
| 1
| 0.006667
| false
| 0.006667
| 0.06
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d24122d7792980c0b72c95b9dc3ec6c9efd631
| 2,282
|
py
|
Python
|
data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Activation, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
def center_normalize(x):
"""
Custom activation for online sample-wise center and std. normalization
"""
return (x - K.mean(x)) / K.std(x)
def rmse(y_true, y_pred):
    """Explicit root-mean-squared-error loss; the 'rmse' string alias only
    existed in very early Keras releases."""
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
def get_model():
model = Sequential()
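    # Channels-first input: presumably 45 stacked frames/slices of 64x64 each
    # (Theano-style dim ordering, as used by this vintage of the Keras API).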
model.add(Activation(activation=center_normalize, input_shape=(45, 64, 64)))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, border_mode='valid'))
model.add(Activation('relu'))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(96, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(96, 3, 3, border_mode='valid'))
model.add(Activation('relu'))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(128, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(128, 2, 2, border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(256, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(256, 2, 2, border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(512, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, W_regularizer=l2(3e-3)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
adam = Adam(lr=0.0001)
    model.compile(optimizer=adam, loss=rmse)  # was loss='rmse', an alias dropped after early Keras versions
return model
| 34.575758
| 82
| 0.659509
| 318
| 2,282
| 4.672956
| 0.226415
| 0.193809
| 0.133244
| 0.148048
| 0.631225
| 0.631225
| 0.611036
| 0.611036
| 0.583445
| 0.583445
| 0
| 0.064293
| 0.175285
| 2,282
| 65
| 83
| 35.107692
| 0.725292
| 0.030675
| 0
| 0.44898
| 0
| 0
| 0.039455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.122449
| 0
| 0.204082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d29e48cb817ece86e476bffbf91b00d5532c33
| 8,685
|
py
|
Python
|
BuildDeb.py
|
KOLANICH/GraalVM_deb_packages_CI
|
f41786b4daa11efebe24402f5000111137365b4f
|
[
"Apache-2.0",
"Unlicense"
] | null | null | null |
BuildDeb.py
|
KOLANICH/GraalVM_deb_packages_CI
|
f41786b4daa11efebe24402f5000111137365b4f
|
[
"Apache-2.0",
"Unlicense"
] | null | null | null |
BuildDeb.py
|
KOLANICH/GraalVM_deb_packages_CI
|
f41786b4daa11efebe24402f5000111137365b4f
|
[
"Apache-2.0",
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import sys
import struct
import re
import os
from itertools import chain
import warnings
import tarfile
import sh
from tqdm import tqdm
from pydebhelper import *
from getLatestVersionAndURLWithGitHubAPI import getTargets
# `OrderedDict`, `Path` and `linesep` are used below; import them explicitly in
# case pydebhelper's star import does not re-export them.
from collections import OrderedDict
from os import linesep
from pathlib import Path
def genGraalProvides(start=6, end=8): # java 12 still not supported yet
graalvmProvides = ["default-jre", "default-jre-headless", "java-compiler"]
for i in range(start, end + 1):
si = str(i)
graalvmProvides += ["openjdk-" + si + "-jre", "openjdk-" + si + "-jre-headless", "java" + si + "-runtime", "java" + si + "-runtime-headless", "java" + si + "-sdk-headless"]
return graalvmProvides
config = OrderedDict()
config["llvm"] = {
"descriptionLong": "LLVM engine for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/llvm/",
"rip": {
"bin": ["lli"],
"other": ["jre/languages/llvm"]
}
}
config["js"] = {
"descriptionLong": "JavaScript engine & node.js runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/js/",
"rip": {
"bin": ["js", "node", "npm"],
"other": ["jre/languages/js", "jre/lib/graalvm/graaljs-launcher.jar"]
}
}
config["python"] = {
"descriptionLong": "python runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/python/",
"rip": {
"bin": ["graalpython"],
"other": ["jre/languages/python", "jre/lib/graalvm/graalpython-launcher.jar", "LICENSE_GRAALPYTHON", "jre/languages/python/LICENSE_GRAALPYTHON"]
}
}
config["ruby"] = {
"descriptionLong": "ruby runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/ruby/",
"rip": {
"bin": ["truffleruby", "ruby", "bundle", "bundler", "gem", "irb", "rake", "rdoc", "ri"],
"other": ["jre/languages/ruby", "jre/lib/boot/truffleruby-services.jar", "jre/lib/graalvm/truffleruby-launcher.jar", "LICENSE_TRUFFLERUBY.md", "3rd_party_licenses_truffleruby.txt"]
}
}
config["r"] = {
"descriptionLong": "R runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/R/",
"rip": {
"bin": ["R", "Rscript"],
"other": ["jre/languages/R", "LICENSE_FASTR", "3rd_party_licenses_fastr.txt"]
}
}
config["gu"] = {
"descriptionLong": "Package manager for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/graal-updater/",
"rip": {
"bin": ["gu"],
"other": ["jre/lib/installer", "bin/gu"]
}
}
config["polyglot"] = {
"descriptionLong": "Polyglot for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/polyglot/",
"rip": {
"bin": ["polyglot"],
"other": ["jre/lib/polyglot"]
}
}
config["samples"] = {
"descriptionLong": "Example code for GraalVM",
"homepage": "https://www.graalvm.org/",
"rip": {
"other": ["sample"]
}
}
config["visualvm"] = {
"descriptionLong": "VisualVM for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/tools/#heap-viewer",
"rip": {
"bin": ["jvisualvm"],
"other": ["lib/visualvm"]
}
}
def removeUnneededSources(unpackedDir):
for f in chain(unpackedDir.glob("**/src.zip"), unpackedDir.glob("**/*.src.zip")):
f.unlink()
def ripGraalPackage(unpackedDir, packagesDir, version, maintainer, builtDir):
mainPackageName = "graalvm"
systemPrefix = "usr/lib/jvm/graalvm-ce-amd64"
removeUnneededSources(unpackedDir)
results = []
for pkgPostfix, pkgCfg in config.items():
pkgCfg = type(pkgCfg)(pkgCfg)
rip = pkgCfg["rip"]
del pkgCfg["rip"]
with Package(mainPackageName + "-" + pkgPostfix, packagesDir, version=version, section="java", maintainer=maintainer, builtDir=builtDir, **pkgCfg) as pkg:
if "other" in rip:
for el in rip["other"]:
pkg.rip(unpackedDir / el, systemPrefix + "/" + el)
if "bin" in rip:
for el in rip["bin"]:
a = "bin/" + el
aUnp = unpackedDir / a
if aUnp.exists() or aUnp.is_symlink():
pkg.rip(aUnp, systemPrefix + "/" + a)
else:
warnings.warn(str(aUnp) + " doesn't exist")
b = "jre/" + a
bUnp = unpackedDir / b
					if bUnp.exists() or bUnp.is_symlink():  # fixed copy-paste: test bUnp here, not aUnp
pkg.rip(bUnp, systemPrefix + "/" + b)
else:
warnings.warn(str(bUnp) + " doesn't exist")
results.append(pkg)
with Package(mainPackageName, packagesDir, version=version, section="java", homepage="https://github.com/oracle/graal/releases", provides=genGraalProvides(), descriptionShort="graalvm", descriptionLong="GraalVM is a high-performance, embeddable, polyglot virtual machine for running applications written in JavaScript, Python, Ruby, R, JVM-based languages like Java, Scala, Kotlin, and LLVM-based languages such as C and C++. \nAdditionally, GraalVM allows efficient interoperability between programming languages and compiling Java applications ahead-of-time into native executables for faster startup time and lower memory overhead.", maintainer=maintainer, builtDir=builtDir) as graalVM:
graalVM.rip(unpackedDir, systemPrefix)
results.append(graalVM)
return results
def isSubdir(parent: Path, child: Path) -> bool:
	parent = parent.absolute().resolve()
	try:
		child.absolute().resolve().relative_to(parent)
	except ValueError:
		# relative_to raises when child does not live under parent
		return False
	return True
def unpack(archPath, extrDir):
extrDir = extrDir.resolve()
packedSize = archPath.stat().st_size
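	# A gzip stream stores its uncompressed size (mod 2**32) in the trailing
	# 4 bytes (the ISIZE field); read it here to size the progress bar.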
with archPath.open("rb") as arch:
arch.seek(packedSize - 4)
unpackedSize = struct.unpack("<I", arch.read(4))[0]
with tarfile.open(archPath, "r:gz") as arch:
with tqdm(total=unpackedSize, unit="B", unit_divisor=1024, unit_scale=True) as pb:
for f in arch:
fp = (extrDir / f.name).absolute()
if isSubdir(extrDir, fp):
if fp.is_file() or fp.is_symlink():
fp.unlink()
fp.parent.mkdir(parents=True, exist_ok=True)
arch.extract(f, extrDir, set_attrs=True)
pb.set_postfix(file=str(fp.relative_to(extrDir)), refresh=False)
pb.update(f.size)
currentProcFileDescriptors = Path("/proc") / str(os.getpid()) / "fd"
fj = sh.firejail.bake(noblacklist=str(currentProcFileDescriptors), _fg=True)
aria2c = fj.aria2c.bake(_fg=True, **{"continue": "true", "check-certificate": "true", "enable-mmap": "true", "optimize-concurrent-downloads": "true", "j": 16, "x": 16, "file-allocation": "falloc"})
def download(targets):
args = []
for dst, uri in targets.items():
args += [uri, linesep, " ", "out=", str(dst), linesep]
pO, pI = os.pipe()
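	# Write the download list into an anonymous pipe and hand aria2c the
	# /proc/<pid>/fd path of the read end, so no temp file touches the disk.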
with os.fdopen(pI, "w") as pIF:
pIF.write("".join(args))
pIF.flush()
try:
aria2c(**{"input-file": str(currentProcFileDescriptors / str(pO))})
finally:
os.close(pO)
		try:
			os.close(pI)
		except OSError:
			# pI was already closed by the fdopen context manager above
			pass
vmTagRx = re.compile("^vm-((?:\\d+\\.){2}\\d+(?:-rc\\d+))?$")
vmTitleMarker = "GraalVM Community Edition .+$"
platformMarker = "linux-amd64"
versionFileNameMarker = "[\\w\\.-]+"
releaseFileNameMarker = versionFileNameMarker + "-" + platformMarker
def getLatestGraalVMRelease():
downloadFileNameRx = re.compile("^" + releaseFileNameMarker + "\\.tar\\.gz$")
return max(getTargets("oracle/graal", re.compile("^" + vmTitleMarker), vmTagRx, downloadFileNameRx))
def getLatestGraalRuntimeRelease(repoPath):
downloadFileNameRx = re.compile(".+installable-ce-" + releaseFileNameMarker + "\\.jar$")
return max(getTargets(repoPath, re.compile(".+- " + vmTitleMarker), vmTagRx, downloadFileNameRx))
def doBuild():
thisDir = Path(".")
downloadDir = Path(thisDir / "downloads")
archPath = Path(downloadDir / "graalvm-github.tar.gz")
unpackDir = thisDir / "graalvm-unpacked"
packagesRootsDir = thisDir / "packagesRoots"
builtDir = thisDir / "packages"
repoDir = thisDir / "public" / "repo"
selT = getLatestGraalVMRelease()
print("Selected release:", selT, file=sys.stderr)
runtimesRepos = {"python": "graalvm/graalpython", "ruby": "oracle/truffleruby", "R": "oracle/fastr"}
runtimeReleases = {k: getLatestGraalRuntimeRelease(v) for k, v in runtimesRepos.items()}
runtimeFiles = {(downloadDir / (k + ".jar")): v.uri for k, v in runtimeReleases.items()}
downloadTargets = {archPath: selT.uri, **runtimeFiles}
download(downloadTargets)
unpack(archPath, unpackDir)
graalUnpackedRoot = unpackDir / ("graalvm-ce-" + selT.version)
guCmd = fj.bake(str(graalUnpackedRoot / "bin/gu"), _fg=True)
guCmd("-L", "install", *runtimeFiles.keys())
builtDir.mkdir(parents=True, exist_ok=True)
maintainer = Maintainer()
pkgs = ripGraalPackage(graalUnpackedRoot, packagesRootsDir, selT.version, maintainer=maintainer, builtDir=builtDir)
for pkg in pkgs:
pkg.build()
with Repo(root=repoDir, descr=maintainer.name+"'s repo for apt with GraalVM binary packages, built from the official builds on GitHub") as r:
for pkg in pkgs:
r += pkg
print(r.packages2add)
if __name__ == "__main__":
doBuild()
| 32.773585
| 691
| 0.687737
| 1,041
| 8,685
| 5.705091
| 0.331412
| 0.021889
| 0.027277
| 0.034854
| 0.146658
| 0.134534
| 0.103553
| 0.097491
| 0.097491
| 0.086378
| 0
| 0.003736
| 0.137018
| 8,685
| 264
| 692
| 32.897727
| 0.788659
| 0.006102
| 0
| 0.081731
| 0
| 0.004808
| 0.338355
| 0.045423
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043269
| false
| 0.004808
| 0.052885
| 0
| 0.125
| 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d2ae9ec01343c7273afc66fcb5912f5895801a
| 6,267
|
py
|
Python
|
mergify_engine/utils.py
|
Madhu-1/mergify-engine
|
9ca4f4697cc825230b1584f5587f10393cabc971
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/utils.py
|
Madhu-1/mergify-engine
|
9ca4f4697cc825230b1584f5587f10393cabc971
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/utils.py
|
Madhu-1/mergify-engine
|
9ca4f4697cc825230b1584f5587f10393cabc971
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import hashlib
import hmac
import logging
import shutil
import subprocess
import sys
import tempfile
from billiard import current_process
import celery.app.log
import daiquiri
import github
import redis
from mergify_engine import config
LOG = daiquiri.getLogger(__name__)
global REDIS_CONNECTION_CACHE
REDIS_CONNECTION_CACHE = None
def get_redis_for_cache():
global REDIS_CONNECTION_CACHE
if REDIS_CONNECTION_CACHE is None:
REDIS_CONNECTION_CACHE = redis.StrictRedis.from_url(
config.STORAGE_URL, decode_responses=True,
)
p = current_process()
REDIS_CONNECTION_CACHE.client_setname("cache:%s" % p.name)
return REDIS_CONNECTION_CACHE
global REDIS_CONNECTION_HTTP_CACHE
REDIS_CONNECTION_HTTP_CACHE = None
def get_redis_for_http_cache():
global REDIS_CONNECTION_HTTP_CACHE
if REDIS_CONNECTION_HTTP_CACHE is None:
REDIS_CONNECTION_HTTP_CACHE = redis.StrictRedis.from_url(config.HTTP_CACHE_URL)
p = current_process()
REDIS_CONNECTION_HTTP_CACHE.client_setname("http-cache:%s" % p.name)
return REDIS_CONNECTION_HTTP_CACHE
def utcnow():
return datetime.datetime.now(tz=datetime.timezone.utc)
def unicode_truncate(s, length, encoding="utf-8"):
"""Truncate a string to length in bytes.
:param s: The string to truncate.
:param length: The length in number of bytes — not characters."""
return s.encode(encoding)[:length].decode(encoding, errors="ignore")
class CustomFormatter(
daiquiri.formatter.ColorExtrasFormatter, celery.app.log.TaskFormatter
):
pass
CELERY_EXTRAS_FORMAT = (
"%(asctime)s [%(process)d] %(color)s%(levelname)-8.8s "
"[%(task_id)s] "
"%(name)s%(extras)s: %(message)s%(color_stop)s"
)
def GithubPullRequestLog(self):
return daiquiri.getLogger(
__name__,
gh_owner=self.base.user.login,
gh_repo=self.base.repo.name,
gh_private=self.base.repo.private,
gh_branch=self.base.ref,
gh_pull=self.number,
gh_pull_url=self.html_url,
gh_pull_state=("merged" if self.merged else (self.mergeable_state or "none")),
)
github.PullRequest.PullRequest.log = property(GithubPullRequestLog)
def setup_logging():
outputs = []
if config.LOG_STDOUT:
outputs.append(
daiquiri.output.Stream(
sys.stdout,
formatter=CustomFormatter(fmt=CELERY_EXTRAS_FORMAT),
level=config.LOG_STDOUT_LEVEL,
)
)
if config.LOG_DATADOG:
outputs.append(daiquiri.output.Datadog())
daiquiri.setup(
outputs=outputs, level=(logging.DEBUG if config.DEBUG else logging.INFO),
)
daiquiri.set_default_log_levels(
[
("celery", "INFO"),
("kombu", "WARN"),
("github.Requester", "WARN"),
("urllib3.connectionpool", "WARN"),
("urllib3.util.retry", "WARN"),
("vcr", "WARN"),
("httpx", "WARN"),
("cachecontrol", "WARN"),
]
)
config.log()
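# GitHub signs webhook payloads with HMAC-SHA1 (the X-Hub-Signature header);
# this computes the matching digest from the shared webhook secret.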
def compute_hmac(data):
mac = hmac.new(
config.WEBHOOK_SECRET.encode("utf8"), msg=data, digestmod=hashlib.sha1
)
return str(mac.hexdigest())
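# The "groot" preview media type enables GitHub's "list pull requests
# associated with a commit" endpoint, which was still a preview API at the time.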
def get_github_pulls_from_sha(repo, sha):
try:
return list(
github.PaginatedList.PaginatedList(
github.PullRequest.PullRequest,
repo._requester,
"%s/commits/%s/pulls" % (repo.url, sha),
None,
headers={"Accept": "application/vnd.github.groot-preview+json"},
)
)
except github.GithubException as e:
if e.status in [404, 422]:
return []
raise
class Gitter(object):
def __init__(self):
self.tmp = tempfile.mkdtemp(prefix="mergify-gitter")
LOG.info("working in: %s", self.tmp)
def __call__(self, *args, **kwargs): # pragma: no cover
LOG.info("calling: %s", " ".join(args))
kwargs["cwd"] = self.tmp
kwargs["stderr"] = subprocess.STDOUT
try:
return subprocess.check_output(["git"] + list(args), **kwargs)
except subprocess.CalledProcessError as e:
LOG.info("output: %s", e.output)
raise
def cleanup(self):
LOG.info("cleaning: %s", self.tmp)
try:
self("credential-cache", "--socket=%s/.git/creds/socket" % self.tmp, "exit")
except subprocess.CalledProcessError: # pragma: no cover
LOG.warning("git credential-cache exit fail")
shutil.rmtree(self.tmp)
def configure(self):
self("config", "user.name", "%s-bot" % config.CONTEXT)
self("config", "user.email", config.GIT_EMAIL)
# Use one git cache daemon per Gitter
self("config", "credential.useHttpPath", "true")
self(
"config",
"credential.helper",
"cache --timeout=300 --socket=%s/.git/creds/socket" % self.tmp,
)
def add_cred(self, username, password, path):
domain = config.GITHUB_DOMAIN
self(
"credential",
"approve",
input=(
"url=https://%s:%s@%s/%s\n\n" % (username, password, domain, path)
).encode("utf8"),
)
@contextlib.contextmanager
def ignore_client_side_error():
try:
yield
except github.GithubException as e:
if 400 <= e.status < 500:
return
raise
def Github(*args, **kwargs):
kwargs["base_url"] = "https://api.%s" % config.GITHUB_DOMAIN
return github.Github(*args, **kwargs)
| 27.977679
| 88
| 0.6322
| 749
| 6,267
| 5.142857
| 0.368491
| 0.054517
| 0.036345
| 0.043614
| 0.126947
| 0.095535
| 0.031153
| 0
| 0
| 0
| 0
| 0.006811
| 0.250359
| 6,267
| 223
| 89
| 28.103139
| 0.812686
| 0.1251
| 0
| 0.107595
| 0
| 0
| 0.129677
| 0.035583
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094937
| false
| 0.018987
| 0.094937
| 0.012658
| 0.272152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d440e6d71c032e6b0102e0319c9ad174f35ff4
| 1,750
|
py
|
Python
|
milefrienddb/models/vehicles.py
|
jcrjaci/mil_test
|
ed54f55c5aacd8ffd110b7c173422dbd0cac631f
|
[
"MIT"
] | null | null | null |
milefrienddb/models/vehicles.py
|
jcrjaci/mil_test
|
ed54f55c5aacd8ffd110b7c173422dbd0cac631f
|
[
"MIT"
] | null | null | null |
milefrienddb/models/vehicles.py
|
jcrjaci/mil_test
|
ed54f55c5aacd8ffd110b7c173422dbd0cac631f
|
[
"MIT"
] | null | null | null |
"""Vehicle's app models."""
import uuid
from django.db import models
from .clients import Client
class Vehicle(models.Model):
"""Model representing a vehicle."""
road_worthiness_path = 'vehicles/certs/road_worthiness'
ownership_path = 'vehicles/certs/ownership'
photo_path = 'vehicles/photos'
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    client = models.ForeignKey(Client, null=True, on_delete=models.SET_NULL)  # on_delete is required since Django 2.0; SET_NULL matches null=True
make = models.CharField(max_length=20)
model = models.CharField(max_length=20)
year = models.IntegerField(null=True)
license_plate_number = models.CharField(max_length=20)
tracker_id = models.CharField(max_length=64)
car_value = models.FloatField(null=True)
cert_road_worthiness = models.FileField(upload_to=road_worthiness_path)
cert_ownership = models.FileField(upload_to=ownership_path)
policy_number = models.CharField(max_length=255)
photo = models.FileField(upload_to=photo_path)
date_insurance = models.DateTimeField(null=True)
premium_paid = models.FloatField(null=True)
bonus_paid = models.FloatField(null=True)
net_premium = models.FloatField(null=True)
driven_meters = models.IntegerField(default=0)
driven_minutes = models.IntegerField(default=0)
total_fuel_consumption = models.FloatField(null=True, blank=True)
car_health = models.TextField(null=True)
date_created = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
def __str__(self):
"""String representation of the object."""
return "{0}, {1}, {2}".format(self.make, self.model, self.license_plate_number)
class Meta:
db_table = 'vehicles_vehicle'
app_label = 'milefrienddb'
| 38.043478
| 87
| 0.737714
| 222
| 1,750
| 5.594595
| 0.418919
| 0.057971
| 0.072464
| 0.096618
| 0.136876
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.154857
| 1,750
| 45
| 88
| 38.888889
| 0.828262
| 0.050286
| 0
| 0
| 0
| 0
| 0.066829
| 0.032807
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.088235
| 0
| 0.941176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d6182a3cde9be8c7c4791931417d4e0d9e7b55
| 187
|
py
|
Python
|
ejercicio_4.py
|
Laurardila440/taller-de-secuencias
|
9db216d2431661e0777273fc5b8360a316d7dbd2
|
[
"Apache-2.0"
] | null | null | null |
ejercicio_4.py
|
Laurardila440/taller-de-secuencias
|
9db216d2431661e0777273fc5b8360a316d7dbd2
|
[
"Apache-2.0"
] | null | null | null |
ejercicio_4.py
|
Laurardila440/taller-de-secuencias
|
9db216d2431661e0777273fc5b8360a316d7dbd2
|
[
"Apache-2.0"
] | null | null | null |
"""
Entradas
compra-->int-->c
salidas
Descuento-->flot-->d
"""
c=float(input("digite compra"))
#caja negra
d=(c*0.15)
total=(c-d)
#Salidas
print("el total a pagar es de :"+str(total))
| 14.384615
| 44
| 0.641711
| 32
| 187
| 3.75
| 0.71875
| 0.033333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018293
| 0.122995
| 187
| 12
| 45
| 15.583333
| 0.713415
| 0.395722
| 0
| 0
| 0
| 0
| 0.359223
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d6aa3833e84422d5bd54157900ea1d35ffca0b
| 878
|
py
|
Python
|
429.py
|
geethakamath18/Leetcode
|
8e55e0a47ee35ed100b30dda6682c7ce1033d4b2
|
[
"MIT"
] | null | null | null |
429.py
|
geethakamath18/Leetcode
|
8e55e0a47ee35ed100b30dda6682c7ce1033d4b2
|
[
"MIT"
] | null | null | null |
429.py
|
geethakamath18/Leetcode
|
8e55e0a47ee35ed100b30dda6682c7ce1033d4b2
|
[
"MIT"
] | null | null | null |
#LeetCode problem 429: N-ary Tree Level Order Traversal
from typing import List
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
    def levelOrder(self, root: 'Node') -> List[List[int]]:
        res = []
        h = self.getHeight(root)
        for i in range(1, h + 1):
            a = []
            self.getLevelOrder(root, a, i)
            res.append(a)
        return res
    def getHeight(self, root: 'Node') -> int:
        if root is None:
            return 0
        m = 1
        for child in root.children:
            m = max(self.getHeight(child) + 1, m)
        return m
    def getLevelOrder(self, root: 'Node', l: List, level: int):
        if level == 1:
            l.append(root.val)
            return  # nothing below this level is needed on this pass
        for child in root.children:
            self.getLevelOrder(child, l, level - 1)
| 26.606061
| 61
| 0.544419
| 116
| 878
| 4.086207
| 0.362069
| 0.050633
| 0.075949
| 0.059072
| 0.092827
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017094
| 0.333713
| 878
| 32
| 62
| 27.4375
| 0.793162
| 0.223235
| 0
| 0.095238
| 0
| 0
| 0.017831
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d7cc4850de782acf97ce8fd6bae60d5d5eb06f
| 544
|
py
|
Python
|
PyhonServer/app-client.py
|
sixfourtwo/auhack19
|
65b94c6cbdbfbd50e355c12b8ca2792b3b086321
|
[
"Apache-2.0"
] | null | null | null |
PyhonServer/app-client.py
|
sixfourtwo/auhack19
|
65b94c6cbdbfbd50e355c12b8ca2792b3b086321
|
[
"Apache-2.0"
] | null | null | null |
PyhonServer/app-client.py
|
sixfourtwo/auhack19
|
65b94c6cbdbfbd50e355c12b8ca2792b3b086321
|
[
"Apache-2.0"
] | null | null | null |
# importing the requests library
import requests
import json
# api-endpoint
URL = "http://127.0.0.1:80/water_mark"
# defining a params dict for the parameters to be sent to the API
# data is picture data
# tagString is the text to embed into picture.
data = {
"data":"This is the original text",
"tagString":" Yesyesyes"
}
PARAMS = json.dumps(data)
rPost = requests.post(url=URL, data=PARAMS)  # send the JSON string as the request body
data1 = json.loads(rPost.text)
#print("waterMarked data: " + rPost.text )
print("DATA: \n" + data1["data"])
| 22.666667
| 66
| 0.6875
| 83
| 544
| 4.493976
| 0.554217
| 0.058981
| 0.075067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022624
| 0.1875
| 544
| 23
| 67
| 23.652174
| 0.821267
| 0.430147
| 0
| 0
| 0
| 0
| 0.298013
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d90f523acdcf1af2ba8df7242ffe2e2fdeac93
| 9,827
|
py
|
Python
|
memnet.py
|
404akhan/memnet
|
a8cf9e0a480575d9d36de6fa3357f667d64e0b05
|
[
"BSD-3-Clause"
] | 1
|
2018-02-01T05:17:13.000Z
|
2018-02-01T05:17:13.000Z
|
memnet.py
|
404akhan/memnet
|
a8cf9e0a480575d9d36de6fa3357f667d64e0b05
|
[
"BSD-3-Clause"
] | null | null | null |
memnet.py
|
404akhan/memnet
|
a8cf9e0a480575d9d36de6fa3357f667d64e0b05
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch import nn, autograd
from torch.utils.data import DataLoader
from babi import BabiDataset, pad_collate
from torch.nn.utils import clip_grad_norm
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.fastest = True
class MemoryCell(nn.Module):
def __init__(self, num_mem_slots, embed_dim):
super(MemoryCell, self).__init__()
self.num_mem_slots = num_mem_slots
self.embed_dim = embed_dim
# Memory update linear layers.
self.U = nn.Linear(embed_dim, embed_dim)
self.V = nn.Linear(embed_dim, embed_dim, bias=False)
self.W = nn.Linear(embed_dim, embed_dim, bias=False)
self.prelu_memory = nn.PReLU(init=1)
init.xavier_normal(self.U.weight)
init.xavier_normal(self.V.weight)
init.xavier_normal(self.W.weight)
def forward(self, inputs, keys):
memories = keys
memory_inputs = inputs
for index, sentence in enumerate(memory_inputs):
# Compute memory updates.
sentence = sentence.unsqueeze(1).repeat(1, self.num_mem_slots, 1)
sentence = sentence.view_as(memories)
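            # Gate g_j = sigmoid(s . (h_j + w_j)): how strongly each memory slot
            # (plus its key) matches the current sentence, as in Recurrent
            # Entity Networks (Henaff et al., 2017).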
memory_gates = F.sigmoid((sentence * (memories + keys)).sum(dim=-1))
memory_gates = memory_gates.expand_as(memories)
candidate_memories = self.prelu_memory(self.U(memories) + self.V(sentence) + self.W(keys))
updated_memories = memories + memory_gates * candidate_memories
updated_memories = updated_memories / (
updated_memories.norm(p=2, dim=-1).expand_as(updated_memories) + 1e-12)
memories = updated_memories
return memories
class RecurrentEntityNetwork(nn.Module):
def __init__(self, hidden_dim, max_num_sentences=150, vocab_size=50):
super(RecurrentEntityNetwork, self).__init__()
self.max_num_sentences = max_num_sentences
self.embed_dim = hidden_dim
self.num_mem_slots = 20
self.vocab_size = vocab_size
self.memory_mask = nn.Parameter(torch.randn(max_num_sentences, 1))
self.question_mask = nn.Parameter(torch.randn(max_num_sentences, 1))
self.embedding = nn.Embedding(vocab_size + self.num_mem_slots, hidden_dim, padding_idx=0)
init.uniform(self.embedding.weight, a=-(3 ** 0.5), b=3 ** 0.5)
self.cell = MemoryCell(self.num_mem_slots, hidden_dim)
# Fully connected linear layers.
self.C = nn.Linear(hidden_dim, hidden_dim)
self.H = nn.Linear(hidden_dim, hidden_dim, bias=False)
self.Z = nn.Linear(hidden_dim, vocab_size, bias=False)
        self.prelu_outputs = nn.ReLU()  # note: a plain ReLU despite the "prelu" name
# Initialize weights.
init.xavier_normal(self.C.weight)
init.xavier_normal(self.H.weight)
init.xavier_normal(self.Z.weight)
self.memory_mask.data.fill_(1)
self.question_mask.data.fill_(1)
def forward(self, contexts, questions):
batch_size, context_length, context_num_words = contexts.size()
_, question_length = questions.size()
# List of sentence embeddings for every story in a batch. (num. sentences, batch size, encoder dim.)
contexts = self.embedding(contexts.view(batch_size, -1))
contexts = contexts.view(batch_size, context_length, context_num_words, -1)
questions = self.embedding(questions)
memory_mask = self.memory_mask[:context_length].unsqueeze(0).unsqueeze(2).expand(*contexts.size())
question_mask = self.question_mask[:question_length].unsqueeze(0).expand(*questions.size())
memory_inputs = torch.sum(contexts * memory_mask, dim=2).squeeze().t()
question_inputs = torch.sum(questions * question_mask, dim=1).squeeze()
# Compute memory updates.
keys = torch.arange(self.vocab_size, self.vocab_size + self.num_mem_slots)
keys = torch.autograd.Variable(keys.unsqueeze(0).expand(batch_size, self.num_mem_slots).long().cuda())
keys = self.embedding(keys).view(batch_size * self.num_mem_slots, -1)
network_graph = self.cell(memory_inputs, keys)
network_graph = self.C(network_graph).view(batch_size, self.num_mem_slots, self.embed_dim)
# Apply attention to the entire acyclic graph using the questions.
attention_energies = network_graph * question_inputs.unsqueeze(1).expand_as(network_graph)
attention_energies = attention_energies.sum(dim=-1)
attention_weights = F.softmax(attention_energies).expand_as(network_graph)
attended_network_graph = (network_graph * attention_weights).sum(dim=1).squeeze()
# Condition the fully-connected layer using the questions.
outputs = self.prelu_outputs(question_inputs + self.H(attended_network_graph))
outputs = self.Z(outputs)
return outputs
HIDDEN_DIM = 100
BATCH_SIZE = 100
NUM_EPOCHS = 250
LOG_FILE = "memnet.txt"
if __name__ == '__main__':
dataset = BabiDataset()
vocab_size = len(dataset.QA.VOCAB)
criterion = nn.CrossEntropyLoss(size_average=False)
model = RecurrentEntityNetwork(HIDDEN_DIM, 130, vocab_size)
model.cuda()
early_stopping_counter = 0
best_accuracy = 0
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
for epoch in range(NUM_EPOCHS):
dataset.set_mode('train')
train_loader = DataLoader(
dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=pad_collate
)
model.train()
if early_stopping_counter < 20:
total_accuracy = 0
num_batches = 0
for batch_idx, data in enumerate(train_loader):
optimizer.zero_grad()
contexts, questions, answers = data
contexts = autograd.Variable(contexts.long().cuda())
questions = autograd.Variable(questions.long().cuda())
answers = autograd.Variable(answers.cuda())
outputs = model(contexts, questions)
l2_loss = 0
for name, param in model.named_parameters():
l2_loss += 0.001 * torch.sum(param * param)
loss = criterion(outputs, answers) + l2_loss
predictions = F.softmax(outputs).data.max(1)[1]
correct = predictions.eq(answers.data).cpu().sum()
acc = correct * 100. / len(contexts)
loss.backward()
clip_grad_norm(model.parameters(), 40)
total_accuracy += acc
num_batches += 1
if batch_idx % 20 == 0:
print('[Epoch %d] [Training] loss : %f, acc : %f, batch_idx : %d' % (
epoch, loss.data[0], total_accuracy / num_batches, batch_idx
))
optimizer.step()
dataset.set_mode('valid')
valid_loader = DataLoader(
dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
)
model.eval()
total_accuracy = 0
num_batches = 0
for batch_idx, data in enumerate(valid_loader):
contexts, questions, answers = data
contexts = autograd.Variable(contexts.long().cuda())
questions = autograd.Variable(questions.long().cuda())
answers = autograd.Variable(answers.cuda())
outputs = model(contexts, questions)
l2_loss = 0
for name, param in model.named_parameters():
l2_loss += 0.001 * torch.sum(param * param)
loss = criterion(outputs, answers) + l2_loss
predictions = F.softmax(outputs).data.max(1)[1]
correct = predictions.eq(answers.data).cpu().sum()
acc = correct * 100. / len(contexts)
total_accuracy += acc
num_batches += 1
total_accuracy = total_accuracy / num_batches
if total_accuracy > best_accuracy:
best_accuracy = total_accuracy
best_state = model.state_dict()
early_stopping_counter = 0
else:
early_stopping_counter += 1
print('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy))
with open(LOG_FILE, 'a') as fp:
fp.write('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy) + '\n')
if total_accuracy == 1.0:
break
else:
print('Early Stopping at Epoch %d, Valid Accuracy : %f' % (epoch, best_accuracy))
break
dataset.set_mode('test')
test_loader = DataLoader(
dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
)
test_acc = 0
num_batches = 0
for batch_idx, data in enumerate(test_loader):
contexts, questions, answers = data
contexts = autograd.Variable(contexts.long().cuda())
questions = autograd.Variable(questions.long().cuda())
answers = autograd.Variable(answers.cuda())
        model.load_state_dict(best_state)  # state_dict() returns a copy, so updating it did nothing; load_state_dict actually restores the best weights
outputs = model(contexts, questions)
l2_loss = 0
for name, param in model.named_parameters():
l2_loss += 0.001 * torch.sum(param * param)
loss = criterion(outputs, answers) + l2_loss
predictions = F.softmax(outputs).data.max(1)[1]
correct = predictions.eq(answers.data).cpu().sum()
acc = correct * 100. / len(contexts)
test_acc += acc
num_batches += 1
print('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches))
with open(LOG_FILE, 'a') as fp:
fp.write('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches) + '\n')
| 37.083019
| 110
| 0.623792
| 1,189
| 9,827
| 4.936081
| 0.176619
| 0.021469
| 0.020617
| 0.025558
| 0.427671
| 0.381496
| 0.335492
| 0.313341
| 0.292213
| 0.279605
| 0
| 0.01646
| 0.270479
| 9,827
| 265
| 111
| 37.083019
| 0.802204
| 0.035413
| 0
| 0.302703
| 0
| 0.005405
| 0.02893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021622
| false
| 0
| 0.037838
| 0
| 0.081081
| 0.021622
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61d93349709f00bb603d8566d8afdb83080026fb
| 3,444
|
py
|
Python
|
tests/test_tba.py
|
StanfordAHA/Lake
|
34df001db107e1a0824b7fdb05b9f2145bf49a3e
|
[
"BSD-3-Clause"
] | 11
|
2019-10-14T02:05:38.000Z
|
2022-03-10T14:10:22.000Z
|
tests/test_tba.py
|
StanfordAHA/Lake
|
34df001db107e1a0824b7fdb05b9f2145bf49a3e
|
[
"BSD-3-Clause"
] | 29
|
2019-09-02T05:49:40.000Z
|
2022-02-26T00:57:54.000Z
|
tests/test_tba.py
|
StanfordAHA/Lake
|
34df001db107e1a0824b7fdb05b9f2145bf49a3e
|
[
"BSD-3-Clause"
] | 1
|
2021-04-16T20:26:13.000Z
|
2021-04-16T20:26:13.000Z
|
from lake.models.tba_model import TBAModel
from lake.modules.transpose_buffer_aggregation import TransposeBufferAggregation
from lake.passes.passes import lift_config_reg
import magma as m
from magma import *
import fault
import tempfile
import kratos as k
import random as rand
import pytest
def test_tba(word_width=16,
fetch_width=4,
num_tb=1,
tb_height=1,
max_range=5,
max_range_inner=5):
model_tba = TBAModel(word_width,
fetch_width,
num_tb,
tb_height,
max_range,
max_range_inner)
new_config = {}
new_config["range_outer"] = 5
new_config["range_inner"] = 3
new_config["stride"] = 2
new_config["indices"] = [0, 1, 2]
new_config["dimensionality"] = 2
new_config["tb_height"] = 1
new_config["starting_addr"] = 0
model_tba.set_config(new_config=new_config)
dut = TransposeBufferAggregation(word_width,
fetch_width,
num_tb,
tb_height,
max_range,
max_range_inner,
max_stride=5,
tb_iterator_support=2)
lift_config_reg(dut.internal_generator)
magma_dut = k.util.to_magma(dut, flatten_array=True, check_flip_flop_always_ff=False)
tester = fault.Tester(magma_dut, magma_dut.clk)
# configuration registers
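    # These flattened register names mirror the new_config entries given to the
    # TBAModel above, so the DUT and the golden model share one configuration.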
tester.circuit.tb_0_indices_0 = 0
tester.circuit.tb_0_indices_1 = 1
tester.circuit.tb_0_indices_2 = 2
tester.circuit.tb_0_range_outer = 5
tester.circuit.tb_0_range_inner = 3
tester.circuit.tb_0_stride = 2
tester.circuit.tb_0_dimensionality = 2
tester.circuit.tb_0_tb_height = 1
tester.circuit.tb_0_starting_addr = 0
tester.circuit.clk = 0
tester.circuit.rst_n = 1
tester.step(2)
tester.circuit.rst_n = 0
tester.step(2)
tester.circuit.tba_ren = 1
tester.circuit.rst_n = 1
rand.seed(0)
num_iters = 100
for i in range(num_iters):
data = []
for j in range(fetch_width):
data.append(rand.randint(0, 2**word_width - 1))
for j in range(fetch_width):
setattr(tester.circuit, f"SRAM_to_tb_data_{j}", data[j])
valid_data = rand.randint(0, 1)
tester.circuit.valid_data = valid_data
mem_valid_data = rand.randint(0, 1)
tester.circuit.mem_valid_data = mem_valid_data
tb_index_for_data = 0
tester.circuit.tb_index_for_data = tb_index_for_data
ack_in = valid_data
tester.circuit.ack_in = ack_in
model_data, model_valid = \
model_tba.tba_main(data, valid_data, ack_in, tb_index_for_data, 1, mem_valid_data)
tester.eval()
tester.circuit.tb_to_interconnect_valid.expect(model_valid)
if model_valid:
tester.circuit.tb_to_interconnect_data.expect(model_data[0])
tester.step(2)
with tempfile.TemporaryDirectory() as tempdir:
tester.compile_and_run(target="verilator",
directory=tempdir,
magma_output="verilog",
flags=["-Wno-fatal"])
if __name__ == "__main__":
test_tba()
| 29.947826
| 94
| 0.594948
| 440
| 3,444
| 4.313636
| 0.259091
| 0.143836
| 0.094837
| 0.075869
| 0.310327
| 0.114858
| 0.092729
| 0.092729
| 0.055848
| 0.055848
| 0
| 0.026237
| 0.324913
| 3,444
| 114
| 95
| 30.210526
| 0.790108
| 0.006678
| 0
| 0.170455
| 0
| 0
| 0.036268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011364
| false
| 0.011364
| 0.113636
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61da398102287561106f2583dbf3dd6a0d400ea3
| 1,442
|
py
|
Python
|
2018/02/py/run.py
|
Bigsby/aoc
|
409fefbb0467628fa298288064acb622bb53ee58
|
[
"CC0-1.0"
] | 1
|
2021-06-11T17:24:05.000Z
|
2021-06-11T17:24:05.000Z
|
2018/02/py/run.py
|
Bigsby/aoc
|
409fefbb0467628fa298288064acb622bb53ee58
|
[
"CC0-1.0"
] | null | null | null |
2018/02/py/run.py
|
Bigsby/aoc
|
409fefbb0467628fa298288064acb622bb53ee58
|
[
"CC0-1.0"
] | null | null | null |
#! /usr/bin/python3
import sys, os, time
from typing import List, Tuple
from itertools import combinations
def part1(ids: List[str]) -> int:
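    # Day 2 checksum: booleans coerce to 0/1, so this counts ids having some
    # letter exactly twice and multiplies by those having one exactly thrice.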
twice_count = 0
thrice_count = 0
for id in ids:
id_counts = { id.count(c) for c in id }
twice_count += 2 in id_counts
thrice_count += 3 in id_counts
return twice_count * thrice_count
def part2(ids: List[str]) -> str:
for id1, id2 in combinations(ids, 2):
differences = [ i for i in range(len(id1)) if id1[i] != id2[i] ]
        if len(differences) == 1:
            different_index = differences[0]
            return id1[:different_index] + id1[different_index + 1:]
    raise Exception("No two ids differ by exactly one character")
def solve(ids: List[str]) -> Tuple[int,str]:
return (
part1(ids),
part2(ids)
)
def get_input(file_path: str) -> List[str]:
if not os.path.isfile(file_path):
raise FileNotFoundError(file_path)
with open(file_path, "r") as file:
return [ line.strip() for line in file.readlines() ]
def main():
if len(sys.argv) != 2:
raise Exception("Please, add input file path as parameter")
start = time.perf_counter()
part1_result, part2_result = solve(get_input(sys.argv[1]))
end = time.perf_counter()
print("P1:", part1_result)
print("P2:", part2_result)
print()
print(f"Time: {end - start:.7f}")
if __name__ == "__main__":
main()
| 25.75
| 72
| 0.615811
| 207
| 1,442
| 4.135266
| 0.36715
| 0.046729
| 0.035047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027959
| 0.255895
| 1,442
| 56
| 73
| 25.75
| 0.769804
| 0.012483
| 0
| 0
| 0
| 0
| 0.074438
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.075
| 0.025
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61df694948c2ba5c7d34c79e97268eab5f090a30
| 3,272
|
py
|
Python
|
palette/core/color_transfer.py
|
SuziKim/PaletteSelection
|
cfc0052996b5c8dc1da2d6e30798dd1fed138ebe
|
[
"MIT"
] | 23
|
2015-08-25T12:31:44.000Z
|
2021-12-15T03:18:12.000Z
|
palette/core/color_transfer.py
|
SuziKim/PaletteSelection
|
cfc0052996b5c8dc1da2d6e30798dd1fed138ebe
|
[
"MIT"
] | null | null | null |
palette/core/color_transfer.py
|
SuziKim/PaletteSelection
|
cfc0052996b5c8dc1da2d6e30798dd1fed138ebe
|
[
"MIT"
] | 7
|
2017-07-27T10:57:36.000Z
|
2022-02-22T06:51:44.000Z
|
# -*- coding: utf-8 -*-
## @package palette.core.color_transfer
#
# Color transfer.
# @author tody
# @date 2015/09/16
import numpy as np
from scipy.interpolate import Rbf
import matplotlib.pyplot as plt
from palette.core.lab_slices import LabSlice, LabSlicePlot, Lab2rgb_py
## Color transfer for ab coordinates.
class ABTransfer:
## Constructor
# @param abs_original original ab coordinates.
# @param abs_edited edited ab coordinates.
def __init__(self, abs_original, abs_edited):
abs_original = np.array(abs_original)
abs_edited = np.array(abs_edited)
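        # Two scattered-data RBF interpolants over the original (a, b) points:
        # one predicts the edited a channel, the other the edited b channel.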
rbf_a = Rbf(abs_original[:, 0], abs_original[:, 1], abs_edited[:, 0])
rbf_b = Rbf(abs_original[:, 0], abs_original[:, 1], abs_edited[:, 1])
self._rbf = [rbf_a, rbf_b]
## Color transfer for ab coordinates.
def transfer(self, ab_original):
abs_edited = [rbf(ab_original[0], ab_original[1]) for rbf in self._rbf]
abs_edited = np.array(abs_edited)
return abs_edited
## Simple plotter for ABTransfer.
class ABTransferPlot:
## Constructor
# @param abs_original original ab coordinates.
# @param abs_edited edited ab coordinates.
# @param L target L coordinate.
# @param abs_animation list of ab coordinates for plot animation.
def __init__(self, abs_original, abs_edited, L=50, abs_animation=[]):
self._slice = LabSlice(func=Lab2rgb_py)
self._slice_plot = LabSlicePlot(self._slice)
self._slice_plot.plot(L)
self._abs_original = abs_original
self._abs_edited = abs_edited
self._abs_animation = abs_animation
self._transfer = ABTransfer(abs_original, abs_edited)
self._plot()
## Animation function for matplot.
def animationFunc(self, step, *args):
ab_id = step % len(self._abs_animation)
ab_original = self._abs_animation[ab_id]
xy_original, xy_edited = self._blendResult(ab_original)
self._setArrow(self._blend_plot, xy_original, xy_edited)
return self._blend_plot
def _plot(self):
xys_original = [self._slice.ab2xy(ab_original) for ab_original in self._abs_original]
xys_edited = [self._slice.ab2xy(ab_edited) for ab_edited in self._abs_edited]
for xy_original, xy_edited in zip(xys_original, xys_edited):
self._arrow(xy_original, xy_edited)
xy_original, xy_edited = self._blendResult(self._abs_animation[0])
self._blend_plot = self._arrow(xy_original, xy_edited, color=[0.7, 0.5, 0.4])
def _arrow(self, ps, pe, color=[1, 1, 1]):
xs = [ps[0], pe[0]]
ys = [ps[1], pe[1]]
return [plt.plot(xs, ys, '-', color=color, linewidth=2, alpha=0.8)[0],
plt.plot(ps[0], ps[1], 'o', color=color, linewidth=2, alpha=0.8)[0]]
def _setArrow(self, arrow_plot, ps, pe):
xs = [ps[0], pe[0]]
ys = [ps[1], pe[1]]
arrow_plot[0].set_data(xs, ys)
arrow_plot[1].set_data(ps[0], ps[1])
def _blendResult(self, ab_original):
ab_edited = self._transfer.transfer(ab_original)
xy_original = self._slice.ab2xy(ab_original)
xy_edited = self._slice.ab2xy(ab_edited)
return xy_original, xy_edited
| 37.181818
| 93
| 0.652812
| 457
| 3,272
| 4.38512
| 0.201313
| 0.071856
| 0.063872
| 0.062874
| 0.365269
| 0.336327
| 0.191617
| 0.160679
| 0.132735
| 0.096806
| 0
| 0.022673
| 0.231663
| 3,272
| 87
| 94
| 37.609195
| 0.774463
| 0.17665
| 0
| 0.113208
| 0
| 0
| 0.00075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.150943
| false
| 0
| 0.075472
| 0
| 0.339623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61dfafddb5a99f013e5962a29c6779ac49a5f150
| 1,447
|
py
|
Python
|
CursoEmVideoPython/desafio95.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | null | null | null |
CursoEmVideoPython/desafio95.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | 1
|
2020-07-04T16:27:25.000Z
|
2020-07-04T16:27:25.000Z
|
CursoEmVideoPython/desafio95.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | null | null | null |
scoult = dict()
gols = list()
time = list()
temp = 0
while True:
    scoult['Player'] = str(input("What is the player's name? "))
    scoult['Matches'] = int(input('How many matches were played? '))
    for i in range(0, scoult['Matches']):
        gols.append(int(input(f'How many goals did {scoult["Player"]} score in match {i+1}? ')))
    scoult['Goals'] = gols[:]
    for i in range(0, scoult['Matches']):
        if i == 0:
            scoult['Total goals'] = gols[i]
        else:
            scoult['Total goals'] += gols[i]
    time.append(scoult.copy())
    gols.clear()
    if str(input('Continue [Y/N]? ')) in 'Nn':
        break
print('-' * 50)
print('-' * 50)
print('{:^50}'.format('PERFORMANCE TABLE'))
print('-' * 50)
print('{:<5}{:<15}{:<25}{:<5}'.format('cod', 'Player', 'Goals', 'Total'))
for e in time:
    print('{:<5}{:<15}{:<25}{:<5}'.format(temp, e['Player'], str(e['Goals']), e['Total goals']))
    temp += 1
print('-' * 50)
while True:
    temp = int(input('Which player do you want details for? [cod] 999 to quit. '))
    if temp == 999:
        break
    else:
        print(f'-- Performance of player: {time[temp]["Player"]}')
        for i in range(0, time[temp]["Matches"]):
            print(f' => In match {i+1}, {time[temp]["Player"]} scored {time[temp]["Goals"][i]} time(s).')
        print(f'That makes a total of {time[temp]["Total goals"]} goals')
| 38.078947
| 109
| 0.561852
| 206
| 1,447
| 3.946602
| 0.325243
| 0.04305
| 0.054121
| 0.04059
| 0.189422
| 0.174662
| 0.078721
| 0.078721
| 0
| 0
| 0
| 0.031858
| 0.219074
| 1,447
| 37
| 110
| 39.108108
| 0.687611
| 0
| 0
| 0.324324
| 0
| 0.027027
| 0.422944
| 0.078093
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.27027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61e1ff665914cfb40790ee569edb6f9cb201dad5
| 3,668
|
py
|
Python
|
Algorithms/On-Policy/A2C/DISCOVER_A2C.py
|
baturaysaglam/DISCOVER
|
423158c84a5935ca5755ccad06ea5fe20fb57d76
|
[
"MIT"
] | null | null | null |
Algorithms/On-Policy/A2C/DISCOVER_A2C.py
|
baturaysaglam/DISCOVER
|
423158c84a5935ca5755ccad06ea5fe20fb57d76
|
[
"MIT"
] | null | null | null |
Algorithms/On-Policy/A2C/DISCOVER_A2C.py
|
baturaysaglam/DISCOVER
|
423158c84a5935ca5755ccad06ea5fe20fb57d76
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from utils import init
class Explorer(nn.Module):
def __init__(self, state_dim, max_action, exp_regularization):
super(Explorer, self).__init__()
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))
self.l1 = init_(nn.Linear(state_dim, 64))
self.l2 = init_(nn.Linear(64, 64))
self.l3 = init_(nn.Linear(64, state_dim))
self.max_action = max_action
self.exp_regularization = exp_regularization
def forward(self, state):
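        # The exploration direction is bounded to [-max_action, max_action] by
        # tanh and shrunk by exp_regularization ** 2.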
a = torch.tanh(self.l1(state))
a = torch.tanh(self.l2(a))
return self.max_action * torch.tanh(self.l3(a)) * self.exp_regularization ** 2
class DISCOVER_A2C():
def __init__(self,
state_dim,
max_action,
exp_regularization,
policy,
value_loss_coef,
entropy_coef,
learning_rate=None,
adam_eps=None,
alpha=None,
max_grad_norm=None,
device=None):
self.policy = policy
self.explorer = Explorer(state_dim, max_action, exp_regularization).to(device)
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.optimizer = optim.RMSprop(policy.parameters(), learning_rate, eps=adam_eps, alpha=alpha)
self.explorer_optimizer = optim.Adam(self.explorer.parameters(), lr=learning_rate, eps=adam_eps)
def explore(self, inputs):
return self.explorer(inputs)
def update_parameters(self, rollouts):
obs_shape = rollouts.obs.size()[2:]
action_shape = rollouts.actions.size()[-1]
num_steps, num_processes, _ = rollouts.rewards.size()
values, action_log_probs, dist_entropy, _ = self.policy.evaluate_actions(
rollouts.obs[:-1].view(-1, *obs_shape),
rollouts.exploration_directions.view(-1, *obs_shape),
rollouts.recurrent_hidden_states[0].view(-1, self.policy.recurrent_hidden_state_size),
rollouts.masks[:-1].view(-1, 1),
rollouts.actions.view(-1, action_shape))
values = values.view(num_steps, num_processes, 1)
action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
advantages = rollouts.returns[:-1] - values
value_loss = advantages.pow(2).mean()
action_loss = -(advantages.detach() * action_log_probs).mean()
self.optimizer.zero_grad()
(value_loss * self.value_loss_coef + action_loss - dist_entropy * self.entropy_coef).backward()
nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.optimizer.step()
# Compute the explorer loss
values, action_log_probs, dist_entropy, _ = self.policy.evaluate_actions(
rollouts.obs[:-1].view(-1, *obs_shape),
self.explorer(rollouts.obs[:-1].view(-1, *obs_shape)),
rollouts.recurrent_hidden_states[0].view(-1, self.policy.recurrent_hidden_state_size),
rollouts.masks[:-1].view(-1, 1),
rollouts.actions.view(-1, action_shape))
values = values.view(num_steps, num_processes, 1)
advantages = rollouts.returns[:-1] - values
value_loss = advantages.pow(2).mean()
self.explorer_optimizer.zero_grad()
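        # Note the sign flip below: the explorer maximizes the critic's squared
        # advantage, steering exploration toward states where the value
        # estimate is most wrong.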
(-value_loss * self.value_loss_coef).backward()
nn.utils.clip_grad_norm_(self.explorer.parameters(), self.max_grad_norm)
self.explorer_optimizer.step()
| 37.050505
| 104
| 0.638768
| 464
| 3,668
| 4.771552
| 0.206897
| 0.022584
| 0.029359
| 0.036134
| 0.503162
| 0.45122
| 0.409666
| 0.409666
| 0.370822
| 0.291328
| 0
| 0.016619
| 0.245365
| 3,668
| 98
| 105
| 37.428571
| 0.783237
| 0.006816
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069444
| false
| 0
| 0.069444
| 0.013889
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61e3abea3e991562a75549fe727c93817d1999de
| 3,400
|
py
|
Python
|
user/beaninfo_Global.py
|
dvdrm/gd
|
c004724344577bb608fa0611d10c16b211995f72
|
[
"Apache-2.0"
] | null | null | null |
user/beaninfo_Global.py
|
dvdrm/gd
|
c004724344577bb608fa0611d10c16b211995f72
|
[
"Apache-2.0"
] | null | null | null |
user/beaninfo_Global.py
|
dvdrm/gd
|
c004724344577bb608fa0611d10c16b211995f72
|
[
"Apache-2.0"
] | null | null | null |
from telethon import events, Button
from .login import user
from .. import jdbot
from ..bot.utils import cmd, TASK_CMD, split_list, press_event
from ..diy.utils import read, write
import asyncio
import re
import os
import traceback
# NOTE: the error handler below also references `logger` and `chat_id`, which
# are expected to come from the surrounding package's configuration.
@user.on(events.NewMessage(pattern=r'^setbd', outgoing=True))
async def SetBeanDetailInfo(event):
try:
msg_text= event.raw_text.split(' ')
if len(msg_text) == 2:
text = msg_text[-1]
else:
text = None
        if text is None:
            await event.edit('Please use the correct format: setbd <JD-bean count to hide>')
return
key="BOTShowTopNum"
kv=f'{key}="{text}"'
change=""
configs = read("str")
if kv not in configs:
if key in configs:
configs = re.sub(f'{key}=("|\').*("|\')', kv, configs)
write(configs)
else:
configs = read("str")
configs += f'export {key}="{text}"\n'
write(configs)
            change = f'Hidden JD-bean count set to {text}'
        else:
            change = 'Setting unchanged; make up your mind and come back.'
await event.edit(change)
    except Exception as e:
        title = "【💥Error💥】"
        name = "File: " + os.path.split(__file__)[-1].split(".")[0]
        function = "Function: " + e.__traceback__.tb_frame.f_code.co_name
        details = "Details: line " + str(e.__traceback__.tb_lineno)
        tip = 'Try searching Baidu/Google for the error message'
        await jdbot.send_message(chat_id, f"{title}\n\n{name}\n{function}\nCause: {str(e)}\n{details}\n{traceback.format_exc()}\n{tip}")
        logger.error(f"Error ---> {str(e)}")
@user.on(events.NewMessage(pattern=r'^bd', outgoing=True))
async def CCBeanDetailInfo(event):
msg_text = event.raw_text.split(' ')
if len(msg_text) == 2:
text = msg_text[-1]
else:
text = None
if text is None:
await event.edit('Please specify the account to query. Usage: bd 1 or bd ptpin')
return
key = "BOTCHECKCODE"
kv = f'{key}="{text}"'
change = ""
configs = read("str")
intcount = 0
if kv not in configs:
if key in configs:
configs = re.sub(f'{key}=("|\').*("|\')', kv, configs)
change += f"【替换】环境变量:`{kv}`\n"
write(configs)
else:
configs = read("str")
configs += f'export {key}="{text}"\n'
change += f"【新增】环境变量:`{kv}`\n"
write(configs)
await event.edit('Querying assets for account ' + text + ', please wait...')
cmdtext="task /ql/data/scripts/jk_script/bot_jd_bean_info_QL.js now"
p = await asyncio.create_subprocess_shell(
cmdtext, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
res_bytes, res_err = await p.communicate()
res = res_bytes.decode('utf-8')
txt = res.split('\n')
strReturn = ""
await event.delete()
if res:
for line in txt:
if "【" in line and "🔔" not in line:
strReturn = strReturn + line + '\n'
intcount += 1  # count collected lines so the 100-line chunking below can trigger
if intcount == 100:
intcount = 0
if strReturn:
await user.send_message(event.chat_id, strReturn)
strReturn = ""
else:
await user.send_message(event.chat_id, 'Query failed!')
if strReturn:
await user.send_message(event.chat_id, strReturn)
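Standalone sketch of the export-line update pattern both handlers rely on (hypothetical file contents; no Telegram client needed):

import re

configs = 'export FOO="1"\nexport BOTShowTopNum="5"\n'
key, text = "BOTShowTopNum", "10"
kv = f'{key}="{text}"'
if kv not in configs:
    if key in configs:
        # rewrite the existing export value in place
        configs = re.sub(f'{key}=("|\').*("|\')', kv, configs)
    else:
        # append a new export line
        configs += f'export {key}="{text}"\n'
print(configs)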
| 33.009709
| 134
| 0.516471
| 406
| 3,400
| 4.219212
| 0.362069
| 0.024518
| 0.032691
| 0.035026
| 0.38704
| 0.371862
| 0.336836
| 0.318739
| 0.283713
| 0.283713
| 0
| 0.005809
| 0.341765
| 3,400
| 103
| 135
| 33.009709
| 0.758266
| 0
| 0
| 0.516854
| 0
| 0
| 0.142017
| 0.040282
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078652
| 0
| 0.101124
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61e6fadc19dca2b7aaa1c0e67b41806d94ed6219
| 12,263
|
py
|
Python
|
pyemits/core/ml/regression/trainer.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | 6
|
2021-10-21T14:13:25.000Z
|
2021-12-26T12:22:51.000Z
|
pyemits/core/ml/regression/trainer.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | null | null | null |
pyemits/core/ml/regression/trainer.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet, Ridge, Lasso, BayesianRidge, HuberRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from pyemits.core.ml.base import BaseTrainer, BaseWrapper, NeuralNetworkWrapperBase
from pyemits.common.config_model import BaseConfig, KerasSequentialConfig, TorchLightningSequentialConfig
from pyemits.common.data_model import RegressionDataModel
from pyemits.common.py_native_dtype import SliceableDeque
from pyemits.common.validation import raise_if_value_not_contains
from typing import List, Dict, Optional, Union, Any
from pyemits.core.ml.regression.nn import TorchLightningWrapper
RegModelContainer = {
'RF': RandomForestRegressor,
'GBDT': GradientBoostingRegressor,
# 'HGBDT': HistGradientBoostingRegressor,
'AdaBoost': AdaBoostRegressor,
'MLP': MLPRegressor,
'ElasticNet': ElasticNet,
'Ridge': Ridge,
'Lasso': Lasso,
'BayesianRidge': BayesianRidge,
'Huber': HuberRegressor,
'XGBoost': XGBRegressor,
'LightGBM': LGBMRegressor
}
def _get_reg_model(algo_or_wrapper: Union[str, BaseWrapper]):
if isinstance(algo_or_wrapper, str):
return RegModelContainer[algo_or_wrapper]
# return wrapper model
elif isinstance(algo_or_wrapper, BaseWrapper):
return algo_or_wrapper
raise TypeError('algo must be a str key of RegModelContainer or a BaseWrapper')
def fill_algo_config_clf(clf_or_wrapper,
algo_config: Optional[BaseConfig] = None):
# nn wrapper
if isinstance(clf_or_wrapper, NeuralNetworkWrapperBase):
# have algo config
if algo_config is not None:
# if keras model object
if isinstance(algo_config, KerasSequentialConfig):
for i in algo_config.layer:
clf_or_wrapper.model_obj.add(i)
clf_or_wrapper.model_obj.compile(**algo_config.compile)
return clf_or_wrapper
elif isinstance(algo_config, TorchLightningSequentialConfig):
clf_or_wrapper: TorchLightningWrapper
for nos, layer in enumerate(algo_config.layer, 1):
clf_or_wrapper.add_layer2blank_model(str(nos), layer)
return clf_or_wrapper
# plain pytorch and mxnet models are not supported right now
raise TypeError('only KerasSequentialConfig and TorchLightningSequentialConfig are supported')
# no algo config
return clf_or_wrapper
# sklearn clf path
if algo_config is None:
return clf_or_wrapper() # activate
else:
return clf_or_wrapper(**dict(algo_config))
def fill_fit_config_clf(clf_or_wrapper,
X,
y,
fit_config: Optional[Union[BaseConfig, Dict]] = None,
):
from pyemits.core.ml.regression.nn import torchlighting_data_helper
# nn wrapper
if isinstance(clf_or_wrapper, NeuralNetworkWrapperBase):
dl_train, dl_val = torchlighting_data_helper(X, y)
if fit_config is None:
# pytorch_lightning path
if isinstance(clf_or_wrapper, TorchLightningWrapper):
return clf_or_wrapper.fit(dl_train, dl_val)
# keras path
return clf_or_wrapper.fit(X, y)
if isinstance(fit_config, BaseConfig):
if isinstance(clf_or_wrapper, TorchLightningWrapper):
return clf_or_wrapper.fit(dl_train, dl_val, **dict(fit_config))
# keras path
return clf_or_wrapper.fit(X, y, **dict(fit_config))
elif isinstance(fit_config, Dict):
if isinstance(clf_or_wrapper, TorchLightningWrapper):
return clf_or_wrapper.fit(dl_train, dl_val, **fit_config)
# keras path
return clf_or_wrapper.fit(X, y, **fit_config)
# sklearn/xgboost/lightgbm clf
else:
if fit_config is None:
return clf_or_wrapper.fit(X, y)
else:
assert isinstance(fit_config, BaseConfig), "fit_config type not matched"
return clf_or_wrapper.fit(X, y, **dict(fit_config))
class RegTrainer(BaseTrainer):
def __init__(self,
algo: List[Union[str, Any]],
algo_config: List[Optional[BaseConfig]],
raw_data_model: RegressionDataModel,
other_config: Dict[str, Union[List, BaseConfig, Any]] = {}):
"""
Universal class for regression model training:
all-in-one training covering sklearn, xgboost, lightgbm, keras and pytorch_lightning.
Filling in algo_config is optional if you already know how you want the model configured.
algo_config lets you configure each model from the configuration models provided in config_model,
so models can be configured easily at creation time.
PyTorch Lightning users should configure their model before using this class; in that case no algo_config is needed.
Parameters
----------
algo: List[str]
the machine learning algorithms; any model exposing fit/predict can be used here
algo_config: List[BaseConfig] or List[None]
the respective config model of algo
raw_data_model: RegressionDataModel
data model obj, stores data and meta data
other_config: BaseConfig
other global config, shall be used in its sub-class
"""
super(RegTrainer, self).__init__(algo, algo_config)
# raise_if_value_not_contains(algo, list(RegModelContainer.keys()))
self.raw_data_model = raw_data_model
self.other_config = other_config
self.clf_models = SliceableDeque()
self._is_algo_valid()
self._is_algo_config_valid()
def _is_algo_valid(self):
for item in self._algo:
if not isinstance(item, (str, NeuralNetworkWrapperBase)):
raise TypeError('must be str or WrapperBase')
if isinstance(item, str):
raise_if_value_not_contains([item], list(RegModelContainer.keys()))
def _is_algo_config_valid(self):
for item in self._algo_config:
if item is None:
continue # skip to next loop
if not isinstance(item, (BaseConfig, Dict)):
raise TypeError('Only accept ConfigBase or Dict as input')
# no checking when model is object, which directly passing it
def is_config_exists(self, config_key: str):
config_item = self.other_config.get(config_key, None)
if config_item is None:
return False
return True
def get_fill_fit_config(self):
fit_config = self.other_config.get('fit_config', None)
if isinstance(fit_config, list):
assert len(fit_config) == len(self._algo), 'length not matched'
return fit_config
elif fit_config is None:
fit_config = [None] * len(self._algo)
return fit_config
else:
raise TypeError('fit config not a list type')
def _fit(self):
X = self.raw_data_model.X_data
y = self.raw_data_model.y_data
# make sure y is 1D array in RegTrainer
fit_config = self.get_fill_fit_config()
for n, (algo, algo_config) in enumerate(zip(self._algo, self._algo_config)):
clf = fill_algo_config_clf(_get_reg_model(algo), algo_config)
fill_fit_config_clf(clf, X, y, fit_config[n])
self.clf_models.append((str(algo), clf))
return
class ParallelRegTrainer(RegTrainer):
def __init__(self,
algo: List[str],
algo_config: List[BaseConfig],
raw_data_model: RegressionDataModel,
other_config: Dict[str, Union[List, BaseConfig, Any]] = {}):
"""
convenience class for parallel training
Parameters
----------
algo: List[str]
the machine learning algorithms; any model exposing fit/predict can be used here
algo_config: List[BaseConfig] or List[None]
the respective config model of algo
raw_data_model: RegressionDataModel
data model obj, stores data and meta data
other_config: BaseConfig
other global config, shall be used in its sub-class
"""
super(ParallelRegTrainer, self).__init__(algo, algo_config, raw_data_model, other_config)
def _fit(self):
from joblib import Parallel, delayed
parallel = Parallel(n_jobs=-1)
def _get_fitted_trainer(algo: List,
algo_config: List[BaseConfig],
raw_data_model: RegressionDataModel,
other_config: Dict[str, BaseConfig] = {}):
trainer = RegTrainer(algo, algo_config, raw_data_model, other_config)
trainer.fit() # fit config auto filled by RegTrainer, no need to handle
return trainer
out: List[RegTrainer] = parallel(
delayed(_get_fitted_trainer)([algo_], [algo_config_], self.raw_data_model, self.other_config) for
algo_, algo_config_ in
zip(self._algo, self._algo_config))
for obj in out:
self.clf_models.append(obj.clf_models)
return
def fit(self):
return self._fit()
class MultiOutputRegTrainer(RegTrainer):
"""
machine learning based multioutput regression trainer
bring forecasting power into machine learning model,
forecasting is not only the power of deep learning
"""
def __init__(self,
algo: List[Union[str, Any]],
algo_config: List[Optional[BaseConfig]],
raw_data_model: RegressionDataModel,
other_config: Dict[str, Union[List, BaseConfig, Any]] = {},
parallel_n_jobs: int = -1):
super(MultiOutputRegTrainer, self).__init__(algo, algo_config, raw_data_model, other_config)
self.parallel_n_jobs = parallel_n_jobs
def _fit(self):
fit_config = self.get_fill_fit_config()
from sklearn.multioutput import MultiOutputRegressor
X = self.raw_data_model.X_data
y = self.raw_data_model.y_data
for n, (algo, algo_config) in enumerate(zip(self._algo, self._algo_config)):
clf = fill_algo_config_clf(_get_reg_model(algo), algo_config) # clf already activated
clf = MultiOutputRegressor(estimator=clf, n_jobs=self.parallel_n_jobs)
fill_fit_config_clf(clf, X, y, fit_config[n])
self.clf_models.append((str(algo), clf))
return
class KFoldCVTrainer(RegTrainer):
def __init__(self,
algo: List[Union[str, Any]],
algo_config: List[Optional[BaseConfig]],
raw_data_model: RegressionDataModel,
other_config: Dict[str, Union[List, BaseConfig, Any]] = {},
):
super(KFoldCVTrainer, self).__init__(algo, algo_config, raw_data_model, other_config)
def _fit(self):
from pyemits.core.ml.cross_validation import KFoldCV
kfold_config = self.other_config.get('kfold_config', None)
if kfold_config is not None:
kfold_cv = KFoldCV(self.raw_data_model, kfold_config)
else:
kfold_cv = KFoldCV(self.raw_data_model)
splitted_kfold = kfold_cv.split()
for n, item in enumerate(splitted_kfold):
self._meta_data_model.add_meta_data('kfold_record', [item])
train_idx = item[0]
test_idx = item[1]
X_ = self.raw_data_model.X_data[train_idx]
y_ = self.raw_data_model.y_data[train_idx]
sliced_data_model = RegressionDataModel(X_, y_)
trainer = ParallelRegTrainer(self._algo, self._algo_config, sliced_data_model,
other_config=self.other_config)
trainer.fit()
self.clf_models.append((f'kfold_{n}', trainer.clf_models))
return
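A minimal sketch of the name-to-estimator dispatch idea behind RegModelContainer and RegTrainer, using sklearn only (the wrapper and config-model machinery of the trainer classes is omitted):

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge

container = {'RF': RandomForestRegressor, 'Ridge': Ridge}

X, y = np.random.rand(50, 3), np.random.rand(50)
models = []
for name, config in [('RF', {'n_estimators': 10}), ('Ridge', {'alpha': 0.5})]:
    clf = container[name](**config)  # "activate" the estimator with its algo config
    clf.fit(X, y)
    models.append((name, clf))
print([name for name, _ in models])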
| 40.471947
| 125
| 0.644133
| 1,455
| 12,263
| 5.160825
| 0.171821
| 0.057265
| 0.038354
| 0.031163
| 0.408443
| 0.368491
| 0.352777
| 0.306299
| 0.28619
| 0.281928
| 0
| 0.000793
| 0.279785
| 12,263
| 302
| 126
| 40.60596
| 0.849411
| 0.166925
| 0
| 0.338308
| 0
| 0
| 0.02892
| 0.002116
| 0
| 0
| 0
| 0
| 0.00995
| 1
| 0.084577
| false
| 0
| 0.079602
| 0.004975
| 0.308458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61e7231e5da397e138846e32322894665e310b28
| 7,092
|
py
|
Python
|
network_core/network_graph.py
|
markusgl/SocialCompanion
|
e816af21c600b33dbcac25d088d4d75957d0349a
|
[
"MIT"
] | 2
|
2018-12-21T12:55:21.000Z
|
2019-05-29T06:35:58.000Z
|
network_core/network_graph.py
|
markusgl/SocialCompanion
|
e816af21c600b33dbcac25d088d4d75957d0349a
|
[
"MIT"
] | 8
|
2019-12-16T21:08:36.000Z
|
2021-03-31T18:58:35.000Z
|
network_core/network_graph.py
|
markusgl/SocialCompanion
|
e816af21c600b33dbcac25d088d4d75957d0349a
|
[
"MIT"
] | null | null | null |
"""
knowledge graph representation using neo4j
this class uses py2neo, which will be the final version
"""
import os
import json
from py2neo import Graph, Relationship, NodeMatcher, Node
from network_core.ogm.node_objects import Me, Contact, Misc
USERTYPE = "User"
CONTACTTYPE = "Contact"
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
relationships = {'freund': 'FRIEND',
'schwester': 'SISTER',
'bruder': 'BROTHER',
'mutter': 'MOTHER',
'vater': 'FATHER',
'tochter': 'DAUGHTER',
'sohn': 'SON',
'enkel': 'GRANDCHILD',
'enkelin': 'GRANDCHILD'}
class NetworkGraph:
def __init__(self):
path = os.path.realpath(ROOT_DIR + '/neo4j_creds.json')
with open(path) as f:
data = json.load(f)
username = data['username']
password = data['password']
self.graph = Graph(host="localhost", username=username, password=password)
def add_node_by_name(self, name, age=None, gender=None, node_type="PERSON"):
if name == 'USER':
node_type = 'user'
node = Node(node_type, name=name, age=age, gender=gender)
self.graph.create(node)
return node
def get_node_by_name(self, name):
matcher = NodeMatcher(self.graph)
node = matcher.match(name=name).first()
return node
def add_relationship(self, node1, node2, rel_type='KNOWS'):
first_node = self.get_node_by_name(node1)
second_node = self.get_node_by_name(node2)
if not first_node:
first_node = self.add_node_by_name(node1)
if not second_node:
second_node = self.add_node_by_name(node2)
self.graph.create(Relationship(first_node, rel_type, second_node))
def add_rel_tuple(self, ent1, ent2):
"""
Adds two related entity nodes to the graph.
Creates a Misc node for each entity, links them to each other and saves both to neo4j.
:param ent1: name of the first entity
:param ent2: name of the second entity
:return: nothing
"""
# define nodes
node1 = Misc()
node1.name = ent1
node2 = Misc()
node2.name = ent2
# add relationship to nodes
node1.related_ent.add(node2)
node2.related_ent.add(node1)
# save to neo4j
self.graph.create(node1)
self.graph.create(node2)
def search_node_by_name(self, node_name):
# replace white spaces
_node_name = node_name.replace(" ", "")
query = 'MATCH (n) WHERE n.name={node_name} RETURN n.name;'
result = self.graph.run(query,
node_name=_node_name,
).data()
if result:
node = result[0]['n.name']
else:
node = None
return node
def add_me_w_firstname(self, username, age="", gender=""):
"""
Pushes a new central user 'Me' to the graph
Gets a username, creates a Me object and pushes it to the graph
:param username: string username
:return: me object (see ogm pkg)
"""
# OGM
me = Me()
me.firstname = username.title()
me.lastname = ""
me.age = age
me.gender = gender
self.graph.push(me)
return me
def add_me_w_lastname(self, username, age="", gender=""):
"""
Pushes a new central user 'Me' to the graph
Gets a username, creates a Me object and pushes it to the graph
:param username: string username
:return: me object (see ogm pkg)
"""
# OGM
me = Me()
me.firstname = ""
me.lastname = username.title()
me.age = age
me.gender = gender
self.graph.push(me)
return me
def get_me_by_firstname(self, me_name):
"""
return me object by firstname
:param me_name: string with firstname of me
:return: me object
"""
result = self.graph.run('MATCH (n:Me) WHERE n.firstname="' + me_name.title() + '" RETURN n.firstname').data()
me = Me()
if result:
me.firstname = result[0]['n.firstname']
return me
else:
return None
def get_me_by_lastname(self, me_name):
"""
return me object by lastname
:param me_name: string with lastname of me
:return: me object
"""
result = self.graph.run('MATCH (n:Me) WHERE n.lastname="' + me_name.title() + '" RETURN n.lastname').data()
me = Me()
if result:
me.lastname = result[0]['n.lastname']
return me
else:
return None
def add_contact(self, me_name, contactname, relationship):
"""
adds a new contact to the central user i.e. 'Me' in graph
:param me_name: name of the central user object
:param contactname: string, will be converted to a Contact object
:param relationship: string, will be converted to an object property
:return:
"""
# select central user 'Me'
me = self.get_me_by_firstname(me_name)
contact = Contact()
contact.firstname = contactname
relationship = relationships[relationship]
# compare against the mapped values; the original compared the German keys
# after mapping, so every branch was dead code
if relationship == 'FRIEND':
me.friend.add(contact)
contact.friend.add(me)
elif relationship == 'BROTHER':
me.brother.add(contact)
contact.brother.add(me)
elif relationship == 'SISTER':
me.sister.add(contact)
contact.sister.add(me)
elif relationship == 'MOTHER':
me.mother.add(contact)
elif relationship == 'FATHER':
me.father.add(contact)
elif relationship == 'SON':
me.son.add(contact)
elif relationship == 'DAUGHTER':
me.daughter.add(contact)
#TODO other relationships
self.graph.push(me)
def search_relationship_by_contactname(self, me_name, contact_name):
mename = me_name.replace(" ", "")
contactname = contact_name.replace(" ", "")
query = 'MATCH (n:Me)-[r]->(c:Contact) WHERE n.firstname={me_name} AND c.firstname={contactname} RETURN type(r);'
result = self.graph.run(query,
me_name=mename,
contactname=contactname
).data()
if result:
relationship = result[0]['type(r)']
else:
relationship = None
return relationship
def search_contactname_by_relationship(self, me_name, relationship):
relationship = relationships[relationship]
if relationship:
result = self.graph.run('MATCH (u:Me)-[:'+relationship+']->(c:Contact) RETURN c.firstname;', rel=relationship).data()
else:
return None
if result:
contactname = result[0]['c.firstname']
else:
contactname = None
return contactname
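Hedged sketch: the firstname/lastname lookups above build Cypher by string concatenation; passing parameters to graph.run (as search_node_by_name already does) avoids quoting and injection issues. This assumes a running Neo4j instance and mirrors the parameter syntax used elsewhere in this file:

from py2neo import Graph

graph = Graph(host="localhost", username="neo4j", password="secret")  # assumed credentials
result = graph.run(
    'MATCH (n:Me) WHERE n.firstname = {me_name} RETURN n.firstname',
    me_name="Alice",
).data()
print(result)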
| 30.437768
| 129
| 0.563593
| 820
| 7,092
| 4.756098
| 0.181707
| 0.032308
| 0.017949
| 0.023077
| 0.351282
| 0.258205
| 0.223846
| 0.223846
| 0.223846
| 0.223846
| 0
| 0.006321
| 0.330795
| 7,092
| 232
| 130
| 30.568966
| 0.815424
| 0.164834
| 0
| 0.269504
| 0
| 0.007092
| 0.102432
| 0.012249
| 0
| 0
| 0
| 0.00431
| 0
| 1
| 0.092199
| false
| 0.014184
| 0.028369
| 0
| 0.212766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61ea28b84ee81d7761635919c06d71cde4b781c4
| 2,355
|
py
|
Python
|
src/train_and_evaluate.py
|
rajeevteejwal/mlops_wine_quality
|
970ce27712932ca535309230da69fc5c29d82c38
|
[
"MIT"
] | null | null | null |
src/train_and_evaluate.py
|
rajeevteejwal/mlops_wine_quality
|
970ce27712932ca535309230da69fc5c29d82c38
|
[
"MIT"
] | null | null | null |
src/train_and_evaluate.py
|
rajeevteejwal/mlops_wine_quality
|
970ce27712932ca535309230da69fc5c29d82c38
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
from sklearn.linear_model import ElasticNet
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
import argparse
import numpy as np
import json
import joblib
from get_data import read_config
def evaluate_metrics(actual, pred):
r2 = r2_score(actual,pred)
mae = mean_absolute_error(actual,pred)
rmse = np.sqrt(mean_squared_error(actual,pred))
return r2, rmse, mae
def train_and_evaluate(config_path):
config = read_config(config_path)
train_data_path = config["split_data"]["train_path"]
test_data_path = config["split_data"]["test_path"]
output_col = config["base"]["target_col"]
random_state = config["base"]["random_state"]
train_dataset = pd.read_csv(train_data_path,sep=",", encoding="utf-8")
test_dataset = pd.read_csv(test_data_path,sep=",", encoding="utf-8")
y_train = train_dataset[[output_col]]
x_train = train_dataset.drop([output_col],axis=1)
y_test = test_dataset[[output_col]]
x_test = test_dataset.drop([output_col],axis=1)
alpha = config["estimators"]["ElasticNet"]["params"]["alpha"]
l1_ratio = config["estimators"]["ElasticNet"]["params"]["l1_ratio"]
lr = ElasticNet(alpha=alpha,l1_ratio=l1_ratio,random_state=random_state)
lr.fit(x_train,y_train)
prediction = lr.predict(x_test)
r2, rmse, mae = evaluate_metrics(y_test,prediction)
print(f"ElasticNet model (alpha: {alpha}, l1_ratio: {l1_ratio})")
print(f" RMSE: {rmse}")
print(f" MAE: {mae}")
print(f" R2 Score: {r2}")
scores_file = config["reports"]["scores"]
params_file = config["reports"]["params"]
with open(scores_file,"w") as f:
scores = {
"r2":r2,
"rmse":rmse,
"mae":mae
}
json.dump(scores,f,indent=4)
with open(params_file,"w") as f:
params = {
"alpha":alpha,
"l1_ratio":l1_ratio
}
json.dump(params,f,indent=4)
model_dir = config["model_dir"]
os.makedirs(model_dir,exist_ok=True)
model_path = os.path.join(model_dir,"model.joblib")
joblib.dump(lr,model_path)
if __name__ == '__main__':
args = argparse.ArgumentParser()
args.add_argument("--config",default="params.yaml")
parsed_args = args.parse_args()
train_and_evaluate(config_path=parsed_args.config)
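Quick check of the metric helper on toy arrays (the values are illustrative):

import numpy as np
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error

actual = np.array([3.0, 5.0, 7.0])
pred = np.array([2.5, 5.0, 7.5])
r2 = r2_score(actual, pred)
mae = mean_absolute_error(actual, pred)
rmse = np.sqrt(mean_squared_error(actual, pred))
print(r2, rmse, mae)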
| 31.824324
| 77
| 0.675159
| 331
| 2,355
| 4.52568
| 0.280967
| 0.037383
| 0.032043
| 0.034045
| 0.17757
| 0.11215
| 0
| 0
| 0
| 0
| 0
| 0.011992
| 0.185563
| 2,355
| 73
| 78
| 32.260274
| 0.76903
| 0
| 0
| 0
| 0
| 0
| 0.143161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.15
| 0
| 0.2
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61ebdb6920b4b4c3e3a8b0b2f9c1a74ed61083fb
| 961
|
py
|
Python
|
examples/plot_magnitudes.py
|
zsiciarz/pygcvs
|
ed5522ab9cf9237592a6af7a0bc8cad079afeb67
|
[
"MIT"
] | null | null | null |
examples/plot_magnitudes.py
|
zsiciarz/pygcvs
|
ed5522ab9cf9237592a6af7a0bc8cad079afeb67
|
[
"MIT"
] | null | null | null |
examples/plot_magnitudes.py
|
zsiciarz/pygcvs
|
ed5522ab9cf9237592a6af7a0bc8cad079afeb67
|
[
"MIT"
] | null | null | null |
"""
Visualisation of maximum/minimum magnitude for GCVS stars.
"""
import sys
import matplotlib.pyplot as plot
from pygcvs import read_gcvs
if __name__ == '__main__':
try:
gcvs_file = sys.argv[1]
except IndexError:
print('Usage: python plot_magnitudes.py <path to iii.dat>')
else:
min_magnitudes = []
max_magnitudes = []
for star in read_gcvs(gcvs_file):
if star['min_magnitude'] and star['max_magnitude']:
min_magnitudes.append(star['min_magnitude'])
max_magnitudes.append(star['max_magnitude'])
plot.title('GCVS variable star magnitudes')
plot.plot(min_magnitudes, max_magnitudes, 'ro')
plot.xlabel('Min magnitude')
plot.ylabel('Max magnitude')
# invert axes because brightest stars have lowest magnitude value
plot.gca().invert_xaxis()
plot.gca().invert_yaxis()
plot.savefig('magnitudes.png')
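Tiny sketch of the inverted-axis convention on synthetic data (brighter stars have numerically lower magnitudes, so both axes are flipped):

import matplotlib.pyplot as plot

plot.plot([2.1, 5.4, 9.0], [1.0, 4.2, 8.1], 'ro')
plot.gca().invert_xaxis()
plot.gca().invert_yaxis()
plot.savefig('example.png')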
| 30.03125
| 73
| 0.64204
| 115
| 961
| 5.147826
| 0.513043
| 0.065878
| 0.054054
| 0.087838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001389
| 0.25078
| 961
| 31
| 74
| 31
| 0.820833
| 0.127992
| 0
| 0
| 0
| 0
| 0.218072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61ed3298ce258d1708cb601b97ca2bb3d32448c9
| 18,023
|
py
|
Python
|
netor/tinydb/scripts/netorconf.py
|
aegiacometti/neto
|
4169a93a4d789facfe9a41d214b1a6c15e8f2fb9
|
[
"Apache-2.0"
] | 1
|
2020-01-02T04:31:11.000Z
|
2020-01-02T04:31:11.000Z
|
netor/tinydb/scripts/netorconf.py
|
aegiacometti/neto
|
4169a93a4d789facfe9a41d214b1a6c15e8f2fb9
|
[
"Apache-2.0"
] | null | null | null |
netor/tinydb/scripts/netorconf.py
|
aegiacometti/neto
|
4169a93a4d789facfe9a41d214b1a6c15e8f2fb9
|
[
"Apache-2.0"
] | 1
|
2021-02-23T04:34:48.000Z
|
2021-02-23T04:34:48.000Z
|
#!/usr/bin/env python3
import os
import sys
import configparser
import fileinput
import netorlogging
import datetime
from shutil import copyfile
def _netor_config():
"""
It is used for updating the Netor home directory in the configuration files and scripts.
This is useful if you want to have 2 working installations of Netor in completely independent directories.
It will update the ``NETOR_HOME_DIRECTORY`` variable in the ``netor.config`` file,
and also in the following Netor Python scripts that work with the TinyDB:
# netor/tinydb/scripts/listdb.py
# netor/tinydb/scripts/pushcustdb.py
# netor/tinydb/scripts/worker.py
# netor/tinydb/scripts/switchdb.py
Later it will also update the ``hosts_file`` variable in the following bash scripts:
# bin/netor-ping
# bin/netor-traceroute
:return: nothing
"""
_NETOR_HOME_DIRECTORY = os.getenv('NETOR')
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
netor_config_path_name = _NETOR_HOME_DIRECTORY + "netor/netor.config"
config.read(netor_config_path_name)
if os.path.isdir(_NETOR_HOME_DIRECTORY):
answer = input("\nDefault \"$NETOR/netor\" directory found at:\n" + str(_NETOR_HOME_DIRECTORY) +
"\nDo you want to keep it (y/n): ").lower()
if answer == "y":
print("Keeping same configuration\n")
try:
config['Netor']['netor_home_directory'] = _NETOR_HOME_DIRECTORY
except KeyError:
print("\nConfiguration files do not exist, clone the previous directory before starting the changes\n")
sys.exit(1)
with open(netor_config_path_name, 'w') as configfile:
config.write(configfile)
_update_ansible(_NETOR_HOME_DIRECTORY)
tinydb_log_file = config['TinyDB']['tinydb_log_file']
_update_config(tinydb_log_file, __file__, _NETOR_HOME_DIRECTORY)
sys.exit()
elif answer == "n":
print('If you want to change the $NETOR directory, you must first update the $NETOR environment variable')
print('Set $NETOR environment value by adding/changing the line at the end of the file /etc/environment')
print('NETOR=\"/my/dir/netor/\"')
print('Restart the system and execute this script again')
else:
print("Invalid option\n")
sys.exit()
else:
print("\nDefault \"$NETOR/netor\" NOT found")
print('Set $NETOR environment value by adding/changing the line at the end of the file /etc/environment')
print('NETOR=\"/my/dir/netor/\"')
print('Restart the system and execute this script again')
def _update_ansible(netor_home_directory):
"""
Update Ansible configuration files.
:param netor_home_directory: Netor home directory used for updating the configuration files
:return: nothing
"""
ansible_config_file = os.environ['HOME'] + '/.ansible.cfg'
replace_static_vars_scripts(ansible_config_file, '#inventory ', '= ' + netor_home_directory +
'netor/ansible/hosts', '', '')
replace_static_vars_scripts(ansible_config_file, 'transport', ' = paramiko', '', '')
replace_static_vars_scripts(ansible_config_file, 'host_key_auto_add', ' = True', '', '')
replace_static_vars_scripts(ansible_config_file, 'host_key_checking', ' = False', '', '')
replace_static_vars_scripts(ansible_config_file, 'inventory = ', netor_home_directory +
'netor/ansible/hosts', '', '')
print('\nNetor home directory replaced in Ansible.')
def _backup_filename(new_netor_home_directory, filename):
"""
Create a backup of the specified configuration file
:param new_netor_home_directory: the new Netor home directory to be written into the files
:param filename: file name to backup
:return: nothing
"""
print('\nBacking up ' + filename + ' to ' + new_netor_home_directory + 'netor/salt/backup/')
source = new_netor_home_directory + 'netor/salt/config/' + filename
destination = new_netor_home_directory + 'netor/salt/backup/' + filename + "_" + \
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
copyfile(source, destination)
def _create_master_config_file(new_netor_home_directory, filename):
"""
Create new Salt master configuration file.
:param new_netor_home_directory: the new Netor home directory to be written into the files
:param filename: filename to backup
:return: nothing
"""
full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
file = open(full_path_filename, '+w')
file.write('# for salt-sproxy\n')
file.write('use_existing_proxy: true\n')
file.write('##### Large-scale tuning settings #####\n')
file.write('##########################################\n')
file.write('#max_open_files: 100000\n')
file.write('\n')
file.write('##### Security settings #####\n')
file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
file.write('# authentication, this is only intended for highly secure environments or for\n')
file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
file.write('# you do so at your own risk!\n')
file.write('open_mode: True\n')
file.write('\n')
file.write('# Enable auto_accept, this setting will automatically accept all incoming\n')
file.write('# public keys from the minions. Note that this is insecure.\n')
file.write('auto_accept: True\n')
file.write('\n')
file.write('# The path to the master\'s configuration file.\n')
file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/master\n')
file.write('\n')
file.write('# Directory used to store public key data:\n')
file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/master\n')
file.write('\n')
file.write('##### File Server settings #####\n')
file.write('file_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
file.write('\n')
file.write('##### Pillar settings #####\n')
file.write('pillar_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
file.write('engines:\n')
file.write(' - slack:\n')
file.write(' token: YOUR-TOKEN-GOES-HERE\n')
file.write(' control: true\n')
file.write(' fire_all: False\n')
file.write('######## CREATE YOUR OWN POLICIES FOR COMMAND PERMISSIONS ########\n')
file.write(' groups:\n')
file.write(' default:\n')
file.write(' users:\n')
file.write(' - \'*\'\n')
file.write(' commands:\n')
file.write(' - \'*\'\n')
file.close()
def _update_master_config_file(new_netor_home_directory, filename):
"""
Update Salt master configuration file.
:param new_netor_home_directory: Location where the file is located
:param filename: file name
:return: nothing
"""
_backup_filename(new_netor_home_directory, filename)
# pending to develop update of the file with the new directory
_create_master_config_file(new_netor_home_directory, filename)
def _create_minion_config_file(new_netor_home_directory, filename):
"""
Create Salt minion configuration file.
:param new_netor_home_directory: Location where the file will be located
:param filename: file name
:return: nothing
"""
full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
file = open(full_path_filename, '+w')
file.write('##### Primary configuration settings #####\n')
file.write('master: localhost\n')
file.write('\n')
file.write('# The path to the minion\'s configuration file.\n')
file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/minion\n')
file.write('# The directory to store the pki information in\n')
file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/minion\n')
file.write('\n')
file.write('##### File Directory Settings #####\n')
file.write('file_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
file.write('\n')
file.write('pillar_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
file.write('\n')
file.write('###### Security settings #####\n')
file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
file.write('# authentication, this is only intended for highly secure environments or for\n')
file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
file.write('# you do so at your own risk!\n')
file.write('open_mode: True\n')
file.close()
def _update_minion_config_file(new_netor_home_directory, filename):
"""
Update Salt minion configuration file.
:param new_netor_home_directory: Location where the file is located
:param filename: file name
:return:
"""
_backup_filename(new_netor_home_directory, filename)
# pending to develop update of the file with the new directory
_create_minion_config_file(new_netor_home_directory, filename)
def _create_proxy_config_file(new_netor_home_directory, filename):
"""
Create Salt proxy configuration file.
:param new_netor_home_directory: Location where the file will be located
:param filename: file name
:return:
"""
full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
file = open(full_path_filename, '+w')
file.write('##### Primary configuration settings #####\n')
file.write('\n')
file.write('master: localhost\n')
file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/proxy\n')
file.write('mine_enabled: true # not required, but nice to have\n')
file.write('mine_functions:\n')
file.write(' net.ipaddrs: []\n')
file.write(' net.lldp: []\n')
file.write(' net.mac: []\n')
file.write(' net.arp: []\n')
file.write(' net.interfaces: []\n')
file.write('mine_interval: 5\n')
file.write('\n')
file.write('###### Thread settings #####\n')
file.write('multiprocessing: false\n')
file.write('\n')
file.write('##### File Directory Settings #####\n')
file.write('file_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write('pillar_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write('\n')
file.write('###### Security settings #####\n')
file.write('###########################################\n')
file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
file.write('# authentication, this is only intended for highly secure environments or for\n')
file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
file.write('# you do so at your own risk!\n')
file.write('open_mode: True\n')
file.write('# The directory to store the pki information in\n')
file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/proxy # not required - this separates '
'the proxy keys into a different directory\n')
file.close()
def _update_proxy_config_file(new_netor_home_directory, filename):
"""
Update Salt proxy configuration file.
:param new_netor_home_directory: Directory where the file is located
:param filename: file name
:return:
"""
_backup_filename(new_netor_home_directory, filename)
# pending to develop update of the file with the new directory
_create_proxy_config_file(new_netor_home_directory, filename)
def _file_update_redirect(new_netor_home_directory, filename):
"""
Update the configuration files.
:param new_netor_home_directory: Directory where the files are located
:param filename: file name to update
:return: nothing
"""
if 'master' in filename:
_update_master_config_file(new_netor_home_directory, filename)
elif 'minion' in filename:
_update_minion_config_file(new_netor_home_directory, filename)
elif 'proxy' in filename:
_update_proxy_config_file(new_netor_home_directory, filename)
else:
print('\nError while checking Salt master, minion and proxy configuration files')
sys.exit(1)
def _file_create_redirect(new_netor_home_directory, filename):
"""
Create the configuration files.
:param new_netor_home_directory: the new Netor home directory where the file is created
:param filename: file name to create
:return: nothing
"""
if 'master' in filename:
_create_master_config_file(new_netor_home_directory, filename)
elif 'minion' in filename:
_create_minion_config_file(new_netor_home_directory, filename)
elif 'proxy' in filename:
_create_proxy_config_file(new_netor_home_directory, filename)
else:
print('\nError while checking Salt master, minion and proxy configuration files')
sys.exit(1)
def _create_update_master_minion_proxy(new_netor_home_directory, filename):
"""
Update or create (if they do not exist) Salt configuration files.
:param new_netor_home_directory: the new Netor home directory used in the process
:param filename: file name to update
:return: nothing
"""
full_salt_config_filename = new_netor_home_directory + 'netor/salt/' + filename
if os.path.isfile(full_salt_config_filename):
_file_update_redirect(new_netor_home_directory, filename)
else:
_file_create_redirect(new_netor_home_directory, filename)
def _update_config(tinydb_log_file, __file__, new_netor_home_directory):
"""
Execute the actual updates in the files. Salt master, minion and proxy.
:param tinydb_log_file: the filename to send the logging message after the operation is completed
:param __file__: name of the script sending the message to the log
:param new_netor_home_directory: the new Netor home directory to be written into the files
:return: nothing
"""
_create_update_master_minion_proxy(new_netor_home_directory, 'master')
_create_update_master_minion_proxy(new_netor_home_directory, 'minion')
_create_update_master_minion_proxy(new_netor_home_directory, 'proxy')
print('\nNetor home directory replaced in salt master, minion and proxy.')
print("\nAdd or modify if necessary " + new_netor_home_directory + "bin to your .profile")
print("    vi $HOME/.profile")
print("    PATH=\"$PATH:" + new_netor_home_directory + "bin\"\n")
print("\nAdd or modify if necessary " + new_netor_home_directory + " to /etc/environment")
print("    sudo vi /etc/environment")
print("    NETOR=\"" + new_netor_home_directory + "\"")
print("\nLogoff session or restart system, and login again.")
print("\nATTENTION: If you are using Salt restart the daemons with \"netor-salt-restart\"\n")
netorlogging.log_msg(tinydb_log_file, __file__,
"Netorconf executed. netor.config and static vars in scripts updated.")
def replace_static_vars_scripts(filename, search, replace, delimiter, extra):
"""
Replace line by line the ``NETOR_HOME_DIRECTORY`` static variable in scripts.
:param filename: filename to review
:param search: search pattern to look for
:param replace: pattern to replace with
:param delimiter: to add a delimiter surrounding the path names
:param extra: add extra path information
:return: nothing
"""
try:
for line in fileinput.input(filename, inplace=True):
if search in line:
print((search + delimiter + replace + extra + delimiter), end="\n")
else:
print(line, end="")
except FileNotFoundError:
print("\nERROR File not found " + filename)
print("Manually find systemd folder and file " + filename.split("/")[-1] +
" and modify the parameter \"" + search + "\" in the file to point to " + replace + "\n")
except PermissionError:
print("\nERROR Permission denied to modify file " + filename)
print("Manually modify the parameter -\"" + search + "\" in the file to point to " + replace)
def check_netor_config(netor_home_directory):
"""
Verifies if the ``netor.config`` file exists in the file tree.
:param netor_home_directory: to verify if the netor home directory and file exists
:return: nothing
"""
if (os.path.isdir(netor_home_directory)) and (os.path.isfile((netor_home_directory + "netor/netor.config"))):
return
else:
print("Netor home directory or config file not found.\nRun configuration script (netor-config).")
sys.exit(1)
if __name__ == '__main__':
_netor_config()
print()
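Standalone sketch of the fileinput in-place edit pattern that replace_static_vars_scripts relies on, run against a throwaway file:

import fileinput
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "demo.cfg")
with open(path, "w") as f:
    f.write("inventory = /old/dir/hosts\nother = 1\n")

search, replace = "inventory = ", "/new/dir/netor/ansible/hosts"
for line in fileinput.input(path, inplace=True):
    if search in line:
        print(search + replace, end="\n")  # stdout is redirected into the file
    else:
        print(line, end="")

print(open(path).read())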
| 42.011655
| 119
| 0.668146
| 2,364
| 18,023
| 4.887902
| 0.140017
| 0.082562
| 0.089139
| 0.123583
| 0.648897
| 0.623107
| 0.59524
| 0.563479
| 0.525314
| 0.44855
| 0
| 0.000983
| 0.209455
| 18,023
| 428
| 120
| 42.109813
| 0.810008
| 0.195972
| 0
| 0.453815
| 0
| 0.02008
| 0.378009
| 0.045221
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0
| 0.028112
| 0
| 0.092369
| 0.128514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61edb2c25c99c318b707a55fcdfcaaf007b47999
| 4,780
|
py
|
Python
|
test/api/mutations/test_check_repository_by_commit.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | null | null | null |
test/api/mutations/test_check_repository_by_commit.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | null | null | null |
test/api/mutations/test_check_repository_by_commit.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | null | null | null |
import pytest
from zoo.auditing.models import Issue
from zoo.auditing.check_discovery import Effort, Kind, Severity
pytestmark = pytest.mark.django_db
@pytest.fixture
def scenario(mocker, repository_factory, issue_factory, check_factory, fake_path):
owner, name, sha = "games", "lemmings", "GINLNNIIJL"
repository = repository_factory(id=42, owner=owner, name=name, remote_id=3)
kinds = {}
for namespace, id, status, severity, effort in [
("A", "new", Issue.Status.NEW, Severity.UNDEFINED, Effort.UNDEFINED),
("A", "fixed", Issue.Status.FIXED, Severity.ADVICE, Effort.LOW),
("A", "wontfix", Issue.Status.WONTFIX, Severity.WARNING, Effort.MEDIUM),
("A", "not-found", Issue.Status.NOT_FOUND, Severity.CRITICAL, Effort.HIGH),
("A", "reopened", Issue.Status.REOPENED, Severity.UNDEFINED, Effort.UNDEFINED),
("B", "new", Issue.Status.NEW, Severity.ADVICE, Effort.LOW),
("B", "fixed", Issue.Status.FIXED, Severity.WARNING, Effort.MEDIUM),
("B", "wontfix", Issue.Status.WONTFIX, Severity.CRITICAL, Effort.HIGH),
("B", "not-found", Issue.Status.NOT_FOUND, Severity.ADVICE, Effort.LOW),
("B", "reopened", Issue.Status.REOPENED, Severity.UNDEFINED, Effort.HIGH),
("C", "is-found", Issue.Status.NEW, Severity.CRITICAL, Effort.HIGH),
("C", "not-found", Issue.Status.NOT_FOUND, Severity.WARNING, Effort.LOW),
]:
kind = Kind(
category="tests",
namespace=namespace,
id=id,
severity=severity,
effort=effort,
title=f"Title for {namespace}:{id}",
description=f"Description for {namespace}:{id} | Status: {{was}} -> {{is}}",
)
kinds[kind.key] = kind
if namespace != "C":
issue_factory(repository=repository, kind_key=kind.key, status=status.value)
checks = [
# known issues, found
check_factory("A:new", True, {"was": "new", "is": "known"}),
check_factory("A:fixed", True, {"was": "fixed", "is": "reopened"}),
check_factory("A:wontfix", True, {"was": "wontfix", "is": "wontfix"}),
check_factory("A:not-found", True, {"was": "not-found", "is": "new"}),
check_factory("A:reopened", True, {"was": "reopened", "is": "known"}),
# known issues, not found
check_factory("B:new", False, {"was": "new", "is": "fixed"}),
check_factory("B:fixed", False, {"was": "fixed", "is": "not-found"}),
check_factory("B:wontfix", False, {"was": "wontfix", "is": "fixed"}),
check_factory("B:not-found", False, {"was": "not-found", "is": "not-found"}),
check_factory("B:reopened", False, {"was": "reopened", "is": "fixed"}),
# new issues
check_factory("C:is-found", True),
check_factory("C:not-found", False),
]
mocker.patch("zoo.api.mutations.CHECKS", checks)
mocker.patch("zoo.auditing.check_discovery.KINDS", kinds)
m_download_repository = mocker.patch(
"zoo.api.mutations.download_repository", return_value=fake_path
)
yield repository, sha
m_download_repository.assert_called_once_with(repository, mocker.ANY, sha=sha)
query = """
mutation test ($input: CheckRepositoryByCommitInput!) {
checkRepositoryByCommit (input: $input) {
allCheckResults {
isFound
kindKey
status
details
severity
effort
title
description
}
}
}
"""
def test_unknown_repository(snapshot, call_api):
input = {"owner": "games", "name": "doom", "sha": "IDKFA"}
response = call_api(query, input)
snapshot.assert_match(response)
def test_all_results(scenario, snapshot, call_api):
repository, sha = scenario
input = {"owner": repository.owner, "name": repository.name, "sha": sha}
response = call_api(query, input)
snapshot.assert_match(response)
def test_only_found(scenario, snapshot, call_api):
repository, sha = scenario
input = {
"owner": repository.owner,
"name": repository.name,
"sha": sha,
"onlyFound": True,
}
response = call_api(query, input)
snapshot.assert_match(response)
def test_with_repository(scenario, snapshot, call_api):
repository, sha = scenario
query = """
mutation test ($input: CheckRepositoryByCommitInput!) {
checkRepositoryByCommit (input: $input) {
repository {
id
owner
name
url
remoteId
}
}
}
"""
input = {"owner": repository.owner, "name": repository.name, "sha": sha}
response = call_api(query, input)
snapshot.assert_match(response)
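Minimal sketch (hypothetical names) of the fixture shape used by scenario above: yield the objects a test needs, then assert on the mock at teardown, after the test body has run.

import pytest
from unittest import mock

@pytest.fixture
def scenario():
    downloader = mock.Mock(return_value="/tmp/fake")
    yield downloader, "GINLNNIIJL"
    downloader.assert_called_once()  # teardown-time check, mirrors assert_called_once_with above

def test_download(scenario):
    downloader, sha = scenario
    assert downloader(sha) == "/tmp/fake"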
| 34.142857
| 88
| 0.604393
| 526
| 4,780
| 5.385932
| 0.19962
| 0.039534
| 0.022944
| 0.028239
| 0.43911
| 0.326509
| 0.310272
| 0.222379
| 0.163784
| 0.163784
| 0
| 0.000826
| 0.240167
| 4,780
| 139
| 89
| 34.388489
| 0.779185
| 0.011297
| 0
| 0.192661
| 0
| 0
| 0.262389
| 0.042567
| 0
| 0
| 0
| 0
| 0.045872
| 1
| 0.045872
| false
| 0
| 0.027523
| 0
| 0.073395
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61f65e88bb74b76264401d01893c2004742b5044
| 1,919
|
py
|
Python
|
build.py
|
micklenguyen/hw2-scripting
|
3603a2c4d7518890eacc4f071f347f90dd295ee6
|
[
"MIT"
] | null | null | null |
build.py
|
micklenguyen/hw2-scripting
|
3603a2c4d7518890eacc4f071f347f90dd295ee6
|
[
"MIT"
] | null | null | null |
build.py
|
micklenguyen/hw2-scripting
|
3603a2c4d7518890eacc4f071f347f90dd295ee6
|
[
"MIT"
] | null | null | null |
def main():
content_pages = auto_populate_content_files()
for page in content_pages:
filepath = page['filepath']
output = page['output']
title = page['title']
# Read content of html pages
content = open(filepath).read()
# Invoke function to return finished_page (base.html with filled-in content)
finished_page = apply_template(content, title, content_pages)
write_html(output, finished_page)
def auto_populate_content_files():
import glob
import os
# Loop through files in the content/ directory and save paths as a list
all_html_files = glob.glob("content/*.html")
#print(all_html_files)
# Loop through the all_html_files list, modify and extract file_name and name_only from the path
pages = []
for file_path in all_html_files:
# file_path is the full path (ex. content/resume.html)
# Strip the directory from the file path (ex. content/resume.html -> resume.html)
file_name = os.path.basename(file_path)
#print(file_name)
# Split the name from the file extension (ex. resume.html -> resume)
name_only, extension = os.path.splitext(file_name)
# Build a list with dicts of content information
pages.append({
"filepath": file_path,
"title": name_only,
"output": "docs/" + file_name,
"filename": file_name
})
return pages
def apply_template(content, title, pages):
from jinja2 import Template
# Read base.html and save to template
template_html = open("templates/base.html").read()
new_template = Template(template_html)
finished_page = new_template.render(
title=title,
content=content,
pages=pages,
)
return finished_page
def write_html(output, finished_page):
# Writes the completed html file
open(output, "w+").write(finished_page)
if __name__ == "__main__":
main()
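Minimal sketch of the Jinja2 render step in apply_template, with an inline template instead of templates/base.html:

from jinja2 import Template

template = Template("<title>{{ title }}</title><main>{{ content }}</main>")
print(template.render(title="resume", content="<p>hello</p>"))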
| 24.922078
| 97
| 0.730589
| 281
| 1,919
| 4.790036
| 0.266904
| 0.053492
| 0.035661
| 0.042348
| 0.181278
| 0.142645
| 0.142645
| 0.142645
| 0.142645
| 0.142645
| 0
| 0.000628
| 0.16988
| 1,919
| 77
| 98
| 24.922078
| 0.844319
| 0.364773
| 0
| 0.05
| 0
| 0
| 0.077944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.075
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61f94a0bece7deb448882a08f6a458e64ef93c8e
| 35,113
|
py
|
Python
|
src/jote/jote.py
|
InformaticsMatters/data-manager-job-tester
|
f8915e005f16685d159535a2455628eb1d7ac518
|
[
"MIT"
] | null | null | null |
src/jote/jote.py
|
InformaticsMatters/data-manager-job-tester
|
f8915e005f16685d159535a2455628eb1d7ac518
|
[
"MIT"
] | 1
|
2022-01-28T10:06:28.000Z
|
2022-01-31T14:51:52.000Z
|
src/jote/jote.py
|
InformaticsMatters/data-manager-job-tester
|
f8915e005f16685d159535a2455628eb1d7ac518
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Informatics Matters Job Tester (JOTE).
Get help running this utility with 'jote --help'
"""
import argparse
import os
import shutil
import stat
from stat import S_IRGRP, S_IRUSR, S_IWGRP, S_IWUSR
import subprocess
import sys
from typing import Any, Dict, List, Optional, Tuple
from munch import DefaultMunch
import yaml
from yamllint import linter
from yamllint.config import YamlLintConfig
from decoder import decoder
from .compose import get_test_root, INSTANCE_DIRECTORY, DEFAULT_TEST_TIMEOUT_M
from .compose import Compose
# Where can we expect to find Job definitions?
_DEFINITION_DIRECTORY: str = "data-manager"
# What's the default manifest file?
_DEFAULT_MANIFEST: str = os.path.join(_DEFINITION_DIRECTORY, "manifest.yaml")
# Where can we expect to find test data?
_DATA_DIRECTORY: str = "data"
# Our yamllint configuration file
# from the same directory as us.
_YAMLLINT_FILE: str = os.path.join(os.path.dirname(__file__), "jote.yamllint")
# Read the version file
_VERSION_FILE: str = os.path.join(os.path.dirname(__file__), "VERSION")
with open(_VERSION_FILE, "r", encoding="utf-8") as file_handle:
_VERSION = file_handle.read().strip()
# Job image types (lower-case)
_IMAGE_TYPE_SIMPLE: str = "simple"
_IMAGE_TYPE_NEXTFLOW: str = "nextflow"
_DEFAULT_IMAGE_TYPE: str = _IMAGE_TYPE_SIMPLE
# User HOME directory.
# Used to check for nextflow files if nextflow is executed.
# The user CANNOT have any of their own nextflow config.
_USR_HOME: str = os.environ.get("HOME", "")
def _print_test_banner(collection: str, job_name: str, job_test_name: str) -> None:
print(" ---")
print(f"+ collection={collection} job={job_name} test={job_test_name}")
def _lint(definition_filename: str) -> bool:
"""Lints the provided job definition file."""
if not os.path.isfile(_YAMLLINT_FILE):
print(f"! The yamllint file ({_YAMLLINT_FILE}) is missing")
return False
with open(definition_filename, "rt", encoding="UTF-8") as definition_file:
errors = linter.run(definition_file, YamlLintConfig(file=_YAMLLINT_FILE))
if errors:
# We're given a 'generator' and we don't know if there are errors
# until we iterator over it. So here we print an initial error message
# on the first error.
found_errors: bool = False
for error in errors:
if not found_errors:
print(f'! Job definition "{definition_filename}" fails yamllint:')
found_errors = True
print(error)
if found_errors:
return False
return True
def _validate_schema(definition_filename: str) -> bool:
"""Checks the Job Definition against the decoder's schema."""
with open(definition_filename, "rt", encoding="UTF-8") as definition_file:
job_def: Optional[Dict[str, Any]] = yaml.load(
definition_file, Loader=yaml.FullLoader
)
assert job_def
# If the decoder returns something there's been an error.
error: Optional[str] = decoder.validate_job_schema(job_def)
if error:
print(
f'! Job definition "{definition_filename}"' " does not comply with schema"
)
print("! Full response follows:")
print(error)
return False
return True
def _validate_manifest_schema(manifest_filename: str) -> bool:
"""Checks the Manifest against the decoder's schema."""
with open(manifest_filename, "rt", encoding="UTF-8") as definition_file:
job_def: Optional[Dict[str, Any]] = yaml.load(
definition_file, Loader=yaml.FullLoader
)
assert job_def
# If the decoder returns something there's been an error.
error: Optional[str] = decoder.validate_manifest_schema(job_def)
if error:
print(f'! Manifest "{manifest_filename}"' " does not comply with schema")
print("! Full response follows:")
print(error)
return False
return True
def _check_cwd() -> bool:
"""Checks the execution directory for sanity (cwd). Here we must find
a data-manager directory
"""
expected_directories: List[str] = [_DEFINITION_DIRECTORY, _DATA_DIRECTORY]
for expected_directory in expected_directories:
if not os.path.isdir(expected_directory):
print(f'! Expected directory "{expected_directory}"' " but it is not here")
return False
return True
def _load(manifest_filename: str, skip_lint: bool) -> Tuple[List[DefaultMunch], int]:
"""Loads definition files listed in the manifest
and extracts the definitions that contain at least one test. The
definition blocks for those that have tests (ignored or otherwise)
are returned along with a count of the number of tests found
(ignored or otherwise).
If there was a problem loading the files an empty list and
-ve count is returned.
"""
# Prefix manifest filename with definition directory if required...
manifest_path: str = (
manifest_filename
if manifest_filename.startswith(f"{_DEFINITION_DIRECTORY}/")
else os.path.join(_DEFINITION_DIRECTORY, manifest_filename)
)
if not os.path.isfile(manifest_path):
print(f'! The manifest file is missing ("{manifest_path}")')
return [], -1
if not _validate_manifest_schema(manifest_path):
return [], -1
with open(manifest_path, "r", encoding="UTF-8") as manifest_file:
manifest: Dict[str, Any] = yaml.load(manifest_file, Loader=yaml.FullLoader)
if manifest:
manifest_munch: DefaultMunch = DefaultMunch.fromDict(manifest)
# Iterate through the named files...
job_definitions: List[DefaultMunch] = []
num_tests: int = 0
for jd_filename in manifest_munch["job-definition-files"]:
# Does the definition comply with the schema?
# No options here - it must.
jd_path: str = os.path.join(_DEFINITION_DIRECTORY, jd_filename)
if not _validate_schema(jd_path):
return [], -1
# YAML-lint the definition?
if not skip_lint:
if not _lint(jd_path):
return [], -2
with open(jd_path, "r", encoding="UTF-8") as jd_file:
job_def: Dict[str, Any] = yaml.load(jd_file, Loader=yaml.FullLoader)
if job_def:
jd_munch: DefaultMunch = DefaultMunch.fromDict(job_def)
for jd_name in jd_munch.jobs:
if jd_munch.jobs[jd_name].tests:
num_tests += len(jd_munch.jobs[jd_name].tests)
if num_tests:
jd_munch.definition_filename = jd_filename
job_definitions.append(jd_munch)
return job_definitions, num_tests
def _copy_inputs(test_inputs: List[str], project_path: str) -> bool:
"""Copies all the test files into the test project directory."""
# The files are assumed to reside in the repo's 'data' directory.
print(f'# Copying inputs (from "${{PWD}}/{_DATA_DIRECTORY}")...')
expected_prefix: str = f"{_DATA_DIRECTORY}/"
for test_input in test_inputs:
print(f"# + {test_input}")
if not test_input.startswith(expected_prefix):
print("! FAILURE")
print(f'! Input file {test_input} must start with "{expected_prefix}"')
return False
if not os.path.isfile(test_input):
print("! FAILURE")
print(f"! Missing input file ({test_input})")
return False
# Looks OK, copy it
shutil.copy(test_input, project_path)
print("# Copied")
return True
def _check_exists(name: str, path: str, expected: bool, fix_permissions: bool) -> bool:
exists: bool = os.path.exists(path)
if expected and not exists:
print(f"# exists ({expected}) [FAILED]")
print("! FAILURE")
print(f'! Check exists "{name}" (does not exist)')
return False
if not expected and exists:
print(f"# exists ({expected}) [FAILED]")
print("! FAILURE")
print(f'! Check does not exist "{name}" (exists)')
return False
# File exists or does not exist, as expected.
# If it exists we check its 'user' and 'group' read and write permission.
#
# If 'fix_permissions' is True (i.e. the DM is expected to fix (group) permissions)
# the group permissions are expected to be incorrect. If False
    # then the group permissions are expected to be correct.
if exists:
stat_info: os.stat_result = os.stat(path)
# Check user permissions
file_mode: int = stat_info.st_mode
if file_mode & S_IRUSR == 0 or file_mode & S_IWUSR == 0:
print("! FAILURE")
print(
f'! "{name}" exists but has incorrect user permissions'
f" ({stat.filemode(file_mode)})"
)
return False
# Check group permissions
if file_mode & S_IRGRP == 0 or file_mode & S_IWGRP == 0:
# Incorrect permissions.
if not fix_permissions:
# And not told to fix them!
print("! FAILURE")
print(
f'! "{name}" exists but has incorrect group permissions (fix-permissions=False)'
f" ({stat.filemode(file_mode)})"
)
return False
else:
# Correct group permissions.
if fix_permissions:
# But told to fix them!
print("! FAILURE")
print(
f'! "{name}" exists but has correct group permissions (fix-permissions=True)'
f" ({stat.filemode(file_mode)})"
)
return False
print(f"# exists ({expected}) [OK]")
return True
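# For reference, the permission test above uses stat bit-masks: a file with
# mode -rw-r--r-- has S_IRUSR and S_IWUSR set but S_IWGRP clear, so it passes
# the user check and, unless fix_permissions is True, fails the group check.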
def _check_line_count(name: str, path: str, expected: int) -> bool:
line_count: int = 0
with open(path, "rt", encoding="UTF-8") as check_file:
for _ in check_file:
line_count += 1
if line_count != expected:
print(f"# lineCount ({line_count}) [FAILED]")
print("! FAILURE")
print(f"! Check lineCount {name}" f" (found {line_count}, expected {expected})")
return False
print(f"# lineCount ({line_count}) [OK]")
return True
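# Usage sketch (hypothetical values):
#
#   _check_line_count("output.smi", "/tmp/project/output.smi", 100)
#
# counts the lines in the file and returns True only when exactly 100 are
# found, printing an OK/FAILED marker either way.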
def _check(
t_compose: Compose, output_checks: DefaultMunch, fix_permissions: bool
) -> bool:
"""Runs the checks on the Job outputs.
We currently support 'exists' and 'lineCount'.
If 'fix_permissions' is True we error if the permissions are correct,
if False we error if the permissions are not correct.
"""
assert t_compose
assert isinstance(t_compose, Compose)
assert output_checks
assert isinstance(output_checks, List)
print("# Checking...")
for output_check in output_checks:
output_name: str = output_check.name
print(f"# - {output_name}")
expected_file: str = os.path.join(
t_compose.get_test_project_path(), output_name
)
for check in output_check.checks:
check_type: str = list(check.keys())[0]
if check_type == "exists":
if not _check_exists(
output_name, expected_file, check.exists, fix_permissions
):
return False
elif check_type == "lineCount":
if not _check_line_count(output_name, expected_file, check.lineCount):
return False
else:
print("! FAILURE")
print(f"! Unknown output check type ({check_type})")
return False
print("# Checked")
return True
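# A minimal sketch of the 'checks' block _check() and _test() consume
# (assumed YAML layout, inferred from the attributes read above):
#
#   checks:
#     exitCode: 0
#     outputs:
#     - name: output.smi
#       checks:
#       - exists: true
#       - lineCount: 100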
def _run_nextflow(
command: str, project_path: str, timeout_minutes: int = DEFAULT_TEST_TIMEOUT_M
) -> Tuple[int, str, str]:
"""Runs nextflow in the project directory returning the exit code,
stdout and stderr.
"""
assert command
assert project_path
# The user cannot have a nextflow config in their home directory.
# Nextflow looks here and any config will be merged with the test config.
if _USR_HOME:
home_config: str = os.path.join(_USR_HOME, ".nextflow", "config")
if os.path.exists(home_config) and os.path.isfile(home_config):
print("! FAILURE")
print(
"! A nextflow test but"
f" you have your own config file ({home_config})"
)
print("! You cannot test Jobs and have your own config file")
return 1, "", ""
print('# Executing the test ("nextflow")...')
print(f'# Execution directory is "{project_path}"')
cwd = os.getcwd()
os.chdir(project_path)
try:
test = subprocess.run(
command,
shell=True,
check=False,
capture_output=True,
timeout=timeout_minutes * 60,
)
finally:
os.chdir(cwd)
return test.returncode, test.stdout.decode("utf-8"), test.stderr.decode("utf-8")
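# Usage sketch (the command is hypothetical):
#
#   exit_code, out, err = _run_nextflow("nextflow run workflow.nf", "/tmp/project")
#
# The command runs via the shell from the project directory with a
# timeout of DEFAULT_TEST_TIMEOUT_M minutes, and the original working
# directory is always restored.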
def _test(
args: argparse.Namespace,
filename: str,
collection: str,
job: str,
job_definition: DefaultMunch,
) -> Tuple[int, int, int, int]:
"""Runs the tests for a specific Job definition returning the number
of tests passed, skipped (due to run-level), ignored and failed.
"""
assert job_definition
assert isinstance(job_definition, DefaultMunch)
# The test status, assume success
tests_passed: int = 0
tests_skipped: int = 0
tests_ignored: int = 0
tests_failed: int = 0
if args.image_tag:
print(f"W Replacing image tag. Using '{args.image_tag}'")
job_image: str = f"{job_definition.image.name}:{args.image_tag}"
else:
job_image = f"{job_definition.image.name}:{job_definition.image.tag}"
job_image_memory: str = job_definition.image["memory"]
if job_image_memory is None:
job_image_memory = "1Gi"
job_image_cores: int = job_definition.image["cores"]
if job_image_cores is None:
job_image_cores = 1
job_project_directory: str = job_definition.image["project-directory"]
job_working_directory: str = job_definition.image["working-directory"]
if "type" in job_definition.image:
job_image_type: str = job_definition.image["type"].lower()
else:
job_image_type = _DEFAULT_IMAGE_TYPE
    # Do the (group write) permissions of files the image creates
    # need fixing? Default is 'no'.
# If 'yes' (true) the DM is expected to fix the permissions of the
# generated files once the job has finished.
job_image_fix_permissions: bool = False
if "fix-permissions" in job_definition.image:
job_image_fix_permissions = job_definition.image["fix-permissions"]
for job_test_name in job_definition.tests:
# If a job test has been named,
# skip this test if it doesn't match.
# We do not include this test in the count.
if args.test and not args.test == job_test_name:
continue
_print_test_banner(collection, job, job_test_name)
# The status changes to False if any
# part of this block fails.
test_status: bool = True
print(f"> definition filename={filename}")
# Does the test have an 'ignore' declaration?
# Obey it unless the test is named explicitly -
        # i.e. if the user has named a specific test, run it.
if "ignore" in job_definition.tests[job_test_name]:
if args.test:
print("W Ignoring the ignore: property (told to run this test)")
else:
print('W Ignoring test (found "ignore")')
tests_ignored += 1
continue
# Does the test have a 'run-level' declaration?
# If so, is it higher than the run-level specified?
if args.test:
print("W Ignoring any run-level check (told to run this test)")
else:
if "run-level" in job_definition.tests[job_test_name]:
run_level = job_definition.tests[job_test_name]["run-level"]
print(f"> run-level={run_level}")
if run_level > args.run_level:
print(f'W Skipping test (test is "run-level: {run_level}")')
tests_skipped += 1
continue
else:
print("> run-level=Undefined")
# Render the command for this test.
# First extract the variables and values from 'options'
# and then 'inputs'.
job_variables: Dict[str, Any] = {}
for variable in job_definition.tests[job_test_name].options:
job_variables[variable] = job_definition.tests[job_test_name].options[
variable
]
# If the option variable's declaration is 'multiple'
# it must be handled as a list, e.g. it might be declared like this: -
#
# The double-comment is used
# to avoid mypy getting upset by the 'type' line...
#
# # properties:
# # fragments:
# # title: Fragment molecules
# # multiple: true
# # mime-types:
# # - chemical/x-mdl-molfile
# # type: file
#
# We only pass the basename of the input to the command decoding
# i.e. strip the source directory.
# A list of input files (relative to this directory)
# We populate this with everything we find declared as an input
input_files: List[str] = []
# Process every 'input'
if job_definition.tests[job_test_name].inputs:
for variable in job_definition.tests[job_test_name].inputs:
# Test variable must be known as an input or option.
# Is the variable an option (otherwise it's an input)
variable_is_option: bool = False
variable_is_input: bool = False
if variable in job_definition.variables.options.properties:
variable_is_option = True
elif variable in job_definition.variables.inputs.properties:
variable_is_input = True
if not variable_is_option and not variable_is_input:
print("! FAILURE")
print(
f"! Test variable ({variable})"
+ " not declared as input or option"
)
# Record but do no further processing
tests_failed += 1
test_status = False
# Is it declared as a list?
value_is_list: bool = False
if variable_is_option:
if job_definition.variables.options.properties[variable].multiple:
value_is_list = True
else:
if job_definition.variables.inputs.properties[variable].multiple:
value_is_list = True
# Add each value or just one value
# (depending on whether it's a list)
if value_is_list:
job_variables[variable] = []
for value in job_definition.tests[job_test_name].inputs[variable]:
job_variables[variable].append(os.path.basename(value))
input_files.append(value)
else:
value = job_definition.tests[job_test_name].inputs[variable]
job_variables[variable] = os.path.basename(value)
input_files.append(value)
decoded_command: str = ""
test_environment: Dict[str, str] = {}
if test_status:
# Jote injects Job variables that are expected.
# 'DM_' variables are injected by the Data Manager,
            # others are injected by Jote.
# - DM_INSTANCE_DIRECTORY
job_variables["DM_INSTANCE_DIRECTORY"] = INSTANCE_DIRECTORY
# - CODE_DIRECTORY
job_variables["CODE_DIRECTORY"] = os.getcwd()
# Has the user defined any environment variables in the test?
# If so they must exist, although we don't care about their value.
# Extract them here to pass to the test.
if "environment" in job_definition.tests[job_test_name]:
for env_name in job_definition.tests[job_test_name].environment:
env_value: Optional[str] = os.environ.get(env_name, None)
if env_value is None:
print("! FAILURE")
print("! Test environment variable is not defined")
print(f"! variable={env_name}")
# Record but do no further processing
tests_failed += 1
test_status = False
break
test_environment[env_name] = env_value
if test_status:
# Get the raw (encoded) command from the job definition...
raw_command: str = job_definition.command
# Decode it using our variables...
decoded_command, test_status = decoder.decode(
raw_command,
job_variables,
"command",
decoder.TextEncoding.JINJA2_3_0,
)
if not test_status:
print("! FAILURE")
print("! Failed to render command")
print(f"! error={decoded_command}")
# Record but do no further processing
tests_failed += 1
test_status = False
# Create the test directories, docker-compose file
# and copy inputs...
t_compose: Optional[Compose] = None
job_command: str = ""
project_path: str = ""
if test_status:
# The command must not contain new-lines.
# So split then join the command.
assert decoded_command
job_command = "".join(decoded_command.splitlines())
print(f"> image={job_image}")
print(f"> image-type={job_image_type}")
print(f"> command={job_command}")
# Create the project
t_compose = Compose(
collection,
job,
job_test_name,
job_image,
job_image_type,
job_image_memory,
job_image_cores,
job_project_directory,
job_working_directory,
job_command,
test_environment,
args.run_as_user,
)
project_path = t_compose.create()
if input_files:
# Copy the data into the test's project directory.
# Data's expected to be found in the Job's 'inputs'.
test_status = _copy_inputs(input_files, project_path)
# Run the container
if test_status and not args.dry_run:
timeout_minutes: int = DEFAULT_TEST_TIMEOUT_M
if "timeout-minutes" in job_definition.tests[job_test_name]:
timeout_minutes = job_definition.tests[job_test_name]["timeout-minutes"]
exit_code: int = 0
out: str = ""
err: str = ""
if job_image_type in [_IMAGE_TYPE_SIMPLE]:
# Run the image container
assert t_compose
exit_code, out, err = t_compose.run(timeout_minutes)
elif job_image_type in [_IMAGE_TYPE_NEXTFLOW]:
# Run nextflow directly
assert job_command
assert project_path
exit_code, out, err = _run_nextflow(
job_command, project_path, timeout_minutes
)
else:
print("! FAILURE")
print(f"! unsupported image-type ({job_image_type}")
test_status = False
if test_status:
expected_exit_code: int = job_definition.tests[
job_test_name
].checks.exitCode
if exit_code != expected_exit_code:
print("! FAILURE")
print(
f"! exit_code={exit_code}"
f" expected_exit_code={expected_exit_code}"
)
print("! Test stdout follows...")
print(out)
print("! Test stderr follows...")
print(err)
test_status = False
if args.verbose:
print(out)
# Inspect the results
# (only if successful so far)
if (
test_status
and not args.dry_run
and job_definition.tests[job_test_name].checks.outputs
):
assert t_compose
test_status = _check(
t_compose,
job_definition.tests[job_test_name].checks.outputs,
job_image_fix_permissions,
)
# Clean-up
if test_status and not args.keep_results:
assert t_compose
t_compose.delete()
# Count?
if test_status:
print("- SUCCESS")
tests_passed += 1
else:
tests_failed += 1
# Told to stop on first failure?
if not test_status and args.exit_on_failure:
break
return tests_passed, tests_skipped, tests_ignored, tests_failed
def _wipe() -> None:
"""Wipes the results of all tests."""
test_root: str = get_test_root()
if os.path.isdir(test_root):
shutil.rmtree(test_root)
def arg_check_run_level(value: str) -> int:
"""A type checker for the argparse run-level."""
i_value = int(value)
if i_value < 1:
raise argparse.ArgumentTypeError("Minimum value is 1")
if i_value > 100:
raise argparse.ArgumentTypeError("Maximum value is 100")
return i_value
def arg_check_run_as_user(value: str) -> int:
"""A type checker for the argparse run-as-user."""
i_value = int(value)
if i_value < 0:
raise argparse.ArgumentTypeError("Minimum value is 0")
if i_value > 65_535:
raise argparse.ArgumentTypeError("Maximum value is 65535")
return i_value
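# Doctest-style sketches of the two checkers above (values illustrative):
#
#   >>> arg_check_run_level("50")
#   50
#   >>> arg_check_run_as_user("65536")  # raises argparse.ArgumentTypeError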
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
def main() -> int:
"""The console script entry-point. Called when jote is executed
or from __main__.py, which is used by the installed console script.
"""
# Build a command-line parser
# and process the command-line...
arg_parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Data Manager Job Tester",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
arg_parser.add_argument(
"-m",
"--manifest",
help="The manifest file.",
default=_DEFAULT_MANIFEST,
type=str,
)
arg_parser.add_argument(
"-c",
"--collection",
help="The Job collection to test. If not"
" specified the Jobs in all collections"
" will be candidates for testing.",
)
arg_parser.add_argument(
"-j",
"--job",
help="The Job to test. If specified the collection"
" is required. If not specified all the Jobs"
" that match the collection will be"
" candidates for testing.",
)
arg_parser.add_argument(
"--image-tag",
help="An image tag to use rather then the one defined in the job definition.",
)
arg_parser.add_argument(
"-t",
"--test",
help="A specific test to run. If specified the job"
" is required. If not specified all the Tests"
" that match the collection will be"
" candidates for testing.",
)
arg_parser.add_argument(
"-r",
"--run-level",
help="The run-level of the tests you want to"
" execute. All tests at or below this level"
" will be executed, a value from 1 to 100",
default=1,
type=arg_check_run_level,
)
arg_parser.add_argument(
"-u",
"--run-as-user",
help="A user ID to run the tests as. If not set"
" your user ID is used to run the test"
" containers.",
type=arg_check_run_as_user,
)
arg_parser.add_argument(
"-d",
"--dry-run",
action="store_true",
help="Setting this flag will result in jote"
" simply parsing the Job definitions"
" but not running any of the tests."
" It is can be used to check the syntax of"
" your definition file and its test commands"
" and data.",
)
arg_parser.add_argument(
"-k",
"--keep-results",
action="store_true",
help="Normally all material created to run each"
" test is removed when the test is"
" successful",
)
arg_parser.add_argument(
"-v", "--verbose", action="store_true", help="Displays test stdout"
)
arg_parser.add_argument(
"--version", action="store_true", help="Displays jote version"
)
arg_parser.add_argument(
"-x",
"--exit-on-failure",
action="store_true",
help="Normally jote reports test failures but"
" continues with the next test."
" Setting this flag will force jote to"
" stop when it encounters the first failure",
)
arg_parser.add_argument(
"-s",
"--skip-lint",
action="store_true",
help="Normally jote runs the job definition"
" files against the prevailing lint"
" configuration of the repository under test."
" Using this flag skips that step",
)
arg_parser.add_argument(
"-w",
"--wipe",
action="store_true",
help="Wipe does nto run any tests, it simply"
" wipes the repository clean of jote"
" test material. It would be wise"
" to run this once you have finished testing."
" Using this negates the effect of any other"
" option.",
)
arg_parser.add_argument(
"-a",
"--allow-no-tests",
action="store_true",
help="Normally jote expects to run tests"
" and if you have no tests jote will fail."
" To prevent jote complaining about the lack"
" of tests you can use this option.",
)
args: argparse.Namespace = arg_parser.parse_args()
# If a version's been asked for act on it and then leave
if args.version:
print(_VERSION)
return 0
if args.test and args.job is None:
arg_parser.error("--test requires --job")
if args.job and args.collection is None:
arg_parser.error("--job requires --collection")
if args.wipe and args.keep_results:
arg_parser.error("Cannot use --wipe and --keep-results")
# Args are OK if we get here.
total_passed_count: int = 0
total_skipped_count: int = 0
total_ignore_count: int = 0
total_failed_count: int = 0
# Check CWD
if not _check_cwd():
print("! FAILURE")
print("! The directory does not look correct")
arg_parser.error("Done (FAILURE)")
# Told to wipe?
# If so wipe, and leave.
if args.wipe:
_wipe()
print("Done [Wiped]")
return 0
print(f'# Using manifest "{args.manifest}"')
# Load all the files we can and then run the tests.
job_definitions, num_tests = _load(args.manifest, args.skip_lint)
if num_tests < 0:
print("! FAILURE")
print("! Definition file has failed yamllint")
arg_parser.error("Done (FAILURE)")
msg: str = "test" if num_tests == 1 else "tests"
print(f"# Found {num_tests} {msg}")
if args.collection:
print(f'# Limiting to Collection "{args.collection}"')
if args.job:
print(f'# Limiting to Job "{args.job}"')
if args.test:
print(f'# Limiting to Test "{args.test}"')
if job_definitions:
# There is at least one job-definition with a test
# Now process all the Jobs that have tests...
for job_definition in job_definitions:
# If a collection's been named,
# skip this file if it's not the named collection
collection: str = job_definition.collection
if args.collection and not args.collection == collection:
continue
for job_name in job_definition.jobs:
# If a Job's been named,
# skip this test if the job does not match
if args.job and not args.job == job_name:
continue
if job_definition.jobs[job_name].tests:
num_passed, num_skipped, num_ignored, num_failed = _test(
args,
job_definition.definition_filename,
collection,
job_name,
job_definition.jobs[job_name],
)
total_passed_count += num_passed
total_skipped_count += num_skipped
total_ignore_count += num_ignored
total_failed_count += num_failed
# Break out of this loop if told to stop on failures
if num_failed > 0 and args.exit_on_failure:
break
# Break out of this loop if told to stop on failures
if num_failed > 0 and args.exit_on_failure:
break
# Success or failure?
# It's an error to find no tests.
print(" ---")
dry_run: str = "[DRY RUN]" if args.dry_run else ""
summary: str = (
f"passed={total_passed_count}"
f" skipped={total_skipped_count}"
f" ignored={total_ignore_count}"
f" failed={total_failed_count}"
)
failed: bool = False
if total_failed_count:
arg_parser.error(f"Done (FAILURE) {summary} {dry_run}")
failed = True
elif total_passed_count == 0 and not args.allow_no_tests:
arg_parser.error(
f"Done (FAILURE) {summary}" f" (at least one test must pass)" f" {dry_run}"
)
failed = True
else:
print(f"Done (OK) {summary} {dry_run}")
# Automatically wipe.
# If there have been no failures
# and not told to keep directories.
if total_failed_count == 0 and not args.keep_results:
_wipe()
return 1 if failed else 0
# -----------------------------------------------------------------------------
# MAIN
# -----------------------------------------------------------------------------
if __name__ == "__main__":
_RET_VAL: int = main()
if _RET_VAL != 0:
sys.exit(_RET_VAL)
| 35.183367
| 100
| 0.584741
| 4,265
| 35,113
| 4.637749
| 0.124033
| 0.034176
| 0.012235
| 0.016987
| 0.277199
| 0.207634
| 0.14909
| 0.108443
| 0.088827
| 0.076188
| 0
| 0.003386
| 0.318771
| 35,113
| 997
| 101
| 35.218656
| 0.823537
| 0.183123
| 0
| 0.245199
| 0
| 0
| 0.196027
| 0.023108
| 0
| 0
| 0
| 0
| 0.023634
| 1
| 0.023634
| false
| 0.013294
| 0.022157
| 0
| 0.100443
| 0.134417
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61f9d61ddf16dfe982de5cd443717f5e39b05a82
| 7,027
|
py
|
Python
|
transforms/waveform.py
|
koukyo1994/kaggle-rfcx
|
c3573d014d99312b58882e7b939de6c1055129b1
|
[
"MIT"
] | 6
|
2021-02-18T05:18:17.000Z
|
2022-02-19T02:49:32.000Z
|
transforms/waveform.py
|
koukyo1994/kaggle-rfcx
|
c3573d014d99312b58882e7b939de6c1055129b1
|
[
"MIT"
] | null | null | null |
transforms/waveform.py
|
koukyo1994/kaggle-rfcx
|
c3573d014d99312b58882e7b939de6c1055129b1
|
[
"MIT"
] | 2
|
2021-02-18T11:31:50.000Z
|
2022-02-19T02:49:07.000Z
|
import colorednoise as cn
import librosa
import numpy as np
def get_waveform_transforms(config: dict, phase: str):
transforms = config.get("transforms")
if transforms is None:
return None
else:
if transforms[phase] is None:
return None
trns_list = []
for trns_conf in transforms[phase]:
trns_name = trns_conf["name"]
trns_params = {} if trns_conf.get("params") is None else trns_conf["params"]
if globals().get(trns_name) is not None:
trns_cls = globals()[trns_name]
trns_list.append(trns_cls(**trns_params))
if len(trns_list) > 0:
return Compose(trns_list)
else:
return None
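# A minimal sketch of the config dict this factory expects (assumed layout,
# inferred from the keys read above; the phase name "train" is illustrative):
#
#   config = {
#       "transforms": {
#           "train": [
#               {"name": "GaussianNoiseSNR", "params": {"min_snr": 5.0}},
#               {"name": "PitchShift"},
#           ]
#       }
#   }
#   transform = get_waveform_transforms(config, "train")  # -> Compose([...])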
class Compose:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, y: np.ndarray):
for trns in self.transforms:
y = trns(y)
return y
class OneOf:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, y: np.ndarray):
n_trns = len(self.transforms)
trns_idx = np.random.choice(n_trns)
trns = self.transforms[trns_idx]
y = trns(y)
return y
class AudioTransform:
def __init__(self, always_apply=False, p=0.5):
self.always_apply = always_apply
self.p = p
def __call__(self, y: np.ndarray):
if self.always_apply:
return self.apply(y)
else:
if np.random.rand() < self.p:
return self.apply(y)
else:
return y
def apply(self, y: np.ndarray):
raise NotImplementedError
class Normalize:
def __call__(self, y: np.ndarray):
max_vol = np.abs(y).max()
y_vol = y * 1 / max_vol
return np.asfortranarray(y_vol)
class NewNormalize:
def __call__(self, y: np.ndarray):
y_mm = y - y.mean()
        return y_mm / np.abs(y_mm).max()
class LibrosaNormalize:
def __call__(self, y: np.ndarray):
return librosa.util.normalize(y)
class GaussianNoiseSNR(AudioTransform):
def __init__(self, always_apply=False, p=0.5, min_snr=5.0, max_snr=20.0, **kwargs):
super().__init__(always_apply, p)
self.min_snr = min_snr
self.max_snr = max_snr
def apply(self, y: np.ndarray, **params):
snr = np.random.uniform(self.min_snr, self.max_snr)
a_signal = np.sqrt(y ** 2).max()
a_noise = a_signal / (10 ** (snr / 20))
white_noise = np.random.randn(len(y))
a_white = np.sqrt(white_noise ** 2).max()
augmented = (y + white_noise * 1 / a_white * a_noise).astype(y.dtype)
return augmented
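# Worked example of the SNR scaling above: with snr = 20 dB the noise
# amplitude becomes a_signal / 10**(20/20) = a_signal / 10, i.e. the white
# noise is rescaled to one tenth of the signal's peak before mixing.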
class PinkNoiseSNR(AudioTransform):
def __init__(self, always_apply=False, p=0.5, min_snr=5.0, max_snr=20.0, **kwargs):
super().__init__(always_apply, p)
self.min_snr = min_snr
self.max_snr = max_snr
def apply(self, y: np.ndarray, **params):
snr = np.random.uniform(self.min_snr, self.max_snr)
a_signal = np.sqrt(y ** 2).max()
a_noise = a_signal / (10 ** (snr / 20))
pink_noise = cn.powerlaw_psd_gaussian(1, len(y))
a_pink = np.sqrt(pink_noise ** 2).max()
augmented = (y + pink_noise * 1 / a_pink * a_noise).astype(y.dtype)
return augmented
class PitchShift(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_steps=5, sr=32000):
super().__init__(always_apply, p)
self.max_steps = max_steps
self.sr = sr
def apply(self, y: np.ndarray, **params):
n_steps = np.random.randint(-self.max_steps, self.max_steps)
augmented = librosa.effects.pitch_shift(y, sr=self.sr, n_steps=n_steps)
return augmented
class Identity(AudioTransform):
def __init__(self, always_apply=False, p=0.5):
super().__init__(always_apply=always_apply, p=p)
def apply(self, y: np.ndarray, **params):
return y
class PitchUp(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_steps=5, sr=32000):
super().__init__(always_apply=always_apply, p=p)
self.max_steps = max_steps
self.sr = sr
def apply(self, y: np.ndarray, **params):
n_steps = np.random.randint(0, self.max_steps)
augmented = librosa.effects.pitch_shift(y, sr=self.sr, n_steps=n_steps)
return augmented
class PitchDown(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_steps=5, sr=32000):
super().__init__(always_apply=always_apply, p=p)
self.max_steps = max_steps
self.sr = sr
def apply(self, y: np.ndarray, **params):
n_steps = np.random.randint(-self.max_steps, 0)
augmented = librosa.effects.pitch_shift(y, sr=self.sr, n_steps=n_steps)
return augmented
class TimeStretch(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_rate=1.2):
super().__init__(always_apply, p)
self.max_rate = max_rate
def apply(self, y: np.ndarray, **params):
        # NOTE: librosa's time_stretch requires a strictly positive rate.
        rate = np.random.uniform(0, self.max_rate)
        augmented = librosa.effects.time_stretch(y, rate=rate)
return augmented
class TimeShift(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_shift_second=2, sr=32000, padding_mode="replace"):
super().__init__(always_apply, p)
assert padding_mode in ["replace", "zero"], "`padding_mode` must be either 'replace' or 'zero'"
self.max_shift_second = max_shift_second
self.sr = sr
self.padding_mode = padding_mode
def apply(self, y: np.ndarray, **params):
shift = np.random.randint(-self.sr * self.max_shift_second, self.sr * self.max_shift_second)
augmented = np.roll(y, shift)
if self.padding_mode == "zero":
if shift > 0:
augmented[:shift] = 0
else:
augmented[shift:] = 0
return augmented
class VolumeControl(AudioTransform):
def __init__(self, always_apply=False, p=0.5, db_limit=10, mode="uniform"):
super().__init__(always_apply, p)
assert mode in ["uniform", "fade", "fade", "cosine", "sine"], \
"`mode` must be one of 'uniform', 'fade', 'cosine', 'sine'"
self.db_limit = db_limit
self.mode = mode
def apply(self, y: np.ndarray, **params):
db = np.random.uniform(-self.db_limit, self.db_limit)
if self.mode == "uniform":
db_translated = 10 ** (db / 20)
elif self.mode == "fade":
lin = np.arange(len(y))[::-1] / (len(y) - 1)
db_translated = 10 ** (db * lin / 20)
elif self.mode == "cosine":
cosine = np.cos(np.arange(len(y)) / len(y) * np.pi * 2)
db_translated = 10 ** (db * cosine / 20)
else:
sine = np.sin(np.arange(len(y)) / len(y) * np.pi * 2)
db_translated = 10 ** (db * sine / 20)
augmented = y * db_translated
return augmented
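# A usage sketch composing the transforms above (parameter values are
# illustrative, not from the original):
#
#   augment = Compose([
#       OneOf([GaussianNoiseSNR(p=1.0), PinkNoiseSNR(p=1.0)]),
#       VolumeControl(p=0.5, db_limit=10, mode="sine"),
#   ])
#   y_aug = augment(np.random.randn(32000).astype(np.float32))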
| 31.231111
| 104
| 0.605237
| 974
| 7,027
| 4.106776
| 0.13347
| 0.06875
| 0.028
| 0.056
| 0.56375
| 0.53925
| 0.47825
| 0.45725
| 0.414
| 0.414
| 0
| 0.019654
| 0.268678
| 7,027
| 224
| 105
| 31.370536
| 0.758708
| 0
| 0
| 0.479042
| 0
| 0
| 0.028889
| 0
| 0
| 0
| 0
| 0
| 0.011976
| 1
| 0.173653
| false
| 0
| 0.017964
| 0.011976
| 0.407186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61fa91668b7e930a4d4c6429b8910bfdb88b86e5
| 1,095
|
py
|
Python
|
plyplus/test/test_trees.py
|
rubycandy/test-plyplus
|
ced9377e6c26dcf308dd9f480411af9c8dbe9c56
|
[
"MIT"
] | 169
|
2015-01-16T12:48:23.000Z
|
2021-12-09T16:00:13.000Z
|
plyplus/test/test_trees.py
|
rubycandy/test-plyplus
|
ced9377e6c26dcf308dd9f480411af9c8dbe9c56
|
[
"MIT"
] | 26
|
2015-01-23T16:30:28.000Z
|
2018-07-07T09:14:18.000Z
|
plyplus/test/test_trees.py
|
rubycandy/test-plyplus
|
ced9377e6c26dcf308dd9f480411af9c8dbe9c56
|
[
"MIT"
] | 53
|
2015-01-22T20:20:10.000Z
|
2021-12-05T13:39:57.000Z
|
from __future__ import absolute_import
import unittest
import logging
import copy
import pickle
from plyplus.plyplus import STree
logging.basicConfig(level=logging.INFO)
class TestSTrees(unittest.TestCase):
def setUp(self):
self.tree1 = STree('a', [STree(x, y) for x, y in zip('bcd', 'xyz')])
def test_deepcopy(self):
assert self.tree1 == copy.deepcopy(self.tree1)
def test_parents(self):
s = copy.deepcopy(self.tree1)
s.calc_parents()
for i, x in enumerate(s.tail):
assert x.parent() == s
assert x.index_in_parent == i
def test_pickle(self):
s = copy.deepcopy(self.tree1)
data = pickle.dumps(s)
assert pickle.loads(data) == s
def test_pickle_with_parents(self):
s = copy.deepcopy(self.tree1)
s.calc_parents()
data = pickle.dumps(s)
s2 = pickle.loads(data)
assert s2 == s
for i, x in enumerate(s2.tail):
assert x.parent() == s2
assert x.index_in_parent == i
if __name__ == '__main__':
unittest.main()
| 24.886364
| 76
| 0.613699
| 149
| 1,095
| 4.342282
| 0.315436
| 0.083462
| 0.098918
| 0.12983
| 0.293663
| 0.244204
| 0.139104
| 0.139104
| 0.139104
| 0.139104
| 0
| 0.012531
| 0.271233
| 1,095
| 43
| 77
| 25.465116
| 0.798246
| 0
| 0
| 0.272727
| 0
| 0
| 0.013699
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 1
| 0.151515
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61fae1b5b671ac52f912549b4f9c186cb38b0495
| 1,563
|
py
|
Python
|
misaligned.py
|
clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya
|
1861f2db11a508e9c1e2f7ce351d11d87c0c734c
|
[
"MIT"
] | null | null | null |
misaligned.py
|
clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya
|
1861f2db11a508e9c1e2f7ce351d11d87c0c734c
|
[
"MIT"
] | null | null | null |
misaligned.py
|
clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya
|
1861f2db11a508e9c1e2f7ce351d11d87c0c734c
|
[
"MIT"
] | null | null | null |
MAJOR_COLORS = ["White", "Red", "Black", "Yellow", "Violet"]
MINOR_COLORS = ["Blue", "Orange", "Green", "Brown", "Slate"]
def get_color_from_pair_number(pair_number):
zero_based_pair_number = pair_number - 1
major_index = zero_based_pair_number // len(MINOR_COLORS)
minor_index = zero_based_pair_number % len(MINOR_COLORS)
return MAJOR_COLORS[major_index], MINOR_COLORS[minor_index]
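# Doctest-style sketch of the mapping above:
#
#   >>> get_color_from_pair_number(7)
#   ('Red', 'Orange')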
def print_color_map():
for i in range(5):
for j in range(5):
            pair_number = i * 5 + j + 1  # +1 because pair numbers are 1-based while list indices start at 0
print(f'{pair_number} | {get_color_from_pair_number(pair_number)[0]} | {get_color_from_pair_number(pair_number)[1]}')
def test_color_map(test_pair_number, expected_major_color, expected_minor_color):
    assert get_color_from_pair_number(test_pair_number) == (expected_major_color, expected_minor_color)
# testing a sample of the 25 color pairs
if __name__ == '__main__':
print_color_map()
test_color_map(1, 'White', 'Blue')
test_color_map(2, 'White', 'Orange')
test_color_map(3, 'White', 'Green')
test_color_map(4, 'White', 'Brown')
test_color_map(5, 'White', 'Slate')
test_color_map(6, 'Red', 'Blue')
test_color_map(7, 'Red', 'Orange')
test_color_map(8, 'Red', 'Green')
test_color_map(9, 'Red', 'Brown')
test_color_map(10, 'Red', 'Slate')
test_color_map(11, 'Black', 'Blue')
test_color_map(12, 'Black', 'Orange')
test_color_map(13, 'Black', 'Green')
print("All is well (maybe!)\n")
| 36.348837
| 129
| 0.685861
| 237
| 1,563
| 4.122363
| 0.303797
| 0.139202
| 0.171955
| 0.065507
| 0.325486
| 0.28045
| 0.28045
| 0.18219
| 0.104401
| 0
| 0
| 0.021622
| 0.171465
| 1,563
| 42
| 130
| 37.214286
| 0.732819
| 0.060141
| 0
| 0.064516
| 0
| 0
| 0.209413
| 0.060027
| 0
| 0
| 0
| 0
| 0.032258
| 1
| 0.096774
| false
| 0
| 0
| 0
| 0.129032
| 0.16129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
61fe5553a131ad8494dec157c4505511e27beecb
| 611
|
py
|
Python
|
examples/embed_cmd.py
|
bentettmar/discord.py-self_embed
|
4253ea7977b17972de2e15de3606a183f70b22b0
|
[
"MIT"
] | 2
|
2022-03-31T04:06:05.000Z
|
2022-03-31T16:39:40.000Z
|
examples/embed_cmd.py
|
bentettmar/discord.py-self_embed
|
4253ea7977b17972de2e15de3606a183f70b22b0
|
[
"MIT"
] | 3
|
2022-03-29T11:58:16.000Z
|
2022-03-31T16:41:13.000Z
|
examples/embed_cmd.py
|
bentettmar/discord.py-self_embed
|
4253ea7977b17972de2e15de3606a183f70b22b0
|
[
"MIT"
] | null | null | null |
import discord_self_embed
from discord.ext import commands
bot = commands.Bot(command_prefix=".", self_bot=True)
@bot.event
async def on_ready():
print("ready")
@bot.command(name="embed")
async def embed_cmd(ctx):
embed = discord_self_embed.Embed("discord.py-self_embed", description="A way for selfbots to send embeds again.", colour="ff0000", url="https://github.com/bentettmar/discord.py-self_embed")
embed.set_author("Ben Tettmar")
await ctx.send(embed.generate_url(hide_url=True)) # You can also send the embed converted to a string which will auto hide the url.
bot.run("TOKEN_HERE")
| 33.944444
| 193
| 0.749591
| 97
| 611
| 4.57732
| 0.57732
| 0.081081
| 0.072072
| 0.081081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.12275
| 611
| 17
| 194
| 35.941176
| 0.820896
| 0.129296
| 0
| 0
| 0
| 0
| 0.283019
| 0.039623
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1101b9ca063e23e2fd57ae664425f377c0723f09
| 8,823
|
py
|
Python
|
analysis.py
|
liunx/lmms
|
ea54f64934d90887a38446ef02ed2baed91548db
|
[
"MIT"
] | null | null | null |
analysis.py
|
liunx/lmms
|
ea54f64934d90887a38446ef02ed2baed91548db
|
[
"MIT"
] | null | null | null |
analysis.py
|
liunx/lmms
|
ea54f64934d90887a38446ef02ed2baed91548db
|
[
"MIT"
] | null | null | null |
import re
import copy
from operator import itemgetter
import music21 as m21
class Core:
meter_len = 192
notes = {'C': 60, 'D': 62, 'E': 64, 'F': 65, 'G': 67, 'A': 69, 'B': 71}
percussion = {
35: 'AcousticBassDrum', 36: 'BassDrum1', 37: 'SideStick', 38: 'AcousticSnare',
39: 'HandClap', 40: 'ElectricSnare', 41: 'LowFloorTom', 42: 'ClosedHiHat',
43: 'HighFloorTom', 44: 'PedalHi-Hat', 45: 'LowTom', 46: 'OpenHi-Hat',
47: 'Low-MidTom', 48: 'Hi-MidTom', 49: 'CrashCymbal1', 50: 'HighTom',
51: 'RideCymbal1', 52: 'ChineseCymbal', 53: 'RideBell', 54: 'Tambourine',
55: 'SplashCymbal', 56: 'Cowbell', 57: 'CrashCymbal2', 58: 'Vibraslap',
59: 'RideCymbal2', 60: 'HiBongo', 61: 'LowBongo', 62: 'MuteHiConga',
63: 'OpenHiConga', 64: 'LowConga', 65: 'HighTimbale', 66: 'LowTimbale',
67: 'HighAgogo', 68: 'LowAgogo', 69: 'Cabasa', 70: 'Maracas', 71: 'ShortWhistle',
72: 'LongWhistle', 73: 'ShortGuiro', 74: 'LongGuiro', 75: 'Claves', 76: 'HiWoodBlock',
77: 'LowWoodBlock', 78: 'MuteCuica', 79: 'OpenCuica', 80: 'MuteTriangle', 81: 'OpenTriangle'}
def __init__(self, staff, data):
self.total_len = 0
self.noteset = []
self.roman_numerals = []
self.instructions = {}
self.styles = {}
self.emotions = {}
self.time_signs = {0: staff['timesign']}
self.keys = {0: staff['key']}
self.analysis(copy.deepcopy(data))
def show_noteset(self):
print("==== total notes ====")
for i in self.noteset:
print(i)
def note_midi(self, note):
step = note[0].upper()
midi = self.notes[step]
if note[0].islower():
midi += 12 * note.count("'")
else:
midi -= 12 * note.count(step)
if note.count('-') > 0:
alter = note.count('-')
midi -= alter
elif note.count('#') > 0:
alter = note.count('#')
midi += alter
return midi
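    # Worked examples of the octave rules above: note_midi("c") == 60,
    # note_midi("C") == 48 and note_midi("CC") == 36 (each repeated uppercase
    # letter drops an octave), while each trailing apostrophe on a lowercase
    # note adds one (note_midi("c'") == 72).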
def note_len(self, note):
num = 0
dot = 0
# Rest & Notation
m = re.match(r'([a-grA-GR\'#-]+)(\d+)([.]*)', note)
if not m:
return 0
num = int(m.group(2))
dot = m.group(3).count('.')
n1 = self.meter_len / num
curr = n1
for _ in range(dot):
n1 += curr / 2
curr = curr / 2
return n1
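    # Worked example of the duration rule above: with meter_len = 192,
    # note_len('c4.') is 192/4 = 48 plus one dot worth 24, giving 72.0.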
def to_note(self, note, offset):
d = {}
d['offset'] = offset
midi = self.note_midi(note)
d['midi'] = midi
if note.count('~') > 0:
d['tie'] = 1
else:
d['tie'] = 0
return d
def is_note(self, note):
m = re.match(r'[a-grA-GR\'#-]+\d+', note)
if not m:
return False
return True
def divide_keyword(self, n, offset):
if n.startswith('!!'):
d = {'offset': offset, 'instruction': n[2:]}
self.instructions[offset] = n[2:]
elif n.startswith('$$'):
self.styles[offset] = n[2:]
elif n.startswith('!'):
d = {'offset': offset, 'roman_numeral': n[1:]}
self.roman_numerals.append(d)
elif n.startswith('*'):
self.emotions[offset] = n[1:]
else:
raise ValueError("Unknown keyword: {}!".format(n))
def to_noteset(self, data):
offset = 0
_len = 0
for n in data:
# chord | trip
if type(n) == list:
if n[0] == 'chord':
_len = self.note_len(n[-1])
for _n in n[1:]:
d = self.to_note(_n, offset)
d['len'] = _len
self.noteset.append(d)
offset += _len
elif n[0] == 'tripchord':
_len = self.note_len(n[-1]) * 2 / 3
for _n in n[1:]:
d = self.to_note(_n, offset)
d['len'] = _len
self.noteset.append(d)
offset += _len
elif n[0] == 'trip':
_len = self.note_len(n[-1]) * 2 / 3
for _n in n[1:]:
if _n[0] != 'r':
d = self.to_note(_n, offset)
d['len'] = _len
self.noteset.append(d)
offset += _len
else:
raise ValueError("Unknown keyword: {}!".format(n[0]))
else:
# skip keywords
if not self.is_note(n):
self.divide_keyword(n, offset)
continue
# skip Rest note
if n[0].upper() == 'R':
_len = self.note_len(n)
offset += _len
continue
d = self.to_note(n, offset)
_len = self.note_len(n)
offset += _len
d['len'] = _len
self.noteset.append(d)
self.total_len = offset
def _tie(self, nset, i):
_len = len(self.noteset)
while i < _len:
_nset = self.noteset[i]
if _nset['midi'] == nset['midi'] and (nset['offset'] + nset['len']) == _nset['offset']:
if _nset['tie'] > 0:
self._tie(_nset, i)
nset['tie'] = 0
nset['len'] += _nset['len']
_nset['drop'] = 1
else:
nset['tie'] = 0
nset['len'] += _nset['len']
_nset['drop'] = 1
break
i += 1
def update_tie(self):
_noteset = []
_noteset_len = len(self.noteset)
i = 0
while i < _noteset_len:
nset = self.noteset[i]
if nset.get('drop'):
i += 1
continue
if nset['tie'] > 0:
self._tie(nset, i)
i += 1
for i in self.noteset:
if i.get('drop'):
continue
_noteset.append(i)
self.noteset = _noteset
def update_roman_numeral(self):
# get the total length of notesets
if not self.total_len > 0:
return
_len = len(self.roman_numerals)
if _len == 0:
return
i = 0
while i < _len:
rn = self.roman_numerals[i]
if rn['roman_numeral'] == 'N':
rn['drop'] = 1
i += 1
continue
if (i + 1) == _len:
rn['len'] = self.total_len - rn['offset']
break
_rn = self.roman_numerals[i + 1]
rn['len'] = _rn['offset'] - rn['offset']
i += 1
# rm dropped set
l = []
for i in self.roman_numerals:
if 'drop' in i:
continue
l.append(i)
self.roman_numerals = l
def analysis(self, data):
raise NotImplementedError
class Analysis(Core):
def __init__(self, staff, data):
super().__init__(staff, data)
def reform_roman_numeral(self):
d = {}
for rn in self.roman_numerals:
d[rn['offset']] = rn
return d
def analysis(self, data):
self.to_noteset(data)
self.update_tie()
self.update_roman_numeral()
def get_result(self):
d = {}
d['noteset'] = self.noteset
d['styles'] = self.styles
d['roman_numerals'] = self.reform_roman_numeral()
d['emotions'] = self.emotions
d['instructions'] = self.instructions
d['total_len'] = self.total_len
d['time_signs'] = self.time_signs
d['keys'] = self.keys
return d
if __name__ == "__main__":
data = ['C4~', ['chord', 'E4~', 'G4~'], [
'chord', 'E4~', 'G4~'], ['chord', 'E4', 'G4']]
data2 = ['C4', ['trip', 'C4', 'E4', 'G4']]
data3 = ['C4~', 'C4', 'E4~', 'E4']
data4 = ['CC8', 'r8', 'DD8', 'CC8', 'CC8', 'r8', 'DD8', 'r8']
data5 = [
'c2', '!up', '!good', 'c4.', 'c8', 'c2', '!happy', 'c2', 'c1~', 'c1', 'G2', 'c4.', 'c8', 'c1', 'G2', 'd4.',
'B8', 'c1', 'G2', 'c4.', 'e8', 'g2', 'e4', 'c4', 'd2', 'c4.', 'd8', 'd1', 'G2', 'c4.',
'c8', 'c1', 'G2', 'd4.', 'B8', 'c1', 'G2', 'c4.', 'e8', 'g2', 'e4', 'c4', 'f2', 'e4.',
'd8', 'c1', 'r1', 'r1', 'r1', 'r1']
data6 = ['!I', 'R1', '!II', 'R1', '!III', '!IV', '!V', '!VI', '!VII']
data7 = ['$$pop', 'r1', '!I', 'r1', '*happy', '!IV',
'!V7', '!i', '!Isus4', '!!ts_44', '!!to_D']
#rym = Rhythm(data)
#bt = Beats(data4)
    # 'Melody' is not defined in this module; Analysis is the concrete class.
    # The staff values below are assumed placeholders.
    ml = Analysis({'timesign': '4/4', 'key': 'C'}, data7)
# ml.show_noteset()
| 33.804598
| 115
| 0.438286
| 999
| 8,823
| 3.744745
| 0.264264
| 0.028067
| 0.036354
| 0.027265
| 0.262229
| 0.221866
| 0.193264
| 0.139
| 0.109596
| 0.109596
| 0
| 0.050727
| 0.39227
| 8,823
| 260
| 116
| 33.934615
| 0.64696
| 0.017908
| 0
| 0.295652
| 0
| 0
| 0.129752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069565
| false
| 0
| 0.017391
| 0
| 0.152174
| 0.008696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11025303e524cbae387748d4c806d2a09276590a
| 6,302
|
py
|
Python
|
tests/server/utils.py
|
csadorf/aiida-optimade
|
99ee1113cfc109a40a83bb43af8d07ce7e1601e6
|
[
"MIT"
] | null | null | null |
tests/server/utils.py
|
csadorf/aiida-optimade
|
99ee1113cfc109a40a83bb43af8d07ce7e1601e6
|
[
"MIT"
] | null | null | null |
tests/server/utils.py
|
csadorf/aiida-optimade
|
99ee1113cfc109a40a83bb43af8d07ce7e1601e6
|
[
"MIT"
] | null | null | null |
# pylint: disable=no-name-in-module,too-many-arguments
import json
import re
import typing
from urllib.parse import urlparse
import warnings
from requests import Response
from fastapi.testclient import TestClient
from pydantic import BaseModel
import pytest
from starlette import testclient
from optimade import __api_version__
from optimade.models import ResponseMeta
class OptimadeTestClient(TestClient):
"""Special OPTIMADE edition of FastAPI's (Starlette's) TestClient
This is needed, since `urllib.parse.urljoin` removes paths from the passed
`base_url`.
So this will prepend any requests with the MAJOR OPTIMADE version path.
"""
def __init__(
self,
app: typing.Union[testclient.ASGI2App, testclient.ASGI3App],
base_url: str = "http://example.org",
raise_server_exceptions: bool = True,
root_path: str = "",
version: str = "",
) -> None:
super(OptimadeTestClient, self).__init__(
app=app,
base_url=base_url,
raise_server_exceptions=raise_server_exceptions,
root_path=root_path,
)
if version:
if not version.startswith("v"):
version = f"/v{version}"
if re.match(r"v[0-9](.[0-9]){0,2}", version) is None:
warnings.warn(
f"Invalid version passed to client: '{version}'. "
f"Will use the default: '/v{__api_version__.split('.')[0]}'"
)
version = f"/v{__api_version__.split('.')[0]}"
self.version = version
def request( # pylint: disable=too-many-locals
self,
method: str,
url: str,
params: testclient.Params = None,
data: testclient.DataType = None,
headers: typing.MutableMapping[str, str] = None,
cookies: testclient.Cookies = None,
files: testclient.FileType = None,
auth: testclient.AuthType = None,
timeout: testclient.TimeOut = None,
allow_redirects: bool = None,
proxies: typing.MutableMapping[str, str] = None,
hooks: typing.Any = None,
stream: bool = None,
verify: typing.Union[bool, str] = None,
cert: typing.Union[str, typing.Tuple[str, str]] = None,
json: typing.Any = None, # pylint: disable=redefined-outer-name
) -> Response:
if (
re.match(r"/?v[0-9](.[0-9]){0,2}/", url) is None
and not urlparse(url).scheme
):
while url.startswith("/"):
url = url[1:]
url = f"{self.version}/{url}"
return super().request(
method=method,
url=url,
params=params,
data=data,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
json=json,
)
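# Usage sketch of the version-prefixing above (illustrative): a client built
# with version="v1" rewrites client.get("info") to request "v1/info", while
# absolute URLs and already-versioned paths pass through untouched.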
class EndpointTests:
"""Base class for common tests of endpoints"""
request_str: str = None
response_cls: BaseModel = None
response = None
json_response = None
@pytest.fixture(autouse=True)
def get_response(self, client):
"""Get response from client"""
self.response = client.get(self.request_str)
self.json_response = self.response.json()
yield
self.response = None
self.json_response = None
@staticmethod
def check_keys(keys: list, response_subset: typing.Iterable):
"""Utility function to help validate dict keys"""
for key in keys:
assert (
key in response_subset
), f"{key} missing from response {response_subset}"
def test_response_okay(self):
"""Make sure the response was successful"""
assert self.response.status_code == 200, (
f"Request to {self.request_str} failed: "
f"{json.dumps(self.json_response, indent=2)}"
)
def test_meta_response(self):
"""General test for `meta` property in response"""
assert "meta" in self.json_response
meta_required_keys = ResponseMeta.schema()["required"]
meta_optional_keys = list(
set(ResponseMeta.schema()["properties"].keys()) - set(meta_required_keys)
)
implemented_optional_keys = ["data_available", "implementation"]
self.check_keys(meta_required_keys, self.json_response["meta"])
self.check_keys(implemented_optional_keys, meta_optional_keys)
self.check_keys(implemented_optional_keys, self.json_response["meta"])
def test_serialize_response(self):
"""General test for response JSON and pydantic model serializability"""
assert self.response_cls is not None, "Response class unset for this endpoint"
self.response_cls(**self.json_response) # pylint: disable=not-callable
def client_factory():
"""Return TestClient for OPTIMADE server"""
from aiida_optimade.main import APP
def inner(
version: str = None, raise_server_exceptions: bool = True
) -> OptimadeTestClient:
if version:
return OptimadeTestClient(
APP,
base_url="http://example.org",
version=version,
raise_server_exceptions=raise_server_exceptions,
)
return OptimadeTestClient(
APP,
base_url="http://example.org",
raise_server_exceptions=raise_server_exceptions,
)
return inner
class NoJsonEndpointTests:
"""A simplified mixin class for tests on non-JSON endpoints."""
request_str: str = None
response_cls: BaseModel = None
response: Response = None
@pytest.fixture(autouse=True)
def get_response(self, client):
"""Get response from client"""
self.response = client.get(self.request_str)
yield
self.response = None
def test_response_okay(self):
"""Make sure the response was successful"""
assert (
self.response.status_code == 200
), f"Request to {self.request_str} failed: {self.response.content}"
| 32.65285
| 86
| 0.60822
| 704
| 6,302
| 5.292614
| 0.261364
| 0.032206
| 0.045089
| 0.020934
| 0.31562
| 0.254697
| 0.214707
| 0.190553
| 0.164788
| 0.164788
| 0
| 0.005367
| 0.290384
| 6,302
| 192
| 87
| 32.822917
| 0.827818
| 0.125674
| 0
| 0.209459
| 0
| 0
| 0.100663
| 0.0265
| 0
| 0
| 0
| 0
| 0.033784
| 1
| 0.074324
| false
| 0.006757
| 0.087838
| 0
| 0.256757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11026c0c5eee347310533201a00163d72346ee00
| 3,673
|
py
|
Python
|
super_topic/main.py
|
susmote/WeiboTools
|
659232b4525bcbedf350da1127d382ff6c6e9e71
|
[
"MIT"
] | 3
|
2018-11-11T22:07:23.000Z
|
2019-03-08T08:20:31.000Z
|
super_topic/main.py
|
susmote/WeiboTools
|
659232b4525bcbedf350da1127d382ff6c6e9e71
|
[
"MIT"
] | null | null | null |
super_topic/main.py
|
susmote/WeiboTools
|
659232b4525bcbedf350da1127d382ff6c6e9e71
|
[
"MIT"
] | 1
|
2021-08-31T06:44:54.000Z
|
2021-08-31T06:44:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 2018/11/5
@author: susmote
"""
import time
import requests
import json
# View the super topics (超话) you follow and check in to them
if __name__ == '__main__':
username = input("请输入用户名: ")
password = input("请输入密码: ")
login_url = "https://passport.weibo.cn/sso/login"
headers = {
"Referer": "https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=https%3A%2F%2Fm.weibo.cn%2F"
}
session = requests.session()
login_post_data = {
"username": username,
"password": password,
"savestate": "1",
"r": "https://m.weibo.cn/",
"ec": "0",
"pagerefer": "https://m.weibo.cn/login?backURL=https%253A%252F%252Fm.weibo.cn%252F",
"entry": "mweibo",
"wentry": "",
"loginfrom": "",
"client_id": "",
"code": "",
"qq": "",
"mainpageflag": "1",
"hff": "",
"hfp": ""
}
login_page_res = session.post(login_url, data=login_post_data, headers=headers)
login_page_res_json = json.loads(login_page_res.text)
judge_login_res = session.get("https://m.weibo.cn/api/config").text
judge_login_res_json = json.loads(judge_login_res)
cookie_str = ''
    if judge_login_res_json["data"]["login"]:
        print(1, "Automatic login succeeded")
        for key in list(session.cookies.get_dict().keys()):  # iterate over the cookie dict
            cookie_str += (key + '=' + session.cookies.get_dict()[key] + ';')  # join key=value pairs into a Cookie header string
else:
        # "用户名或密码错误" is the server's literal "incorrect username or password" response.
        if login_page_res_json["msg"] == "用户名或密码错误":
            print("Incorrect username or password")
            exit()
        else:
            print(login_page_res_json)
            print("Cannot log in directly; gesture captcha verification is required")
exit()
followtopic_list = []
url = "https://m.weibo.cn/api/container/getIndex?containerid=100803_-_followsuper"
session = requests.session()
headers = {
"Host": "m.weibo.cn",
"Referer": "https://m.weibo.cn",
"Cookie": cookie_str
}
followtopic_res = session.get(url, headers=headers)
followtopic_res_json = json.loads(followtopic_res.text)
for i in range(0, len(followtopic_res_json["data"]["cards"][0]["card_group"])):
if followtopic_res_json["data"]["cards"][0]["card_group"][i]["card_type"] == "8":
followtopic_list.append(followtopic_res_json["data"]["cards"][0]["card_group"][i])
if followtopic_res_json["data"]["cardlistInfo"]["since_id"] != "":
followtopic_url = url+"&since_id="+ followtopic_res_json["data"]["cardlistInfo"]["since_id"]
res = session.get(followtopic_url, headers=headers)
res_json = json.loads(res.text)
for i in range(0, len(res_json["data"]["cards"][0]["card_group"])-1):
if res_json["data"]["cards"][0]["card_group"][i]["card_type"] == "8":
followtopic_list.append(res_json["data"]["cards"][0]["card_group"][i])
for i in range(0, len(followtopic_list)):
print(followtopic_list[i]["title_sub"])
st_url = "https://m.weibo.cn/api/config"
login_data = session.get(st_url, headers=headers).text
login_data_json = json.loads(login_data)["data"]
postdata = {
"st": login_data_json["st"]
}
        if followtopic_list[i]["buttons"][0]["scheme"] is False:
continue
else:
checkin_url = "https://m.weibo.cn"+str(followtopic_list[i]["buttons"][0]["scheme"])
print(checkin_url)
res = session.post(checkin_url, data=postdata, headers=headers)
res_json = json.loads(res.text)
if res_json["ok"] == 1:
print("签到成功 "+res_json["data"]["msg"])
else:
print("签到失败 "+res_json)
| 37.865979
| 118
| 0.58263
| 453
| 3,673
| 4.505519
| 0.267108
| 0.065164
| 0.053895
| 0.044586
| 0.302793
| 0.292994
| 0.231259
| 0.165605
| 0.079863
| 0.055855
| 0
| 0.018949
| 0.238497
| 3,673
| 97
| 119
| 37.865979
| 0.710762
| 0.024231
| 0
| 0.142857
| 0
| 0.02381
| 0.238109
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.047619
| 0.035714
| 0
| 0.035714
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11028d4ec017320409e77b44e5459cd4e2c1cd81
| 1,163
|
py
|
Python
|
websupportsk_ddns/notifiers.py
|
JozefGalbicka/websupportsk-ddns
|
8fe1408121dc5f14f42e6603d9a50bcaa5afabee
|
[
"MIT"
] | 2
|
2021-07-28T09:09:58.000Z
|
2021-07-28T10:28:45.000Z
|
websupportsk_ddns/notifiers.py
|
JozefGalbicka/websupportsk-ddns
|
8fe1408121dc5f14f42e6603d9a50bcaa5afabee
|
[
"MIT"
] | 1
|
2021-11-14T11:31:38.000Z
|
2021-11-19T22:38:44.000Z
|
websupportsk_ddns/notifiers.py
|
JozefGalbicka/websupportsk-ddns
|
8fe1408121dc5f14f42e6603d9a50bcaa5afabee
|
[
"MIT"
] | null | null | null |
import requests
import logging
logger = logging.getLogger(__name__)
def send_notifications(notifiers, message):
for notifier in notifiers:
notifier.send_notification(message)
class Pushover:
def __init__(self, api_token, user_key):
self.api_token = api_token
self.user_key = user_key
self.url = "https://api.pushover.net/1/messages.json"
def send_notification(self, text):
r = requests.post(self.url, data={
"token": self.api_token,
"user": self.user_key,
"message": text
})
logger.debug(f"Pushover notification response: {r.text}")
if "errors" in r.text:
logger.error(f"Pushover error occured: {r.text}")
class Gotify:
def __init__(self, url, api_token):
self.api_token = api_token
self.url = f"http://{url}/message?token={api_token}"
def send_notification(self, text):
r = requests.post(self.url, data={
"message": text
})
logger.debug(f"Gotify notification response: {r.text}")
if "error" in r.text:
logger.error(f"Gotify error occured: {r.text}")
| 29.075
| 65
| 0.618229
| 147
| 1,163
| 4.70068
| 0.285714
| 0.092619
| 0.069465
| 0.04631
| 0.416787
| 0.272069
| 0.147612
| 0.147612
| 0.147612
| 0.147612
| 0
| 0.001161
| 0.259673
| 1,163
| 40
| 66
| 29.075
| 0.801394
| 0
| 0
| 0.322581
| 0
| 0
| 0.216495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.064516
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11070c63ba36e05b385352144090c398a2ed7415
| 15,806
|
py
|
Python
|
code/plotting/plot_lsst.py
|
modichirag/21cm_cleaning
|
1615fea4e2d617bb6ef00770a49698901227daa8
|
[
"MIT"
] | 1
|
2019-08-27T10:05:41.000Z
|
2019-08-27T10:05:41.000Z
|
code/plotting/plot_lsst.py
|
modichirag/21cm_cleaning
|
1615fea4e2d617bb6ef00770a49698901227daa8
|
[
"MIT"
] | null | null | null |
code/plotting/plot_lsst.py
|
modichirag/21cm_cleaning
|
1615fea4e2d617bb6ef00770a49698901227daa8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Plots the power spectra and Fourier-space biases for the HI.
#
import warnings
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
#warnings.filterwarnings("ignore")
if rank!=0: warnings.filterwarnings("ignore")
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pmesh.pm import ParticleMesh
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower
from nbodykit.cosmology import Planck15, EHPower, Cosmology
sys.path.append('../utils/')
sys.path.append('../recon/')
sys.path.append('../recon/cosmo4d/')
from cosmo4d.pmeshengine import nyquist_mask
from lab import mapbias as mapp
from lab import mapnoise as mapn
from lab import report as rp
from lab import dg
from getbiasparams import getbias
import tools
#
from matplotlib import rc, rcParams, font_manager
rcParams['font.family'] = 'serif'
fsize = 12-1
fontmanage = font_manager.FontProperties(family='serif', style='normal',
size=fsize, weight='normal', stretch='normal')
font = {'family': fontmanage.get_family()[0],
'style': fontmanage.get_style(),
'weight': fontmanage.get_weight(),
'size': fontmanage.get_size(),
}
#
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('-m', '--model', help='model name to use')
parser.add_argument('-a', '--aa', help='scale factor', default=0.5000, type=float)
parser.add_argument('-l', '--bs', help='boxsize', default=1024, type=float)
parser.add_argument('-n', '--nmesh', help='nmesh', default=256, type=int)
parser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)
parser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.03, type=float)
parser.add_argument( '--pp', help='upsample', default=1)
args = parser.parse_args()
figpath = './figs/'
dpath = '../../data/'
bs, nc, aa = args.bs, args.nmesh, args.aa
nc2 = nc*2
zz = 1/aa- 1
kmin = args.kmin
ang = args.angle
if args.pp: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc2, nc2, nc2])
else: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
##
def save2dphoto(Nmu=4, numd=1e-2, aa=None, scatter=False):
if numd > 1e-2:
print('Too high number density')
sys.exit()
num = int(numd*bs**3)
if aa is None: aas = [0.3333, 0.2000, 0.1429]
else: aas = [aa]
for ia, aa in enumerate(aas):
zz = 1/aa-1
sigz = lambda z : 120*((1+z)/5)**-0.5
##
cat = BigFileCatalog('/global/cscratch1/sd/chmodi/m3127/H1mass/highres/10240-9100/fastpm_%0.4f/Hcat-Numd-%04d/'%(aa, 1e-2*1e4))
if scatter:
pos = cat['Position'][:num].compute()
dz = np.random.normal(0, sigz(zz), size=pos[:, -1].size)
pos[:, -1] += dz
layout = pm.decompose(pos)
hmesh = pm.paint(pos, layout=layout)
else:
pos = cat['Position'][:num].compute()
layout = pm.decompose(pos)
hmesh = pm.paint(pos, layout=layout)
def tf(k): #Photoz smoothing
kmesh = sum(ki ** 2 for ki in k)**0.5
kmesh[kmesh == 0] = 1
mumesh = k[2]/kmesh
weights = np.exp(-kmesh**2 * mumesh**2 * sigz(zz)**2/2.)
return weights
hmesh /= hmesh.cmean()
if not scatter:
hmeshc = hmesh.r2c()
hmeshc.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis)
hmesh = hmeshc.c2r()
ph = FFTPower(hmesh, mode='2d', Nmu=Nmu).power
#
for iw, wopt in enumerate(['opt', 'pess']):
#for iw, wopt in enumerate(['opt']):
for it, thopt in enumerate(['opt', 'pess', 'reas']):
#for it, thopt in enumerate([ 'reas']):
if rank == 0: print(aa, wopt, thopt)
angle = np.round(mapn.wedge(zz, att=wopt, angle=True), 0)
#dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, 0.03, angle)
dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_%s/'%(aa, 0.03, wopt)
dpath += 'L%04d-N%04d-R//thermal-%s-hex/ZA/opt_s999_h1massA_fourier_rsdpos/'%(bs, nc, thopt)
if scatter: ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photog-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
else: ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photo-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
                os.makedirs(ofolder, exist_ok=True)
if rank == 0: print(ofolder)
if args.pp:
datapp = BigFileMesh(dpath+'/dataw_up/', 'mapp').paint()
bpaths = [dpath+'upsample2/%d-0.00//best-fit'%nc2] + [dpath + 'upsample2/%d-0.00//%04d/fit_p/'%(nc2,i) for i in range(100, 50, -20)]
else:
datapp = BigFileMesh(dpath+'/dataw/', 'mapp').paint()
bpaths = [dpath+'%d-0.00//best-fit'%nc] + [dpath + '%d-0.00//%04d/fit_p/'%(nc,i) for i in range(100, 50, -20)]
for path in bpaths:
if os.path.isdir(path):
break
if rank == 0: print(path)
bfit = BigFileMesh(path, 'mapp').paint()
pxrh = FFTPower(hmesh, second=bfit, mode='2d', Nmu=Nmu).power
pxwh = FFTPower(hmesh, second=datapp, mode='2d', Nmu=Nmu).power
fname = ofolder + 'photo-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
np.savetxt(fname, ph['power'].real)
fname = ofolder + 'xdataw-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
np.savetxt(fname, pxwh['power'].real)
fname = ofolder + 'xrecon-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
np.savetxt(fname, pxrh['power'].real)
def make_plot(Nmu=4, wopt='opt', thopt='reas'):
sigz = lambda z : 120*((1+z)/5)**-0.5
nbar = 10**-2.5
b = 3.2
Dphoto = lambda k, mu, z: np.exp(-k**2 * mu**2 * sigz(z)**2/2.)
kk = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-k.txt'%(Nmu, bs, aa))
try: modes = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-modes.txt'%(Nmu, bs, aa))
    except OSError:
datap = mapp.Observable.load('/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin0.03_opt/L%04d-N0256-R/thermal-reas-hex/ZA/opt_s999_h1massA_fourier_rsdpos/datap_up/'%(aa, bs))
tmp = FFTPower(datap.mapp, mode='2d', Nmu=Nmu).power
modes = tmp['modes'].astype('float64')
np.savetxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-modes.txt'%(wopt, thopt, Nmu, bs, aa), modes)
pm1 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
pm2 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm2.txt'%(wopt, thopt, Nmu, bs, aa))
xm = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-xm.txt'%(wopt, thopt, Nmu, bs, aa))
xmw = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/dataw-L%04d_%0.4f-up-xm.txt'%(wopt, thopt, Nmu, bs, aa))
pm1w = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/dataw-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
mubins = np.linspace(0, 1, kk.shape[1]+1)
mu = (mubins[1:] + mubins[:-1])*0.5
pkd = np.loadtxt(dpath + '/pklin_%0.4f.txt'%aa)
# pk = np.loadtxt(dpath + '/pklin_1.0000.txt')
ipkd = ius(pkd[:, 0], pkd[:, 1])
rr = xm/(pm1*pm2)**0.5
rrw = xmw/(pm1w*pm2)**0.5
pkd = ipkd(kk)
fac = b**2*Dphoto(kk, mu, zz)**2 *nbar*pkd
rhosq = rr**2*fac/(1+fac)
rhosqw = rrw**2*fac/(1+fac)
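    # Figure of merit (from the definitions above): with r = P_x/(P_1 P_2)^0.5
    # the cross-correlation coefficient and fac = b^2 D_photo^2 nbar P_lin the
    # photo sample's nP, rho^2 = r^2 fac/(1+fac); the relative error per bin is
    # Var(P_x)/P_x^2 = (1 + rho^-2)/N_modes, which the right panel plots.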
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
for i in range(mu.size):
lbl1, lbl2 = None, None
        if i < mu.size//2: lbl1 = r'$\mu$=%0.3f-%0.3f'%(mubins[i], mubins[i+1])
        else: lbl2 = r'$\mu$=%0.3f-%0.3f'%(mubins[i], mubins[i+1])
ax[0].plot(kk[:, i], rhosq[:, i], 'C%d'%i, label=lbl1, lw=2)
ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosq[:, i]**-1), 'C%d'%i, label=lbl2, lw=2)
ax[0].plot(kk[:, i], rhosqw[:, i], 'C%d--'%i, alpha=0.5)
ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosqw[:, i]**-1), 'C%d--'%i, alpha=0.5)
ax[0].plot(kk[:, 0], Dphoto(kk[:, 0], mu[i], zz)**2, 'C%d'%i, lw=1, alpha=1, ls=":")
ax[1].set_ylim(1e-3, 100)
ax[1].set_yscale('log')
ax[1].axhline(1, color='k', ls="--")
ax[0].set_ylabel(r'$\rho^2$', fontdict=font)
#ax[1].set_ylabel(r'$N^{-1}(1+\rho^{-2})$', fontsize=14)
ax[1].set_ylabel(r'Var$(P_\times)/P_\times^2$', fontdict=font)
ax[0].legend(prop=fontmanage, loc=1)
ax[1].legend(prop=fontmanage, loc=4)
for axis in ax[:]: axis.set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
for axis in ax.flatten():
#axis.axhline(1, color='k', ls=':')
axis.set_xscale('log')
axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')
# Put on some more labels.
for axis in ax.flatten():
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
##and finish
plt.tight_layout(rect=[0, 0, 1, 0.95])
if rank == 0 and not args.pp: plt.savefig(figpath + '/photo_z%d_L%04d-Nmu%d.pdf'%(zz*10, bs, Nmu))
if rank == 0 and args.pp: plt.savefig(figpath + '/photo_z%d_L%04d-Nmu%d-up.pdf'%(zz*10, bs, Nmu))
def make_plot_data(aa, numd, Nmu=8, wopt='opt', thopt='reas', scatter=False):
#
mubins = np.linspace(0, 1, Nmu+1)
mu = (mubins[1:] + mubins[:-1])*0.5
kk = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-k.txt'%(Nmu, bs, aa))
modes = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-modes.txt'%(Nmu, bs, aa))
pr = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
pw = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/dataw-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
pm1 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
pm2 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm2.txt'%(wopt, thopt, Nmu, bs, aa))
xm = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-xm.txt'%(wopt, thopt, Nmu, bs, aa))
rr = xm/(pm1*pm2)**0.5
pkd = np.loadtxt(dpath + '/pklin_%0.4f.txt'%aa)
ipkd = ius(pkd[:, 0], pkd[:, 1])
pkd = ipkd(kk)
if scatter : ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photog-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
else: ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photo-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
print(ofolder)
#get data
fname = ofolder + 'photo-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
ph = np.loadtxt(fname)
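    # include the Poisson shot noise 1/nbar of a sample with number density numd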
ph += 1/numd
fname = ofolder + 'xrecon-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
pxrh = np.loadtxt(fname)
fname = ofolder + 'xdataw-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
pxwh = np.loadtxt(fname)
rhosq = pxrh**2/ph/pr
rhosqw = pxwh**2/ph/pw
#get theory
sigz = lambda z : 120*((1+z)/5)**-0.5
Dphoto = lambda k, mu, z: np.exp(-k**2 * mu**2 * sigz(z)**2/2.)
nbar = 10**-2.5
b = 3.2
def iget(ii, k=1):
yy = rr[ii]
mask = ~np.isnan(yy)
return ius(mu[mask], yy[mask], k=k)
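    # iget(ii) returns a spline of r(mu) at fixed k-bin ii, skipping NaN cells
    # (bins emptied by the wedge), so r can be evaluated on the fine mus grid.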
mus = np.linspace(0, 1, 500)
rhosqmu = np.zeros((kk.shape[0], mus.size))
for ik, kv in enumerate(kk[:, -1]):
fac = b**2*Dphoto(kv, mus, zz)**2 *nbar*ipkd(kv)
try: rhosqmu[ik] = iget(ik)(mus)**2*fac/(1+fac)
except Exception as e: print(ik, e)
rhosqav = np.zeros((kk.shape[0], mu.size))
for i in range(mu.size):
mask = (mus > mubins[i]) & (mus < mubins[i+1])
        rhosqav[:, i] = np.trapz(rhosqmu[:, mask], mus[mask])/(mubins[i+1]-mubins[i])
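    # rhosqav is the bin-averaged prediction <rho^2> = (1/dmu) * int rho^2(mu) dmu,
    # computed with a trapezoid rule over the fine mus grid inside each coarse bin.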
#make figure
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
for i in range(mu.size):
lbl1, lbl2 = None, None
        lbl1 = r'$\mu$=%0.3f-%0.3f'%(mubins[i], mubins[i+1])
if i ==0: lbl2 = r'Recon$_{\rm Sim}$'
ax[0].plot(kk[:, i], rhosq[:, i], 'C%d'%i, label=lbl1)
ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosq[:, i]**-1), 'C%d'%i, label=lbl2)
#ax[0].plot(kk[:, i], rhosqw[:, i], 'C%d--'%i, alpha=0.4)
if i ==0: lbl2 = r'Noisy$_{\rm Sim}$'
ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosqw[:, i]**-1), 'C%d:'%i, alpha=1, lw=0.5, label=lbl2)
ax[0].plot(kk[:, i], rhosqav[:, i], 'C%d--'%i, alpha=1, lw=1)
if i ==0: lbl2 = r'Recon$_{\rm Pred}$'
ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosqav[:, i]**-1), 'C%d--'%i, alpha=1, lw=1, label=lbl2)
if i ==0: lbl0 = r'$D_{\rm photo}^2$'
else: lbl0 = None
ax[0].plot(kk[:, 0], Dphoto(kk[:, 0], mu[i], zz)**2, 'C%d'%i, lw=0.5, alpha=1, ls=":", label=lbl0)
#
ax[0].set_ylim(-.05, 1.1)
ax[1].set_ylim(9e-4, 100)
ax[1].set_yscale('log')
ax[1].axhline(1, color='k', ls="--")
ax[0].set_ylabel(r'$\rho^2$', fontdict=font)
#ax[1].set_ylabel(r'$N^{-1}(1+\rho^{-2})$', fontsize=14)
ax[1].set_ylabel(r'Var$(P_\times)/P_\times^2$', fontdict=font)
ax[0].legend(prop=fontmanage, loc=1, ncol=1)
ax[1].legend(prop=fontmanage, loc=3, ncol=1)
for axis in ax[:]: axis.set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
for axis in ax.flatten():
#axis.axhline(1, color='k', ls=':')
axis.set_xscale('log')
axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')
# Put on some more labels.
for axis in ax.flatten():
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
# and finish
plt.tight_layout(rect=[0, 0, 1, 0.95])
if rank == 0 and not args.pp: plt.savefig(figpath + '/photod_z%d_L%04d-Nmu%d.pdf'%(zz*10, bs, Nmu))
if rank == 0 and args.pp:
if scatter : plt.savefig(figpath + '/photodg_z%d_L%04d-Nmu%d-up.pdf'%(zz*10, bs, Nmu))
else : plt.savefig(figpath + '/photod_z%d_L%04d-Nmu%d-up.pdf'%(zz*10, bs, Nmu))
################
if __name__=="__main__":
#save2dphoto(Nmu=4, numd=10**-2.5, aa=0.2000)
#save2dphoto(Nmu=8, numd=10**-2.5, aa=0.2000)
#save2dphoto(Nmu=4, numd=10**-2.5, aa=0.2000, scatter=True)
#save2dphoto(Nmu=8, numd=10**-2.5, aa=0.2000, scatter=True)
#make_plot(Nmu=4)
#make_plot(Nmu=8)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=8)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=8, scatter=True)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=4)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=4, scatter=True)
#
hexsha: 1107964a13a8c587e9dedd0f0fb6a2581ecb0887 | size: 3,999 | ext: py | lang: Python
path: ndfinance/strategies/basic/__init__.py | repo: gomtinQQ/NDFinance | head: 522bf0486e5f5337c522d0e34b088f386c7c3290 | licenses: ["MIT"]
stars: 35 (2020-09-26T16:31:45.000Z to 2022-01-01T12:12:21.000Z) | issues: 1 (2020-09-27T08:54:23.000Z) | forks: 8 (2020-10-06T23:51:22.000Z to 2022-02-16T12:11:10.000Z)
from ndfinance.strategies import Strategy, PeriodicRebalancingStrategy
from ndfinance.brokers.base import order
from ndfinance.brokers.base.order import *
from ndfinance.strategies.utils import *
class SameWeightBuyHold(Strategy):
def __init__(self):
super(SameWeightBuyHold, self).__init__()
self.ordered = False
def _logic(self):
if not self.ordered:
weight = 1 / len(self.broker.assets)
            for asset in self.broker.assets.values():
                self.broker.order(order.Weight(asset, self.broker.portfolio.portfolio_value, 1, weight))
self.ordered = True
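# Equal weighting assigns each of the N assets a 1/N target fraction of the
# current portfolio value. Minimal standalone sketch of the arithmetic
# (illustrative only, not part of the NDFinance API):
#   n_assets = 4
#   weights = [1 / n_assets] * n_assets   # -> [0.25, 0.25, 0.25, 0.25]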
class SameWeightBuynRebalance(PeriodicRebalancingStrategy):
def __init__(self, rebalance_period):
super(SameWeightBuynRebalance, self).__init__(rebalance_period)
def register_engine(self, *args, **kwargs):
super(SameWeightBuynRebalance, self).register_engine(*args, **kwargs)
weight = 1 / len(self.broker.assets.keys())
self.weights = [weight for _ in self.broker.assets.keys()]
return self
def _logic(self):
self.broker.order(order.Rebalance(tickers=self.broker.assets.keys(), weights=self.weights))
class OscillatorStrategy(Strategy):
def __init__(self, breakout_threshold, oversold_threshold, overbought_threshold, osc_label,
use_short=False, use_time_cut=False, timecut_params=None, use_n_perc_rule=False, n_perc_params=None,
use_stop_loss=False, stop_loss_params=None, *args, **kwargs):
super(OscillatorStrategy, self).__init__()
self.use_short = use_short
self.breakout_threshold = breakout_threshold
self.oversold_threshold = oversold_threshold
self.overbought_threshold = overbought_threshold
self.osc_label = osc_label
self.use_time_cut = use_time_cut
self.timecut_params = timecut_params
self.use_n_perc_rule = use_n_perc_rule
self.n_perc_params = n_perc_params
self.use_stop_loss = use_stop_loss
self.stop_loss_params = stop_loss_params
def register_engine(self, *args, **kwargs):
super(OscillatorStrategy, self).register_engine(*args, **kwargs)
self.ticker = list(self.broker.assets.keys())[0]
return self
def _logic(self):
indicator_ = self.data_provider.get_ohlcvt(self.ticker, self.osc_label, n=2)
indicator = indicator_[-1]
indicator_prev = indicator_[0]
if not self.broker.portfolio.positions:
ordered = True
value = apply_n_percent_rule(self.broker.portfolio.portfolio_value, **self.n_perc_params) \
if self.use_n_perc_rule else self.broker.portfolio.portfolio_value
if (indicator >= self.breakout_threshold) & (indicator_prev < self.breakout_threshold):
self.broker.order(Weight(self.broker.assets[self.ticker], value, 1, 1))
elif (((indicator <= self.breakout_threshold) & (indicator_prev > self.breakout_threshold)) & self.use_short):
self.broker.order(Weight(self.broker.assets[self.ticker], value, -1, 1))
else:
ordered = False
if ordered & self.use_time_cut:
self.broker.order(TimeCutClose(self.broker.assets[self.ticker], self.indexer.timestamp, **self.timecut_params))
if ordered & self.use_stop_loss:
self.broker.order(StopLoss(self.broker.assets[self.ticker], **self.stop_loss_params))
elif self.broker.portfolio.positions[self.ticker].side == 1:
if (indicator <= self.overbought_threshold) & (indicator_prev > self.overbought_threshold):
self.broker.order(Close(self.broker.assets[self.ticker]))
elif self.broker.portfolio.positions[self.ticker].side == -1:
if (indicator >= self.oversold_threshold) & (indicator_prev < self.oversold_threshold):
self.broker.order(Close(self.broker.assets[self.ticker]))
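# The entry/exit rules above are threshold crossovers between consecutive
# oscillator samples. A minimal standalone equivalent of that test
# (illustrative only; not part of the NDFinance API):
def _crossed_above(prev, curr, threshold):
    """True when a series crosses `threshold` from below between two samples."""
    return prev < threshold <= curr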
hexsha: 110af0aa9cc468fbee2f90b29540e3ee61251308 | size: 1,975 | ext: py | lang: Python
path: daemon.py | repo: hletrd/TRIPOL_polarizer | head: 124d202bf876635bd402306fb5d7572fd45ce599 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from flask import Flask, render_template, send_from_directory
import serial
import serial.tools.list_ports
import threading
app = Flask(__name__)
def run_server():
app.run(host=bind_ip, debug=True, port=bind_port)
@app.route('/')
def index():
return render_template('_basic.html', ports=serialhandler.get_port_list())
@app.route('/get/angle/now')
def get_angle():
return str(serialhandler.angle_now)
@app.route('/get/angle/to')
def get_angle_to():
return str(serialhandler.angle_to)
@app.route('/open/<path:port>')
def open_serial(port):
serialhandler.connect(port[1:])
return '1'
@app.route('/move/<string:angle>')
def move_angle(angle):
if (360 >= float(angle) >= 0):
serialhandler.move_angle(str(float(angle)))
return '1'
return '0'
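# Example requests (assuming the defaults set below, http://127.0.0.1:8000):
#   GET /open/<port>       open the serial port (first character of <port> is stripped)
#   GET /move/90.0         rotate the polarizer to 90 degrees
#   GET /get/angle/now     read back the last angle reported by the device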
@app.route('/static/<path:path>')
def send_static(path):
return send_from_directory('static', path, as_attachment=False)
class SerialHandler(object):
def __init__(self):
self.Serial = serial.Serial()
self.Serial.baudrate = 115200
self.Serial.timeout = 0.1
self.angle_now = 0.0
self.angle_to = '0.0'
self.q = ''
def get_port_list(self):
result = serial.tools.list_ports.comports()
return result
def connect(self, port):
self.Serial.port = port
self.Serial.open()
threading.Timer(0.2, self.read_serial).start()
def move_angle(self, angle):
self.Serial.write(angle.encode('utf-8'))
self.angle_to = angle
def read_serial(self):
threading.Timer(0.2, self.read_serial).start()
        try:
            # pyserial >= 3.0 exposes in_waiting as a property
            while self.Serial.in_waiting > 0:
                self.q += self.Serial.read().decode('utf-8')
        except AttributeError:
            # fall back to the pyserial 2.x inWaiting() method
            while self.Serial.inWaiting() > 0:
                self.q += self.Serial.read(1).decode('utf-8')
splitted = self.q.split('\n\n')
        last = splitted[-1]
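        # Assumed device protocol: status blocks separated by blank lines, e.g.
        # "angpos:42.0\nspeed:1.0"; the newest chunk is parsed once it contains
        # both keys, then the receive buffer is cleared.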
if 'angpos:' in last and 'speed:' in last:
self.q = ''
            self.angle_now = float(last.split('angpos:')[1].split('\n')[0])
if __name__ == '__main__':
bind_ip = '127.0.0.1'
bind_port = 8000
serialhandler = SerialHandler()
run_server()
hexsha: 110ec99e58e5ce9d328a5556af8ee117cc5ebd9a | size: 3,304 | ext: py | lang: Python
path: src/utils.py | repo: senadkurtisi/neural-style-transfer | head: 0048d8b184959de095f0821f63205c8ce3ff2dff | licenses: ["MIT"]
stars: null | issues: null | forks: null
from PIL import Image
import numpy as np
import torch
import torchvision.transforms.transforms as transforms
import os
from config import cfg
def preprocess_img(img_path):
""" Loads the desired image and prepares it
for VGG19 model.
Parameters:
img_path: path to the image
Returns:
processed: loaded image after preprocessing
"""
prep = transforms.Compose([transforms.Resize((cfg.IMG_SIZE, cfg.IMG_SIZE)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
std=[1, 1, 1]),
transforms.Lambda(lambda x: x.mul_(255)),
])
img = Image.open(img_path)
processed = prep(img)
if cfg.cuda:
processed = processed.cuda()
return processed.unsqueeze(0)
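# Usage sketch (hypothetical path): content = preprocess_img("data/content.jpg")
# yields a (1, 3, IMG_SIZE, IMG_SIZE) tensor. The BGR channel swap, mean
# subtraction and x255 scaling match inputs expected by Caffe-trained VGG weights.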
def get_init_img(mode='noise', source_img=None):
""" Constructs the initial image for the NST algorithm.
Parameters:
        mode: how to initialize the image; one of {'noise', 'other'}
        source_img: image used for initialization if @mode is set to 'other'
Returns:
opt_image: initialized image
"""
    assert mode in ['noise', 'other'], f"{mode} is an illegal initialization mode!"
    if mode == 'other':
        assert source_img is not None, f"Can't initialize from {mode}!"
    if mode == 'noise':
        # start from Gaussian noise; move to the GPU only if configured
        opt_image = np.random.normal(loc=0, scale=90.,
                                     size=(1, 3, cfg.IMG_SIZE,
                                           cfg.IMG_SIZE)).astype(np.float32)
        opt_image = torch.from_numpy(opt_image).float()
        if cfg.cuda:
            opt_image = opt_image.cuda()
else:
opt_image = (source_img.detach()).clone()
# Make sure that gradients are being calculated for this image
# During forward pass
opt_image.requires_grad = True
return opt_image
def gram_matrix(x):
""" Calculates the Gram matrix for the
feature maps contained in x.
Parameters:
x: feature maps
Returns:
G: gram matrix
"""
b, c, h, w = x.size()
F = x.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
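# Worked example (shapes only): for x of shape (1, 64, 32, 32), F has shape
# (1, 64, 1024) and G = F F^T / (32*32) has shape (1, 64, 64); G[0, i, j] is
# the normalized inner product of feature maps i and j.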
def postprocess(img):
""" Prepares the image for display and saving. """
postp = transforms.Compose([transforms.Lambda(lambda x: x.mul_(1. / 255)),
transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
std=[1, 1, 1]),
transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]), # turn to RGB
])
img = postp(img)
# In order to have more visually appealing images
# We need to clip the pixel values
img[img > 1] = 1
img[img < 0] = 0
img = transforms.ToPILImage()(img)
return img
def get_file_name(path):
""" Extracts only the filename from the given
path. Extension is removed as well.
"""
base = os.path.basename(path)
return os.path.splitext(base)[0]